From 883f2055431e5e6eca51f81db6e7519c65d1a985 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 5 May 2023 02:47:27 +0000
Subject: [PATCH] build(deps): bump github.com/gin-gonic/gin from 1.8.2 to 1.9.0

Bumps [github.com/gin-gonic/gin](https://github.com/gin-gonic/gin) from 1.8.2 to 1.9.0.
- [Release notes](https://github.com/gin-gonic/gin/releases)
- [Changelog](https://github.com/gin-gonic/gin/blob/master/CHANGELOG.md)
- [Commits](https://github.com/gin-gonic/gin/compare/v1.8.2...v1.9.0)

---
updated-dependencies:
- dependency-name: github.com/gin-gonic/gin
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]
---
 go.mod | 28 +- go.sum | 84 +- vendor/github.com/bytedance/sonic/.gitignore | 52 + vendor/github.com/bytedance/sonic/.gitmodules | 3 + .../bytedance/sonic/.licenserc.yaml | 24 + .../bytedance/sonic/CODE_OF_CONDUCT.md | 128 + .../bytedance/sonic/CONTRIBUTING.md | 63 + vendor/github.com/bytedance/sonic/CREDITS | 0 .../bytedance/sonic/INTRODUCTION.md | 48 + vendor/github.com/bytedance/sonic/LICENSE | 201 + vendor/github.com/bytedance/sonic/Makefile | 112 + vendor/github.com/bytedance/sonic/README.md | 359 + vendor/github.com/bytedance/sonic/api.go | 185 + .../bytedance/sonic/ast/api_amd64.go | 126 + .../bytedance/sonic/ast/api_compat.go | 102 + vendor/github.com/bytedance/sonic/ast/asm.s | 0 .../github.com/bytedance/sonic/ast/decode.go | 430 + .../github.com/bytedance/sonic/ast/encode.go | 259 + .../github.com/bytedance/sonic/ast/error.go | 98 + .../bytedance/sonic/ast/iterator.go | 164 + vendor/github.com/bytedance/sonic/ast/node.go | 1802 ++ .../github.com/bytedance/sonic/ast/parser.go | 618 + .../github.com/bytedance/sonic/ast/search.go | 30 + vendor/github.com/bytedance/sonic/ast/sort.go | 206 + .../bytedance/sonic/ast/stubs_go115.go | 55 + .../bytedance/sonic/ast/stubs_go120.go | 55 + .../github.com/bytedance/sonic/bench-arm.sh | 14 + .../bytedance/sonic/bench-large.png | Bin 0 -> 87463 bytes .../bytedance/sonic/bench-small.png | Bin 0 -> 87635 bytes vendor/github.com/bytedance/sonic/bench.py | 134 + vendor/github.com/bytedance/sonic/bench.sh | 27 + .../bytedance/sonic/check_branch_name.sh | 10 + vendor/github.com/bytedance/sonic/compat.go | 131 + .../github.com/bytedance/sonic/decoder/asm.s | 0 .../sonic/decoder/assembler_amd64_go116.go | 1943 ++ .../sonic/decoder/assembler_amd64_go117.go | 1922 ++ .../bytedance/sonic/decoder/compiler.go | 1136 ++ .../bytedance/sonic/decoder/debug.go | 70 + .../bytedance/sonic/decoder/decoder.go | 245 + .../bytedance/sonic/decoder/errors.go | 181 + .../sonic/decoder/generic_amd64_go116.go | 776 + .../sonic/decoder/generic_amd64_go117.go | 772 + .../sonic/decoder/generic_amd64_go117_test.s | 37 + .../sonic/decoder/generic_amd64_test.s | 37 + .../bytedance/sonic/decoder/pools.go | 143 + .../bytedance/sonic/decoder/primitives.go | 46 + .../bytedance/sonic/decoder/stream.go | 217 + .../bytedance/sonic/decoder/stubs_go115.go | 111 + .../bytedance/sonic/decoder/stubs_go120.go | 111 + .../bytedance/sonic/decoder/types.go | 58 + .../bytedance/sonic/decoder/utils.go | 39 + .../github.com/bytedance/sonic/encoder/asm.s | 0 .../sonic/encoder/assembler_amd64_go116.go | 1198 ++ .../sonic/encoder/assembler_amd64_go117.go | 1201 ++ .../bytedance/sonic/encoder/compiler.go | 885 + .../bytedance/sonic/encoder/debug_go116.go | 66 + .../bytedance/sonic/encoder/debug_go117.go | 205 + .../bytedance/sonic/encoder/encoder.go | 311 + .../bytedance/sonic/encoder/errors.go | 65 + 
.../bytedance/sonic/encoder/mapiter.go | 199 + .../bytedance/sonic/encoder/pools.go | 194 + .../bytedance/sonic/encoder/primitives.go | 168 + .../bytedance/sonic/encoder/sort.go | 206 + .../bytedance/sonic/encoder/stream.go | 84 + .../bytedance/sonic/encoder/stubs_go116.go | 65 + .../bytedance/sonic/encoder/stubs_go117.go | 66 + .../bytedance/sonic/encoder/stubs_go120.go | 66 + .../bytedance/sonic/encoder/types.go | 47 + .../bytedance/sonic/encoder/utils.go | 52 + vendor/github.com/bytedance/sonic/go.work | 8 + .../bytedance/sonic/internal/caching/asm.s | 0 .../sonic/internal/caching/fcache.go | 115 + .../sonic/internal/caching/hashing.go | 40 + .../sonic/internal/caching/pcache.go | 173 + .../bytedance/sonic/internal/cpu/features.go | 40 + .../sonic/internal/jit/arch_amd64.go | 67 + .../bytedance/sonic/internal/jit/asm.s | 0 .../sonic/internal/jit/assembler_amd64.go | 269 + .../bytedance/sonic/internal/jit/backend.go | 120 + .../bytedance/sonic/internal/jit/runtime.go | 54 + .../bytedance/sonic/internal/loader/asm.s | 0 .../sonic/internal/loader/funcdata.go | 124 + .../sonic/internal/loader/funcdata_go115.go | 169 + .../sonic/internal/loader/funcdata_go116.go | 175 + .../sonic/internal/loader/funcdata_go118.go | 201 + .../sonic/internal/loader/funcdata_go120.go | 201 + .../bytedance/sonic/internal/loader/loader.go | 74 + .../sonic/internal/loader/loader_windows.go | 111 + .../sonic/internal/native/avx/native_amd64.go | 135 + .../sonic/internal/native/avx/native_amd64.s | 15286 ++++++++++++++ .../native/avx/native_export_amd64.go | 49 + .../internal/native/avx/native_subr_amd64.go | 109 + .../internal/native/avx2/native_amd64.go | 135 + .../sonic/internal/native/avx2/native_amd64.s | 16629 ++++++++++++++++ .../native/avx2/native_export_amd64.go | 49 + .../internal/native/avx2/native_subr_amd64.go | 109 + .../sonic/internal/native/dispatch_amd64.go | 202 + .../sonic/internal/native/dispatch_amd64.s | 137 + .../internal/native/fastfloat_amd64_test.tmpl | 138 + .../internal/native/fastint_amd64_test.tmpl | 151 + .../sonic/internal/native/native_amd64.tmpl | 133 + .../internal/native/native_amd64_test.tmpl | 593 + .../internal/native/native_export_amd64.tmpl | 47 + .../sonic/internal/native/sse/native_amd64.go | 135 + .../sonic/internal/native/sse/native_amd64.s | 15078 ++++++++++++++ .../native/sse/native_export_amd64.go | 49 + .../internal/native/sse/native_subr_amd64.go | 109 + .../sonic/internal/native/types/types.go | 134 + .../bytedance/sonic/internal/resolver/asm.s | 0 .../sonic/internal/resolver/resolver.go | 214 + .../sonic/internal/resolver/stubs.go | 46 + .../bytedance/sonic/internal/rt/asm_amd64.s | 60 + .../bytedance/sonic/internal/rt/asm_arm64.s | 10 + .../bytedance/sonic/internal/rt/fastmem.go | 113 + .../bytedance/sonic/internal/rt/fastvalue.go | 200 + .../bytedance/sonic/internal/rt/gcwb.go | 124 + .../bytedance/sonic/internal/rt/int48.go | 36 + .../bytedance/sonic/internal/rt/stackmap.go | 180 + .../bytedance/sonic/introduction-1.png | Bin 0 -> 57447 bytes .../bytedance/sonic/introduction-2.png | Bin 0 -> 68075 bytes .../bytedance/sonic/loader/funcdata.go | 144 + .../bytedance/sonic/loader/funcdata_go115.go | 541 + .../bytedance/sonic/loader/funcdata_go118.go | 541 + .../bytedance/sonic/loader/funcdata_go120.go | 545 + .../bytedance/sonic/loader/loader.go | 37 + .../bytedance/sonic/loader/loader_go115.go | 33 + .../bytedance/sonic/loader/loader_go118.go | 104 + .../bytedance/sonic/loader/mmap_unix.go | 45 + .../bytedance/sonic/loader/mmap_windows.go | 84 + 
.../bytedance/sonic/loader/pcdata.go | 100 + .../bytedance/sonic/loader/stubs.go | 35 + .../bytedance/sonic/option/option.go | 78 + .../bytedance/sonic/other-langs.png | Bin 0 -> 96490 bytes vendor/github.com/bytedance/sonic/sonic.go | 161 + .../bytedance/sonic/unquote/unquote.go | 56 + .../github.com/bytedance/sonic/utf8/utf8.go | 71 + .../github.com/chenzhuoyu/base64x/.gitignore | 43 + .../github.com/chenzhuoyu/base64x/.gitmodules | 3 + vendor/github.com/chenzhuoyu/base64x/LICENSE | 201 + vendor/github.com/chenzhuoyu/base64x/Makefile | 28 + .../github.com/chenzhuoyu/base64x/README.md | 4 + .../github.com/chenzhuoyu/base64x/base64x.go | 157 + vendor/github.com/chenzhuoyu/base64x/cpuid.go | 17 + .../github.com/chenzhuoyu/base64x/faststr.go | 23 + .../chenzhuoyu/base64x/native_amd64.go | 16 + .../chenzhuoyu/base64x/native_amd64.s | 4416 ++++ .../chenzhuoyu/base64x/native_subr_amd64.go | 29 + vendor/github.com/gin-gonic/gin/CHANGELOG.md | 81 +- vendor/github.com/gin-gonic/gin/Makefile | 2 +- vendor/github.com/gin-gonic/gin/README.md | 2333 +-- .../gin/binding/default_validator.go | 2 +- .../gin-gonic/gin/binding/form_mapping.go | 2 +- .../gin-gonic/gin/binding/protobuf.go | 4 +- .../github.com/gin-gonic/gin/binding/toml.go | 16 +- .../github.com/gin-gonic/gin/binding/yaml.go | 2 +- vendor/github.com/gin-gonic/gin/context.go | 96 +- vendor/github.com/gin-gonic/gin/debug.go | 6 +- vendor/github.com/gin-gonic/gin/errors.go | 11 +- vendor/github.com/gin-gonic/gin/gin.go | 9 +- .../gin-gonic/gin/internal/json/json.go | 6 +- .../gin-gonic/gin/internal/json/sonic.go | 27 + vendor/github.com/gin-gonic/gin/mode.go | 5 +- vendor/github.com/gin-gonic/gin/path.go | 12 +- vendor/github.com/gin-gonic/gin/recovery.go | 9 +- .../github.com/gin-gonic/gin/render/json.go | 7 +- .../github.com/gin-gonic/gin/render/yaml.go | 2 +- .../gin-gonic/gin/response_writer.go | 7 +- .../github.com/gin-gonic/gin/routergroup.go | 33 +- vendor/github.com/gin-gonic/gin/tree.go | 22 +- vendor/github.com/gin-gonic/gin/version.go | 2 +- .../go-playground/locales/README.md | 4 +- .../universal-translator/README.md | 4 +- .../universal-translator/import_export.go | 6 +- .../go-playground/validator/v10/.gitignore | 1 + .../go-playground/validator/v10/README.md | 2 +- vendor/github.com/goccy/go-json/Makefile | 4 +- vendor/github.com/goccy/go-json/README.md | 2 +- vendor/github.com/goccy/go-json/decode.go | 31 + vendor/github.com/goccy/go-json/error.go | 2 + .../internal/decoder/anonymous_field.go | 4 + .../goccy/go-json/internal/decoder/array.go | 5 + .../goccy/go-json/internal/decoder/assign.go | 438 + .../goccy/go-json/internal/decoder/bool.go | 5 + .../goccy/go-json/internal/decoder/bytes.go | 5 + .../goccy/go-json/internal/decoder/float.go | 12 + .../goccy/go-json/internal/decoder/func.go | 5 + .../goccy/go-json/internal/decoder/int.go | 4 + .../go-json/internal/decoder/interface.go | 70 + .../goccy/go-json/internal/decoder/invalid.go | 10 + .../goccy/go-json/internal/decoder/map.go | 93 + .../goccy/go-json/internal/decoder/number.go | 11 + .../goccy/go-json/internal/decoder/option.go | 2 + .../goccy/go-json/internal/decoder/path.go | 670 + .../goccy/go-json/internal/decoder/ptr.go | 9 + .../goccy/go-json/internal/decoder/slice.go | 79 + .../goccy/go-json/internal/decoder/string.go | 11 + .../goccy/go-json/internal/decoder/struct.go | 4 + .../goccy/go-json/internal/decoder/type.go | 1 + .../goccy/go-json/internal/decoder/uint.go | 4 + .../internal/decoder/unmarshal_json.go | 5 + .../internal/decoder/unmarshal_text.go | 5 + 
.../internal/decoder/wrapped_string.go | 5 + .../go-json/internal/encoder/compiler.go | 2 - .../goccy/go-json/internal/errors/error.go | 19 + vendor/github.com/goccy/go-json/path.go | 84 + .../github.com/klauspost/cpuid/v2/.gitignore | 24 + .../klauspost/cpuid/v2/.goreleaser.yml | 74 + .../klauspost/cpuid/v2/CONTRIBUTING.txt | 35 + vendor/github.com/klauspost/cpuid/v2/LICENSE | 22 + .../github.com/klauspost/cpuid/v2/README.md | 137 + vendor/github.com/klauspost/cpuid/v2/cpuid.go | 1070 + .../github.com/klauspost/cpuid/v2/cpuid_386.s | 47 + .../klauspost/cpuid/v2/cpuid_amd64.s | 72 + .../klauspost/cpuid/v2/cpuid_arm64.s | 26 + .../klauspost/cpuid/v2/detect_arm64.go | 246 + .../klauspost/cpuid/v2/detect_ref.go | 14 + .../klauspost/cpuid/v2/detect_x86.go | 35 + .../klauspost/cpuid/v2/featureid_string.go | 185 + .../klauspost/cpuid/v2/os_darwin_arm64.go | 19 + .../klauspost/cpuid/v2/os_linux_arm64.go | 130 + .../klauspost/cpuid/v2/os_other_arm64.go | 17 + .../klauspost/cpuid/v2/os_safe_linux_arm64.go | 7 + .../cpuid/v2/os_unsafe_linux_arm64.go | 10 + .../klauspost/cpuid/v2/test-architectures.sh | 15 + .../github.com/mattn/go-isatty/isatty_bsd.go | 4 +- .../twitchyliquid64/golang-asm/LICENSE | 27 + .../golang-asm/asm/arch/arch.go | 716 + .../golang-asm/asm/arch/arm.go | 257 + .../golang-asm/asm/arch/arm64.go | 350 + .../golang-asm/asm/arch/mips.go | 72 + .../golang-asm/asm/arch/ppc64.go | 102 + .../golang-asm/asm/arch/riscv64.go | 28 + .../golang-asm/asm/arch/s390x.go | 81 + .../twitchyliquid64/golang-asm/bio/buf.go | 148 + .../golang-asm/bio/buf_mmap.go | 62 + .../golang-asm/bio/buf_nommap.go | 11 + .../twitchyliquid64/golang-asm/bio/must.go | 43 + .../twitchyliquid64/golang-asm/dwarf/dwarf.go | 1650 ++ .../golang-asm/dwarf/dwarf_defs.go | 493 + .../golang-asm/goobj/builtin.go | 45 + .../golang-asm/goobj/builtinlist.go | 245 + .../golang-asm/goobj/funcinfo.go | 233 + .../golang-asm/goobj/objfile.go | 871 + .../golang-asm/obj/abi_string.go | 16 + .../golang-asm/obj/addrtype_string.go | 16 + .../golang-asm/obj/arm/a.out.go | 410 + .../golang-asm/obj/arm/anames.go | 144 + .../golang-asm/obj/arm/anames5.go | 77 + .../golang-asm/obj/arm/asm5.go | 3096 +++ .../golang-asm/obj/arm/list5.go | 124 + .../golang-asm/obj/arm/obj5.go | 784 + .../golang-asm/obj/arm64/a.out.go | 1033 + .../golang-asm/obj/arm64/anames.go | 512 + .../golang-asm/obj/arm64/anames7.go | 100 + .../golang-asm/obj/arm64/asm7.go | 7140 +++++++ .../golang-asm/obj/arm64/doc.go | 249 + .../golang-asm/obj/arm64/list7.go | 288 + .../golang-asm/obj/arm64/obj7.go | 998 + .../golang-asm/obj/arm64/sysRegEnc.go | 895 + .../twitchyliquid64/golang-asm/obj/data.go | 200 + .../twitchyliquid64/golang-asm/obj/dwarf.go | 690 + .../twitchyliquid64/golang-asm/obj/go.go | 16 + .../twitchyliquid64/golang-asm/obj/inl.go | 131 + .../twitchyliquid64/golang-asm/obj/ld.go | 85 + .../twitchyliquid64/golang-asm/obj/line.go | 30 + .../twitchyliquid64/golang-asm/obj/link.go | 771 + .../golang-asm/obj/mips/a.out.go | 481 + .../golang-asm/obj/mips/anames.go | 135 + .../golang-asm/obj/mips/anames0.go | 45 + .../golang-asm/obj/mips/asm0.go | 2108 ++ .../golang-asm/obj/mips/list0.go | 83 + .../golang-asm/obj/mips/obj0.go | 1457 ++ .../twitchyliquid64/golang-asm/obj/objfile.go | 755 + .../twitchyliquid64/golang-asm/obj/pass.go | 176 + .../twitchyliquid64/golang-asm/obj/pcln.go | 413 + .../twitchyliquid64/golang-asm/obj/plist.go | 314 + .../golang-asm/obj/ppc64/a.out.go | 1032 + .../golang-asm/obj/ppc64/anames.go | 615 + .../golang-asm/obj/ppc64/anames9.go | 51 + 
.../golang-asm/obj/ppc64/asm9.go | 5367 +++++ .../golang-asm/obj/ppc64/doc.go | 244 + .../golang-asm/obj/ppc64/list9.go | 104 + .../golang-asm/obj/ppc64/obj9.go | 1278 ++ .../golang-asm/obj/riscv/anames.go | 258 + .../golang-asm/obj/riscv/cpu.go | 644 + .../golang-asm/obj/riscv/inst.go | 459 + .../golang-asm/obj/riscv/list.go | 33 + .../golang-asm/obj/riscv/obj.go | 1999 ++ .../golang-asm/obj/s390x/a.out.go | 1003 + .../golang-asm/obj/s390x/anames.go | 720 + .../golang-asm/obj/s390x/anamesz.go | 39 + .../golang-asm/obj/s390x/asmz.go | 5043 +++++ .../golang-asm/obj/s390x/condition_code.go | 126 + .../golang-asm/obj/s390x/listz.go | 73 + .../golang-asm/obj/s390x/objz.go | 735 + .../golang-asm/obj/s390x/rotate.go | 47 + .../golang-asm/obj/s390x/vector.go | 1069 + .../twitchyliquid64/golang-asm/obj/sym.go | 421 + .../golang-asm/obj/textflag.go | 54 + .../twitchyliquid64/golang-asm/obj/util.go | 598 + .../golang-asm/obj/wasm/a.out.go | 331 + .../golang-asm/obj/wasm/anames.go | 208 + .../golang-asm/obj/wasm/wasmobj.go | 1185 ++ .../golang-asm/obj/x86/a.out.go | 423 + .../golang-asm/obj/x86/aenum.go | 1609 ++ .../golang-asm/obj/x86/anames.go | 1607 ++ .../golang-asm/obj/x86/asm6.go | 5446 +++++ .../golang-asm/obj/x86/avx_optabs.go | 4628 +++++ .../golang-asm/obj/x86/evex.go | 382 + .../golang-asm/obj/x86/list6.go | 264 + .../golang-asm/obj/x86/obj6.go | 1261 ++ .../golang-asm/obj/x86/ytab.go | 44 + .../golang-asm/objabi/autotype.go | 38 + .../twitchyliquid64/golang-asm/objabi/flag.go | 162 + .../golang-asm/objabi/funcdata.go | 54 + .../golang-asm/objabi/funcid.go | 100 + .../twitchyliquid64/golang-asm/objabi/head.go | 109 + .../twitchyliquid64/golang-asm/objabi/line.go | 114 + .../twitchyliquid64/golang-asm/objabi/path.go | 41 + .../golang-asm/objabi/reloctype.go | 269 + .../golang-asm/objabi/reloctype_string.go | 17 + .../golang-asm/objabi/stack.go | 33 + .../golang-asm/objabi/symkind.go | 79 + .../golang-asm/objabi/symkind_string.go | 41 + .../golang-asm/objabi/typekind.go | 40 + .../twitchyliquid64/golang-asm/objabi/util.go | 203 + .../twitchyliquid64/golang-asm/src/pos.go | 470 + .../twitchyliquid64/golang-asm/src/xpos.go | 176 + .../twitchyliquid64/golang-asm/sys/arch.go | 187 + .../golang-asm/sys/supported.go | 116 + .../golang-asm/unsafeheader/unsafeheader.go | 37 + vendor/github.com/ugorji/go/codec/README.md | 157 +- vendor/github.com/ugorji/go/codec/binc.go | 102 +- vendor/github.com/ugorji/go/codec/build.sh | 12 +- vendor/github.com/ugorji/go/codec/cbor.go | 7 +- vendor/github.com/ugorji/go/codec/decode.go | 192 +- vendor/github.com/ugorji/go/codec/doc.go | 157 +- vendor/github.com/ugorji/go/codec/encode.go | 165 +- .../ugorji/go/codec/fast-path.generated.go | 134 +- .../ugorji/go/codec/fast-path.go.tmpl | 6 +- .../ugorji/go/codec/gen-dec-array.go.tmpl | 4 +- .../ugorji/go/codec/gen-dec-map.go.tmpl | 2 +- .../ugorji/go/codec/gen-helper.generated.go | 37 +- .../ugorji/go/codec/gen-helper.go.tmpl | 50 +- .../ugorji/go/codec/gen.generated.go | 6 +- vendor/github.com/ugorji/go/codec/gen.go | 293 +- .../goversion_growslice_unsafe_gte_go120.go | 28 + .../goversion_growslice_unsafe_lt_go120.go | 16 + vendor/github.com/ugorji/go/codec/helper.go | 102 +- .../ugorji/go/codec/helper_not_unsafe.go | 62 +- .../go/codec/helper_not_unsafe_not_gc.go | 10 +- .../ugorji/go/codec/helper_unsafe.go | 104 +- .../go/codec/helper_unsafe_compiler_gc.go | 24 +- vendor/github.com/ugorji/go/codec/json.go | 303 +- vendor/github.com/ugorji/go/codec/msgpack.go | 9 +- vendor/github.com/ugorji/go/codec/reader.go | 645 
+- vendor/github.com/ugorji/go/codec/rpc.go | 36 +- .../ugorji/go/codec/sort-slice.generated.go | 20 +- .../ugorji/go/codec/sort-slice.go.tmpl | 8 +- vendor/github.com/ugorji/go/codec/writer.go | 67 +- vendor/golang.org/x/{crypto => arch}/AUTHORS | 0 .../x/{crypto => arch}/CONTRIBUTORS | 0 vendor/golang.org/x/arch/LICENSE | 27 + vendor/golang.org/x/arch/PATENTS | 22 + vendor/golang.org/x/arch/x86/x86asm/Makefile | 3 + vendor/golang.org/x/arch/x86/x86asm/decode.go | 1724 ++ vendor/golang.org/x/arch/x86/x86asm/gnu.go | 956 + vendor/golang.org/x/arch/x86/x86asm/inst.go | 649 + vendor/golang.org/x/arch/x86/x86asm/intel.go | 560 + vendor/golang.org/x/arch/x86/x86asm/plan9x.go | 382 + vendor/golang.org/x/arch/x86/x86asm/tables.go | 9925 +++++++++ vendor/golang.org/x/crypto/sha3/doc.go | 12 +- vendor/golang.org/x/crypto/sha3/keccakf.go | 194 +- vendor/golang.org/x/crypto/sha3/sha3.go | 2 +- vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 2 + vendor/golang.org/x/net/http2/flow.go | 88 +- vendor/golang.org/x/net/http2/frame.go | 11 +- vendor/golang.org/x/net/http2/hpack/hpack.go | 81 +- vendor/golang.org/x/net/http2/server.go | 105 +- vendor/golang.org/x/net/http2/transport.go | 88 +- vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 1 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 44 +- vendor/golang.org/x/sys/cpu/endian_big.go | 11 + vendor/golang.org/x/sys/cpu/endian_little.go | 11 + vendor/golang.org/x/sys/cpu/parse.go | 43 + .../x/sys/cpu/proc_cpuinfo_linux.go | 54 + vendor/golang.org/x/sys/unix/gccgo.go | 4 +- vendor/golang.org/x/sys/unix/gccgo_c.c | 4 +- vendor/golang.org/x/sys/unix/ioctl.go | 4 +- vendor/golang.org/x/sys/unix/mkall.sh | 4 +- .../golang.org/x/sys/unix/syscall_darwin.go | 1 + .../x/sys/unix/syscall_dragonfly.go | 1 + .../golang.org/x/sys/unix/syscall_freebsd.go | 1 + .../x/sys/unix/syscall_freebsd_386.go | 9 +- .../x/sys/unix/syscall_freebsd_amd64.go | 9 +- .../x/sys/unix/syscall_freebsd_arm.go | 9 +- .../x/sys/unix/syscall_freebsd_arm64.go | 9 +- .../x/sys/unix/syscall_freebsd_riscv64.go | 9 +- vendor/golang.org/x/sys/unix/syscall_hurd.go | 22 + .../golang.org/x/sys/unix/syscall_hurd_386.go | 29 + vendor/golang.org/x/sys/unix/syscall_linux.go | 51 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 15 + .../golang.org/x/sys/unix/syscall_openbsd.go | 1 + .../x/sys/unix/syscall_openbsd_libc.go | 4 +- .../golang.org/x/sys/unix/syscall_solaris.go | 1 + vendor/golang.org/x/sys/unix/syscall_unix.go | 57 +- vendor/golang.org/x/sys/unix/timestruct.go | 2 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 9 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 30 +- .../x/sys/unix/zerrors_linux_386.go | 1 + .../x/sys/unix/zerrors_linux_amd64.go | 1 + .../x/sys/unix/zerrors_linux_arm.go | 1 + .../x/sys/unix/zerrors_linux_arm64.go | 1 + .../x/sys/unix/zerrors_linux_loong64.go | 1 + .../x/sys/unix/zerrors_linux_mips.go | 1 + .../x/sys/unix/zerrors_linux_mips64.go | 1 + .../x/sys/unix/zerrors_linux_mips64le.go | 1 + .../x/sys/unix/zerrors_linux_mipsle.go | 1 + .../x/sys/unix/zerrors_linux_ppc.go | 1 + .../x/sys/unix/zerrors_linux_ppc64.go | 1 + .../x/sys/unix/zerrors_linux_ppc64le.go | 1 + .../x/sys/unix/zerrors_linux_riscv64.go | 1 + .../x/sys/unix/zerrors_linux_s390x.go | 1 + .../x/sys/unix/zerrors_linux_sparc64.go | 1 + .../x/sys/unix/zerrors_openbsd_386.go | 356 +- .../x/sys/unix/zerrors_openbsd_amd64.go | 189 +- .../x/sys/unix/zerrors_openbsd_arm.go | 348 +- .../x/sys/unix/zerrors_openbsd_arm64.go | 160 +- .../x/sys/unix/zerrors_openbsd_mips64.go | 95 +- 
.../x/sys/unix/zsyscall_dragonfly_amd64.go | 10 + .../x/sys/unix/zsyscall_freebsd_386.go | 10 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 10 + .../x/sys/unix/zsyscall_freebsd_arm.go | 10 + .../x/sys/unix/zsyscall_freebsd_arm64.go | 10 + .../x/sys/unix/zsyscall_freebsd_riscv64.go | 10 + .../golang.org/x/sys/unix/zsyscall_linux.go | 11 + .../x/sys/unix/zsyscall_netbsd_386.go | 10 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 10 + .../x/sys/unix/zsyscall_netbsd_arm.go | 10 + .../x/sys/unix/zsyscall_netbsd_arm64.go | 10 + .../x/sys/unix/zsyscall_openbsd_386.go | 14 + .../x/sys/unix/zsyscall_openbsd_386.s | 137 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 14 + .../x/sys/unix/zsyscall_openbsd_amd64.s | 137 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 14 + .../x/sys/unix/zsyscall_openbsd_arm.s | 137 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 14 + .../x/sys/unix/zsyscall_openbsd_arm64.s | 137 +- .../x/sys/unix/zsyscall_openbsd_mips64.go | 812 +- .../x/sys/unix/zsyscall_openbsd_mips64.s | 669 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 14 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 14 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 137 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 13 + .../x/sys/unix/zsysctl_openbsd_386.go | 51 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 17 +- .../x/sys/unix/zsysctl_openbsd_arm.go | 51 +- .../x/sys/unix/zsysctl_openbsd_arm64.go | 11 +- .../x/sys/unix/zsysctl_openbsd_mips64.go | 3 +- .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 217 +- .../x/sys/unix/ztypes_netbsd_386.go | 84 + .../x/sys/unix/ztypes_netbsd_amd64.go | 84 + .../x/sys/unix/ztypes_netbsd_arm.go | 84 + .../x/sys/unix/ztypes_netbsd_arm64.go | 84 + .../x/sys/unix/ztypes_openbsd_386.go | 97 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 33 +- .../x/sys/unix/ztypes_openbsd_arm.go | 9 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 9 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 9 +- .../internal/language/compact/language.go | 2 +- .../x/text/internal/language/language.go | 2 +- vendor/golang.org/x/text/language/language.go | 2 +- vendor/gopkg.in/yaml.v3/LICENSE | 50 + vendor/gopkg.in/yaml.v3/NOTICE | 13 + vendor/gopkg.in/yaml.v3/README.md | 150 + vendor/gopkg.in/yaml.v3/apic.go | 747 + vendor/gopkg.in/yaml.v3/decode.go | 1000 + vendor/gopkg.in/yaml.v3/emitterc.go | 2020 ++ vendor/gopkg.in/yaml.v3/encode.go | 577 + vendor/gopkg.in/yaml.v3/parserc.go | 1258 ++ vendor/gopkg.in/yaml.v3/readerc.go | 434 + vendor/gopkg.in/yaml.v3/resolve.go | 326 + vendor/gopkg.in/yaml.v3/scannerc.go | 3038 +++ vendor/gopkg.in/yaml.v3/sorter.go | 134 + vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/yaml.v3/yaml.go | 698 + vendor/gopkg.in/yaml.v3/yamlh.go | 807 + vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 + vendor/modules.txt | 80 +- 491 files changed, 184488 insertions(+), 5229 deletions(-) create mode 100644 vendor/github.com/bytedance/sonic/.gitignore create mode 100644 vendor/github.com/bytedance/sonic/.gitmodules create mode 100644 vendor/github.com/bytedance/sonic/.licenserc.yaml create mode 100644 vendor/github.com/bytedance/sonic/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/bytedance/sonic/CONTRIBUTING.md create mode 100644 vendor/github.com/bytedance/sonic/CREDITS create mode 100644 vendor/github.com/bytedance/sonic/INTRODUCTION.md create mode 100644 vendor/github.com/bytedance/sonic/LICENSE create mode 100644 vendor/github.com/bytedance/sonic/Makefile create mode 100644 
vendor/github.com/bytedance/sonic/README.md create mode 100644 vendor/github.com/bytedance/sonic/api.go create mode 100644 vendor/github.com/bytedance/sonic/ast/api_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/ast/api_compat.go create mode 100644 vendor/github.com/bytedance/sonic/ast/asm.s create mode 100644 vendor/github.com/bytedance/sonic/ast/decode.go create mode 100644 vendor/github.com/bytedance/sonic/ast/encode.go create mode 100644 vendor/github.com/bytedance/sonic/ast/error.go create mode 100644 vendor/github.com/bytedance/sonic/ast/iterator.go create mode 100644 vendor/github.com/bytedance/sonic/ast/node.go create mode 100644 vendor/github.com/bytedance/sonic/ast/parser.go create mode 100644 vendor/github.com/bytedance/sonic/ast/search.go create mode 100644 vendor/github.com/bytedance/sonic/ast/sort.go create mode 100644 vendor/github.com/bytedance/sonic/ast/stubs_go115.go create mode 100644 vendor/github.com/bytedance/sonic/ast/stubs_go120.go create mode 100644 vendor/github.com/bytedance/sonic/bench-arm.sh create mode 100644 vendor/github.com/bytedance/sonic/bench-large.png create mode 100644 vendor/github.com/bytedance/sonic/bench-small.png create mode 100644 vendor/github.com/bytedance/sonic/bench.py create mode 100644 vendor/github.com/bytedance/sonic/bench.sh create mode 100644 vendor/github.com/bytedance/sonic/check_branch_name.sh create mode 100644 vendor/github.com/bytedance/sonic/compat.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/asm.s create mode 100644 vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go116.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go117.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/compiler.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/debug.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/decoder.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/errors.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/generic_amd64_go116.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117_test.s create mode 100644 vendor/github.com/bytedance/sonic/decoder/generic_amd64_test.s create mode 100644 vendor/github.com/bytedance/sonic/decoder/pools.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/primitives.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/stream.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/stubs_go115.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/stubs_go120.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/types.go create mode 100644 vendor/github.com/bytedance/sonic/decoder/utils.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/asm.s create mode 100644 vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go116.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go117.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/compiler.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/debug_go116.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/debug_go117.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/encoder.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/errors.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/mapiter.go create mode 100644 
vendor/github.com/bytedance/sonic/encoder/pools.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/primitives.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/sort.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/stream.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/stubs_go116.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/stubs_go117.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/stubs_go120.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/types.go create mode 100644 vendor/github.com/bytedance/sonic/encoder/utils.go create mode 100644 vendor/github.com/bytedance/sonic/go.work create mode 100644 vendor/github.com/bytedance/sonic/internal/caching/asm.s create mode 100644 vendor/github.com/bytedance/sonic/internal/caching/fcache.go create mode 100644 vendor/github.com/bytedance/sonic/internal/caching/hashing.go create mode 100644 vendor/github.com/bytedance/sonic/internal/caching/pcache.go create mode 100644 vendor/github.com/bytedance/sonic/internal/cpu/features.go create mode 100644 vendor/github.com/bytedance/sonic/internal/jit/arch_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/jit/asm.s create mode 100644 vendor/github.com/bytedance/sonic/internal/jit/assembler_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/jit/backend.go create mode 100644 vendor/github.com/bytedance/sonic/internal/jit/runtime.go create mode 100644 vendor/github.com/bytedance/sonic/internal/loader/asm.s create mode 100644 vendor/github.com/bytedance/sonic/internal/loader/funcdata.go create mode 100644 vendor/github.com/bytedance/sonic/internal/loader/funcdata_go115.go create mode 100644 vendor/github.com/bytedance/sonic/internal/loader/funcdata_go116.go create mode 100644 vendor/github.com/bytedance/sonic/internal/loader/funcdata_go118.go create mode 100644 vendor/github.com/bytedance/sonic/internal/loader/funcdata_go120.go create mode 100644 vendor/github.com/bytedance/sonic/internal/loader/loader.go create mode 100644 vendor/github.com/bytedance/sonic/internal/loader/loader_windows.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/avx/native_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/avx/native_amd64.s create mode 100644 vendor/github.com/bytedance/sonic/internal/native/avx/native_export_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/avx/native_subr_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/avx2/native_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/avx2/native_amd64.s create mode 100644 vendor/github.com/bytedance/sonic/internal/native/avx2/native_export_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/avx2/native_subr_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/dispatch_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/dispatch_amd64.s create mode 100644 vendor/github.com/bytedance/sonic/internal/native/fastfloat_amd64_test.tmpl create mode 100644 vendor/github.com/bytedance/sonic/internal/native/fastint_amd64_test.tmpl create mode 100644 vendor/github.com/bytedance/sonic/internal/native/native_amd64.tmpl create mode 100644 vendor/github.com/bytedance/sonic/internal/native/native_amd64_test.tmpl create mode 100644 vendor/github.com/bytedance/sonic/internal/native/native_export_amd64.tmpl create mode 100644 
vendor/github.com/bytedance/sonic/internal/native/sse/native_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/sse/native_amd64.s create mode 100644 vendor/github.com/bytedance/sonic/internal/native/sse/native_export_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/sse/native_subr_amd64.go create mode 100644 vendor/github.com/bytedance/sonic/internal/native/types/types.go create mode 100644 vendor/github.com/bytedance/sonic/internal/resolver/asm.s create mode 100644 vendor/github.com/bytedance/sonic/internal/resolver/resolver.go create mode 100644 vendor/github.com/bytedance/sonic/internal/resolver/stubs.go create mode 100644 vendor/github.com/bytedance/sonic/internal/rt/asm_amd64.s create mode 100644 vendor/github.com/bytedance/sonic/internal/rt/asm_arm64.s create mode 100644 vendor/github.com/bytedance/sonic/internal/rt/fastmem.go create mode 100644 vendor/github.com/bytedance/sonic/internal/rt/fastvalue.go create mode 100644 vendor/github.com/bytedance/sonic/internal/rt/gcwb.go create mode 100644 vendor/github.com/bytedance/sonic/internal/rt/int48.go create mode 100644 vendor/github.com/bytedance/sonic/internal/rt/stackmap.go create mode 100644 vendor/github.com/bytedance/sonic/introduction-1.png create mode 100644 vendor/github.com/bytedance/sonic/introduction-2.png create mode 100644 vendor/github.com/bytedance/sonic/loader/funcdata.go create mode 100644 vendor/github.com/bytedance/sonic/loader/funcdata_go115.go create mode 100644 vendor/github.com/bytedance/sonic/loader/funcdata_go118.go create mode 100644 vendor/github.com/bytedance/sonic/loader/funcdata_go120.go create mode 100644 vendor/github.com/bytedance/sonic/loader/loader.go create mode 100644 vendor/github.com/bytedance/sonic/loader/loader_go115.go create mode 100644 vendor/github.com/bytedance/sonic/loader/loader_go118.go create mode 100644 vendor/github.com/bytedance/sonic/loader/mmap_unix.go create mode 100644 vendor/github.com/bytedance/sonic/loader/mmap_windows.go create mode 100644 vendor/github.com/bytedance/sonic/loader/pcdata.go create mode 100644 vendor/github.com/bytedance/sonic/loader/stubs.go create mode 100644 vendor/github.com/bytedance/sonic/option/option.go create mode 100644 vendor/github.com/bytedance/sonic/other-langs.png create mode 100644 vendor/github.com/bytedance/sonic/sonic.go create mode 100644 vendor/github.com/bytedance/sonic/unquote/unquote.go create mode 100644 vendor/github.com/bytedance/sonic/utf8/utf8.go create mode 100644 vendor/github.com/chenzhuoyu/base64x/.gitignore create mode 100644 vendor/github.com/chenzhuoyu/base64x/.gitmodules create mode 100644 vendor/github.com/chenzhuoyu/base64x/LICENSE create mode 100644 vendor/github.com/chenzhuoyu/base64x/Makefile create mode 100644 vendor/github.com/chenzhuoyu/base64x/README.md create mode 100644 vendor/github.com/chenzhuoyu/base64x/base64x.go create mode 100644 vendor/github.com/chenzhuoyu/base64x/cpuid.go create mode 100644 vendor/github.com/chenzhuoyu/base64x/faststr.go create mode 100644 vendor/github.com/chenzhuoyu/base64x/native_amd64.go create mode 100644 vendor/github.com/chenzhuoyu/base64x/native_amd64.s create mode 100644 vendor/github.com/chenzhuoyu/base64x/native_subr_amd64.go create mode 100644 vendor/github.com/gin-gonic/gin/internal/json/sonic.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/assign.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/path.go create mode 100644 vendor/github.com/goccy/go-json/path.go create mode 
100644 vendor/github.com/klauspost/cpuid/v2/.gitignore create mode 100644 vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml create mode 100644 vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt create mode 100644 vendor/github.com/klauspost/cpuid/v2/LICENSE create mode 100644 vendor/github.com/klauspost/cpuid/v2/README.md create mode 100644 vendor/github.com/klauspost/cpuid/v2/cpuid.go create mode 100644 vendor/github.com/klauspost/cpuid/v2/cpuid_386.s create mode 100644 vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s create mode 100644 vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s create mode 100644 vendor/github.com/klauspost/cpuid/v2/detect_arm64.go create mode 100644 vendor/github.com/klauspost/cpuid/v2/detect_ref.go create mode 100644 vendor/github.com/klauspost/cpuid/v2/detect_x86.go create mode 100644 vendor/github.com/klauspost/cpuid/v2/featureid_string.go create mode 100644 vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go create mode 100644 vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go create mode 100644 vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go create mode 100644 vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go create mode 100644 vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go create mode 100644 vendor/github.com/klauspost/cpuid/v2/test-architectures.sh create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/LICENSE create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arch.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm64.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/asm/arch/mips.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/asm/arch/ppc64.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/asm/arch/riscv64.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/asm/arch/s390x.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/bio/buf.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/bio/buf_mmap.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/bio/buf_nommap.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/bio/must.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf_defs.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/goobj/builtin.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/goobj/builtinlist.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/goobj/funcinfo.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/goobj/objfile.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/abi_string.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/addrtype_string.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm/a.out.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames5.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm/asm5.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm/list5.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm/obj5.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/a.out.go create mode 
100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/anames.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/anames7.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/asm7.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/doc.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/list7.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/obj7.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/sysRegEnc.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/data.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/dwarf.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/go.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/inl.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/ld.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/line.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/link.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/mips/a.out.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/mips/anames.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/mips/anames0.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/mips/asm0.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/mips/list0.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/mips/obj0.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/objfile.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/pass.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/pcln.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/plist.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/a.out.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/anames.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/anames9.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/asm9.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/doc.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/list9.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/ppc64/obj9.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/anames.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/cpu.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/inst.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/list.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/riscv/obj.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/a.out.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/anames.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/anamesz.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/asmz.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/condition_code.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/listz.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/objz.go create mode 100644 
vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/rotate.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/s390x/vector.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/sym.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/textflag.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/util.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/a.out.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/anames.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/wasm/wasmobj.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/x86/a.out.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/x86/aenum.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/x86/anames.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/x86/asm6.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/x86/avx_optabs.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/x86/evex.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/x86/list6.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/x86/obj6.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/obj/x86/ytab.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/autotype.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/flag.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/funcdata.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/funcid.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/head.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/line.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/path.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/reloctype.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/reloctype_string.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/stack.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/symkind.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/symkind_string.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/typekind.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/objabi/util.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/src/pos.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/src/xpos.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/sys/arch.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/sys/supported.go create mode 100644 vendor/github.com/twitchyliquid64/golang-asm/unsafeheader/unsafeheader.go create mode 100644 vendor/github.com/ugorji/go/codec/goversion_growslice_unsafe_gte_go120.go create mode 100644 vendor/github.com/ugorji/go/codec/goversion_growslice_unsafe_lt_go120.go rename vendor/golang.org/x/{crypto => arch}/AUTHORS (100%) rename vendor/golang.org/x/{crypto => arch}/CONTRIBUTORS (100%) create mode 100644 vendor/golang.org/x/arch/LICENSE create mode 100644 vendor/golang.org/x/arch/PATENTS create mode 100644 vendor/golang.org/x/arch/x86/x86asm/Makefile create mode 100644 vendor/golang.org/x/arch/x86/x86asm/decode.go create mode 100644 vendor/golang.org/x/arch/x86/x86asm/gnu.go create mode 100644 
vendor/golang.org/x/arch/x86/x86asm/inst.go create mode 100644 vendor/golang.org/x/arch/x86/x86asm/intel.go create mode 100644 vendor/golang.org/x/arch/x86/x86asm/plan9x.go create mode 100644 vendor/golang.org/x/arch/x86/x86asm/tables.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go create mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go create mode 100644 vendor/golang.org/x/sys/cpu/parse.go create mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd_386.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE create mode 100644 vendor/gopkg.in/yaml.v3/README.md create mode 100644 vendor/gopkg.in/yaml.v3/apic.go create mode 100644 vendor/gopkg.in/yaml.v3/decode.go create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v3/encode.go create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go diff --git a/go.mod b/go.mod index e169fae..9bbd128 100644 --- a/go.mod +++ b/go.mod @@ -3,29 +3,35 @@ module golang.design/x/ssaplayground go 1.19 require ( - github.com/gin-gonic/gin v1.8.2 + github.com/gin-gonic/gin v1.9.0 github.com/google/uuid v1.3.0 golang.org/x/tools v0.4.0 gopkg.in/yaml.v2 v2.4.0 ) require ( + github.com/bytedance/sonic v1.8.0 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect github.com/gin-contrib/sse v0.1.0 // indirect - github.com/go-playground/locales v0.14.0 // indirect - github.com/go-playground/universal-translator v0.18.0 // indirect - github.com/go-playground/validator/v10 v10.11.1 // indirect - github.com/goccy/go-json v0.9.11 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.11.2 // indirect + github.com/goccy/go-json v0.10.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/cpuid/v2 v2.0.9 // indirect github.com/leodido/go-urn v1.2.1 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect - github.com/ugorji/go/codec v1.2.7 // indirect - golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.9 // indirect + golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect + golang.org/x/crypto v0.5.0 // indirect golang.org/x/mod v0.7.0 // indirect - golang.org/x/net v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect - golang.org/x/text v0.5.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect google.golang.org/protobuf 
v1.28.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index d4a5739..44e0d96 100644 --- a/go.sum +++ b/go.sum @@ -1,21 +1,25 @@ -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.8.0 h1:ea0Xadu+sHlu7x5O3gKhRpQ1IKiMrSiHttPF0ybECuA= +github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.8.2 h1:UzKToD9/PoFj/V4rvlKqTRKnQYyz8Sc1MJlv4JHPtvY= -github.com/gin-gonic/gin v1.8.2/go.mod h1:qw5AYuDrzRTnhvusDsrov+fDIxp9Dleuu12h8nfB398= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= -github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= -github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= -github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= -github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= -github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= -github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= -github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8= +github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.11.2 h1:q3SHpufmypg+erIExEKUmsgmhDTyhcJ38oeKGACXohU= +github.com/go-playground/validator/v10 v10.11.2/go.mod h1:NieE624vt4SCTJtD87arVLvdmjPAeV8BQlHtMnw9D7s= +github.com/goccy/go-json v0.10.0 h1:mXKd9Qw4NuzShiRlOXKews24ufknHO7gx30lsDyokKA= +github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -24,30 +28,23 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -58,29 +55,23 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 
h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= -github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= -github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.9 h1:rmenucSohSTiyL09Y+l2OCk+FrMxGMzho2+tjr5ticU= +github.com/ugorji/go/codec v1.2.9/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670 h1:18EFjUmQOcUvxNYSkA6jO9VAiXCnxFY6NyDX0bHDmkU= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= golang.org/x/tools 
v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -89,13 +80,10 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/vendor/github.com/bytedance/sonic/.gitignore b/vendor/github.com/bytedance/sonic/.gitignore new file mode 100644 index 0000000..0d88447 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/.gitignore @@ -0,0 +1,52 @@ +*.o +*.swp +*.swm +*.swn +*.a +*.so +_obj +_test +*.[568vq] +[568vq].out +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* +_testmain.go +*.exe +*.exe~ +*.test +*.prof +*.rar +*.zip +*.gz +*.psd +*.bmd +*.cfg +*.pptx +*.log +*nohup.out +*settings.pyc +*.sublime-project +*.sublime-workspace +.DS_Store +/.idea/ +/.vscode/ +/output/ +/vendor/ +/Gopkg.lock +/Gopkg.toml +coverage.html +coverage.out +coverage.xml +junit.xml +*.profile +*.svg +*.out +ast/test.out +ast/bench.sh + +!testdata/*.json.gz +fuzz/testdata +*__debug_bin \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/.gitmodules b/vendor/github.com/bytedance/sonic/.gitmodules new file mode 100644 index 0000000..b8d11c9 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/.gitmodules @@ -0,0 +1,3 @@ +[submodule "tools/asm2asm"] + path = tools/asm2asm + url = https://github.com/chenzhuoyu/asm2asm diff --git a/vendor/github.com/bytedance/sonic/.licenserc.yaml b/vendor/github.com/bytedance/sonic/.licenserc.yaml new file mode 100644 index 0000000..1cb993e --- /dev/null +++ b/vendor/github.com/bytedance/sonic/.licenserc.yaml @@ -0,0 +1,24 @@ +header: + license: + spdx-id: Apache-2.0 + copyright-owner: ByteDance Inc. 
+ + paths: + - '**/*.go' + - '**/*.s' + + paths-ignore: + - 'ast/asm.s' # empty file + - 'decoder/asm.s' # empty file + - 'encoder/asm.s' # empty file + - 'internal/caching/asm.s' # empty file + - 'internal/jit/asm.s' # empty file + - 'internal/native/avx/native_amd64.s' # auto-generated by asm2asm + - 'internal/native/avx/native_subr_amd64.go' # auto-generated by asm2asm + - 'internal/native/avx2/native_amd64.s' # auto-generated by asm2asm + - 'internal/native/avx2/native_subr_amd64.go' # auto-generated by asm2asm + - 'internal/resolver/asm.s' # empty file + - 'internal/rt/asm.s' # empty file + - 'internal/loader/asm.s' # empty file + + comment: on-failure \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/CODE_OF_CONDUCT.md b/vendor/github.com/bytedance/sonic/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..8505feb --- /dev/null +++ b/vendor/github.com/bytedance/sonic/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+wudi.daniel@bytedance.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/vendor/github.com/bytedance/sonic/CONTRIBUTING.md b/vendor/github.com/bytedance/sonic/CONTRIBUTING.md
new file mode 100644
index 0000000..7f63c66
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+# How to Contribute
+
+## Your First Pull Request
+We use GitHub for our codebase. You can start by reading [How To Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests).
+
+## Without Semantic Versioning
+We keep the stable code in branch `main` like `golang.org/x`. Development happens on branch `develop`. We promise **Forward Compatibility** by adding a new package directory with the suffix `v2`/`v3` when the code has breaking changes.
+
+## Branch Organization
+We use [git-flow](https://nvie.com/posts/a-successful-git-branching-model/) as our branch organization, also known as [FDD](https://en.wikipedia.org/wiki/Feature-driven_development).
+
+
+## Bugs
+### 1. How to Find Known Issues
+We are using [Github Issues](https://github.com/bytedance/sonic/issues) for our public bugs. We keep a close eye on this and try to make it clear when we have an internal fix in progress. Before filing a new issue, try to make sure your problem doesn't already exist.
+
+### 2. Reporting New Issues
+Providing reduced test code is the recommended way to report an issue. It can be placed:
+- directly in the issue
+- on the [Golang Playground](https://play.golang.org/)
+
+### 3. Security Bugs
+Please do not report security bugs in public issues. Contact us via the [Support Email](mailto:sonic@bytedance.com) instead.
+
+## How to Get in Touch
+- [Email](mailto:wudi.daniel@bytedance.com)
+
+## Submit a Pull Request
+Before you submit your Pull Request (PR), consider the following guidelines:
+1. Search [GitHub](https://github.com/bytedance/sonic/pulls) for an open or closed PR that relates to your submission. You don't want to duplicate existing efforts.
+2. Be sure that an issue describes the problem you're fixing, or documents the design for the feature you'd like to add. Discussing the design upfront helps to ensure that we're ready to accept your work.
+3. [Fork](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo) the bytedance/sonic repo.
+4. In your forked repository, make your changes in a new git branch:
+   ```
+   git checkout -b bugfix/security_bug develop
+   ```
+5. Create your patch, including appropriate test cases.
+6. Follow our [Style Guides](#code-style-guides).
+7. Commit your changes using a descriptive commit message that follows [AngularJS Git Commit Message Conventions](https://docs.google.com/document/d/1QrDFcIiPjSLDn3EL15IJygNPiHORgU1_OOAqWjiDU5Y/edit).
+   Adherence to these conventions is necessary because release notes are automatically generated from these messages.
+8. Push your branch to GitHub:
+   ```
+   git push origin bugfix/security_bug
+   ```
+9. In GitHub, send a pull request to `sonic:main`.
+
+Note: you must use one of `optimize/feature/bugfix/doc/ci/test/refactor` followed by a slash (`/`) as the branch prefix.
+
+Your PR title and commit message should follow https://www.conventionalcommits.org/.
+
+## Contribution Prerequisites
+- Our development environment keeps up with [Go Official](https://golang.org/project/).
+- You need to fully check your code with lint tools before submitting your pull request: [gofmt](https://golang.org/pkg/cmd/gofmt/) & [golangci-lint](https://github.com/golangci/golangci-lint).
+- You should be familiar with [Github](https://github.com).
+- You may need to be familiar with [Actions](https://github.com/features/actions) (our default workflow tool).
+
+## Code Style Guides
+See [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+
+Good resources:
+- [Effective Go](https://golang.org/doc/effective_go)
+- [Pingcap General advice](https://pingcap.github.io/style-guide/general.html)
+- [Uber Go Style Guide](https://github.com/uber-go/guide/blob/master/style.md)
diff --git a/vendor/github.com/bytedance/sonic/CREDITS b/vendor/github.com/bytedance/sonic/CREDITS
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/github.com/bytedance/sonic/INTRODUCTION.md b/vendor/github.com/bytedance/sonic/INTRODUCTION.md
new file mode 100644
index 0000000..6b42118
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/INTRODUCTION.md
@@ -0,0 +1,48 @@
+# Introduction to Sonic
+## Background
+According to the overall profiling of production services in Bytedance, we found that the overhead of JSON serialization and deserialization is unexpectedly high: in total it is close to 10% of CPU, and in extreme cases it accounts for more than 40% of CPU. Therefore, **the performance of the JSON library is a key issue for improving machine utilization**.
+
+## Research
+We conducted a series of surveys and benchmarks on open-sourced JSON libraries for Golang, but the result is disappointing: **no silver bullet**. First of all, no single library stays in the top three across various business scenarios. Even the most widely used [json-iterator](https://github.com/json-iterator/go) degrades severely in generic (no-schema) or big-volume JSON serialization and deserialization. Secondly, compared with JSON libraries written in other languages, the Go libraries are generally much slower. For example, [Simdjson-go](https://github.com/minio/simdjson-go) has a 50% reduction in decoding performance compared to [simdjson](https://github.com/simdjson/simdjson). What's more, we barely found JSON libraries which provide APIs to modify the underlying values.
+
+Therefore, we decided to **develop a brand-new JSON library with high performance as well as wide applicability**.
+
+## Thinking
+Before starting our design, we need to figure out some questions:
+
+### Why is Json-iterator faster than Standard Library?
+First of all, the **schema-based processing mechanism** used by the standard library is commendable: the parser can obtain meta information in advance when scanning, thereby shortening the time of branch selection. However, its original implementation does not make good use of this mechanism; instead, **it spends a lot of time on reflection to obtain the schema's meta info**. The approach of json-iterator, in contrast, is to interpret a structure as field-by-field encoding and decoding functions, then assemble and cache them, minimizing the performance loss caused by reflection. But does it work once and for all? No. In practical tests, we found that **the deeper and larger the input JSON got, the smaller the gap between json-iterator and other libraries became** - until json-iterator eventually got surpassed:
+![Scalability](introduction-1.png)
+
+The reason is that **this implementation translates into a large number of interface encapsulations and function calls**, which bring function-call overhead:
+1. **Calling an interface involves dynamic addressing of the itab**
+2. **Assembly functions cannot be inlined**, while Golang's function-call performance is poor (no passing of parameters in registers)
+
+#### Is there a way to avoid the function-call overhead of dynamic assembly?
+The first thing we thought about was code generation like [easyjson](https://github.com/mailru/easyjson). But it comes with **schema dependency and convenience losses**. To achieve a real drop-in replacement for the standard library, we turned to another technology - **[JIT](https://en.wikipedia.org/wiki/Jit) (just-in-time compiling)**: the compiled codec is one integrated function, which greatly reduces function calls while ensuring flexibility.
+
+### Why is Simdjson-go not fast enough?
+[SIMD](https://en.wikipedia.org/wiki/SIMD) (Single-Instruction-Multi-Data) is a special set of CPU instructions for the parallel processing of vectorized data. At present, it is supported by most CPUs and widely used in image processing and big data computing. Undoubtedly, SIMD is useful in JSON processing (itoa, char-search, and so on are all suitable scenarios). We can see that simdjson-go is very competitive in large JSON scenarios (>100KB). However, for some extremely small or irregular character strings, **the extra load operation required by SIMD leads to performance degradation**. Therefore, we need to pay careful attention to branch prediction and decide which scenarios should use SIMD and which should not (for example, when the string length is less than 16 bytes).
+
+The second problem comes from the Go compiler itself. In order to ensure compilation speed, **Golang does very little optimization work during the compilation phase** and cannot directly use compiler backends such as [LLVM](https://en.wikipedia.org/wiki/LLVM) (Low-Level Virtual Machine) for optimization.
+
+So, **can some crucial calculation functions be written in another language with higher execution efficiency**?
+C/Clang is an ideal compilation tool (Clang integrates LLVM internally). But the key is how to embed the optimized assembly into Golang.
+
+### How to use Gjson well?
+We also found that [gjson](https://github.com/tidwall/gjson) has a huge advantage in single-key lookup scenarios. This is because its lookup is implemented by a **lazy-load mechanism**, which subtly skips passed-by values and effectively reduces a lot of unnecessary parsing. Practical application has proved that making good use of this feature in production can indeed bring benefits. But when it comes to multi-key lookup, gjson does even worse than std, which is a side effect of its skipping mechanism - **searching for the same path leads to repeated parsing** (a skip is also a lightweight parse). Therefore, accurate adaptation to the practical scenario is the key.
+
+## Design
+Based on the above questions, our design follows naturally:
+
+1. Aiming at the function-call overhead caused by dynamic codec assembly, **`JIT` tech is used to assemble opcodes (asm) corresponding to the schema at runtime**, which are finally cached into off-heap memory in the form of Golang functions.
+2. For practical scenarios where big data and small data coexist, we **use pre-conditional judgment** (string size, floating precision, etc.) **to combine `SIMD` with scalar instructions** to achieve the best adaptation (see the sketch after this list).
+3. As for the insufficient compile-time optimization of the Go language, we decided to **use `C/Clang` to write and compile core computational functions**, and **developed a set of [asm2asm](https://github.com/chenzhuoyu/asm2asm) tools to translate the fully optimized x86 assembly into Plan 9 assembly** and finally load it into the Golang runtime.
+4. Given the big speed gap between parsing and skipping, a **`lazy-load` mechanism** is certainly used in our AST parser, but in **a more adaptive and efficient way to reduce the overhead of multiple-key queries**.
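+
+As a concrete illustration of point 2, here is a minimal, hypothetical sketch (not sonic's actual code; `scanString`, `scalarIndexQuote`, `vectorIndexQuote`, and the 16-byte threshold are illustrative assumptions) of routing small inputs to a scalar loop and larger ones to a vectorized kernel:
+
+```go
+package main
+
+import (
+    "fmt"
+    "strings"
+)
+
+// Illustrative threshold: below roughly 16 bytes, the setup and load
+// cost of SIMD outweighs its benefit, so plain scalar code wins.
+const simdThreshold = 16
+
+// scalarIndexQuote finds the closing quote byte by byte.
+func scalarIndexQuote(s string) int {
+    for i := 0; i < len(s); i++ {
+        if s[i] == '"' {
+            return i
+        }
+    }
+    return -1
+}
+
+// vectorIndexQuote stands in for a SIMD kernel; a real implementation
+// would be hand-written assembly scanning 16/32 bytes per instruction.
+func vectorIndexQuote(s string) int {
+    return strings.IndexByte(s, '"')
+}
+
+// scanString applies the pre-conditional judgment: pick the
+// implementation based on the size of the input.
+func scanString(s string) int {
+    if len(s) < simdThreshold {
+        return scalarIndexQuote(s)
+    }
+    return vectorIndexQuote(s)
+}
+
+func main() {
+    fmt.Println(scanString(`id"`))                             // scalar path
+    fmt.Println(scanString(`some long enough json payload "`)) // vector path
+}
+```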
+![design](introduction-2.png)
+
+In detail, we conducted some further optimizations:
+1. Since native-asm functions cannot be inlined in Golang, we found that their call cost even exceeded the improvement brought by the C compiler's optimization. So we reimplemented a set of lightweight function calls in JIT:
+    - `Global-function-table + static offset` for the call instruction
+    - **Pass parameters using registers**
+2. `Sync.Map` was used to cache the codecs at first, but for our **quasi-static** (reads far outnumber writes), **few-element** (usually no more than a few dozen) scenario, its performance is not optimal, so we reimplemented a high-performance and concurrency-safe cache with `open-addressing-hash + RCU` techniques.
\ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/LICENSE b/vendor/github.com/bytedance/sonic/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/bytedance/sonic/Makefile b/vendor/github.com/bytedance/sonic/Makefile new file mode 100644 index 0000000..8cc0acf --- /dev/null +++ b/vendor/github.com/bytedance/sonic/Makefile @@ -0,0 +1,112 @@ +# +# Copyright 2021 ByteDance Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ARCH := avx avx2 sse +TMP_DIR := output +OUT_DIR := internal/native +SRC_FILE := native/native.c + +CPU_avx := amd64 +CPU_avx2 := amd64 +CPU_sse := amd64 + +TMPL_avx := fastint_amd64_test fastfloat_amd64_test native_amd64_test native_export_amd64 +TMPL_avx2 := fastint_amd64_test fastfloat_amd64_test native_amd64_test native_export_amd64 +TMPL_sse := fastint_amd64_test fastfloat_amd64_test native_amd64_test native_export_amd64 + +CFLAGS_avx := -msse -mno-sse4 -mavx -mpclmul -mno-avx2 -DUSE_AVX=1 -DUSE_AVX2=0 +CFLAGS_avx2 := -msse -mno-sse4 -mavx -mpclmul -mavx2 -DUSE_AVX=1 -DUSE_AVX2=1 +CFLAGS_sse := -msse -mno-sse4 -mno-avx -mno-avx2 -mpclmul + +CC_amd64 := clang +ASM2ASM_amd64 := tools/asm2asm/asm2asm.py + +CFLAGS := -mno-red-zone +CFLAGS += -target x86_64-apple-macos11 +CFLAGS += -fno-asynchronous-unwind-tables +CFLAGS += -fno-builtin +CFLAGS += -fno-exceptions +CFLAGS += -fno-rtti +CFLAGS += -fno-stack-protector +CFLAGS += -nostdlib +CFLAGS += -O3 +CFLAGS += -Wall -Werror + +NATIVE_SRC := $(wildcard native/*.h) +NATIVE_SRC += $(wildcard native/*.c) + +.PHONY: all clean ${ARCH} + +define build_tmpl + $(eval @arch := $(1)) + $(eval @tmpl := $(2)) + $(eval @dest := $(3)) + +${@dest}: ${@tmpl} + mkdir -p $(dir ${@dest}) + echo '// Code generated by Makefile, DO NOT EDIT.' 
> ${@dest} + echo >> ${@dest} + sed -e 's/{{PACKAGE}}/${@arch}/g' ${@tmpl} >> ${@dest} +endef + +define build_arch + $(eval @cpu := $(value CPU_$(1))) + $(eval @deps := $(foreach tmpl,$(value TMPL_$(1)),${OUT_DIR}/$(1)/${tmpl}.go)) + $(eval @asmin := ${TMP_DIR}/$(1)/native.s) + $(eval @asmout := ${OUT_DIR}/$(1)/native_${@cpu}.s) + $(eval @stubin := ${OUT_DIR}/native_${@cpu}.tmpl) + $(eval @stubout := ${OUT_DIR}/$(1)/native_${@cpu}.go) + +$(1): ${@asmout} ${@deps} + +${@asmout}: ${@stubout} ${NATIVE_SRC} + mkdir -p ${TMP_DIR}/$(1) + $${CC_${@cpu}} $${CFLAGS} $${CFLAGS_$(1)} -S -o ${TMP_DIR}/$(1)/native.s ${SRC_FILE} + python3 $${ASM2ASM_${@cpu}} ${@asmout} ${TMP_DIR}/$(1)/native.s + asmfmt -w ${@asmout} + +$(eval $(call \ + build_tmpl, \ + $(1), \ + ${@stubin}, \ + ${@stubout} \ +)) + +$(foreach \ + tmpl, \ + $(value TMPL_$(1)), \ + $(eval $(call \ + build_tmpl, \ + $(1), \ + ${OUT_DIR}/${tmpl}.tmpl, \ + ${OUT_DIR}/$(1)/${tmpl}.go \ + )) \ +) +endef + +all: ${ARCH} + +clean: + for arch in ${ARCH}; do \ + rm -vfr ${TMP_DIR}/$${arch}; \ + rm -vfr ${OUT_DIR}/$${arch}; \ + done + +$(foreach \ + arch, \ + ${ARCH}, \ + $(eval $(call build_arch,${arch})) \ +) diff --git a/vendor/github.com/bytedance/sonic/README.md b/vendor/github.com/bytedance/sonic/README.md new file mode 100644 index 0000000..9cc42c4 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/README.md @@ -0,0 +1,359 @@ +# Sonic + +A blazingly fast JSON serializing & deserializing library, accelerated by JIT (just-in-time compiling) and SIMD (single-instruction-multiple-data). + +## Requirement +- Go 1.15~1.20 +- Linux/MacOS/Windows +- Amd64 ARCH + +## Features +- Runtime object binding without code generation +- Complete APIs for JSON value manipulation +- Fast, fast, fast! + +## Benchmarks +For **all sizes** of json and **all scenarios** of usage, **Sonic performs best**. 
+- [Medium](https://github.com/bytedance/sonic/blob/main/decoder/testdata_test.go#L19) (13KB, 300+ key, 6 layers) +```powershell +goversion: 1.17.1 +goos: darwin +goarch: amd64 +cpu: Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz +BenchmarkEncoder_Generic_Sonic-16 32393 ns/op 402.40 MB/s 11965 B/op 4 allocs/op +BenchmarkEncoder_Generic_Sonic_Fast-16 21668 ns/op 601.57 MB/s 10940 B/op 4 allocs/op +BenchmarkEncoder_Generic_JsonIter-16 42168 ns/op 309.12 MB/s 14345 B/op 115 allocs/op +BenchmarkEncoder_Generic_GoJson-16 65189 ns/op 199.96 MB/s 23261 B/op 16 allocs/op +BenchmarkEncoder_Generic_StdLib-16 106322 ns/op 122.60 MB/s 49136 B/op 789 allocs/op +BenchmarkEncoder_Binding_Sonic-16 6269 ns/op 2079.26 MB/s 14173 B/op 4 allocs/op +BenchmarkEncoder_Binding_Sonic_Fast-16 5281 ns/op 2468.16 MB/s 12322 B/op 4 allocs/op +BenchmarkEncoder_Binding_JsonIter-16 20056 ns/op 649.93 MB/s 9488 B/op 2 allocs/op +BenchmarkEncoder_Binding_GoJson-16 8311 ns/op 1568.32 MB/s 9481 B/op 1 allocs/op +BenchmarkEncoder_Binding_StdLib-16 16448 ns/op 792.52 MB/s 9479 B/op 1 allocs/op +BenchmarkEncoder_Parallel_Generic_Sonic-16 6681 ns/op 1950.93 MB/s 12738 B/op 4 allocs/op +BenchmarkEncoder_Parallel_Generic_Sonic_Fast-16 4179 ns/op 3118.99 MB/s 10757 B/op 4 allocs/op +BenchmarkEncoder_Parallel_Generic_JsonIter-16 9861 ns/op 1321.84 MB/s 14362 B/op 115 allocs/op +BenchmarkEncoder_Parallel_Generic_GoJson-16 18850 ns/op 691.52 MB/s 23278 B/op 16 allocs/op +BenchmarkEncoder_Parallel_Generic_StdLib-16 45902 ns/op 283.97 MB/s 49174 B/op 789 allocs/op +BenchmarkEncoder_Parallel_Binding_Sonic-16 1480 ns/op 8810.09 MB/s 13049 B/op 4 allocs/op +BenchmarkEncoder_Parallel_Binding_Sonic_Fast-16 1209 ns/op 10785.23 MB/s 11546 B/op 4 allocs/op +BenchmarkEncoder_Parallel_Binding_JsonIter-16 6170 ns/op 2112.58 MB/s 9504 B/op 2 allocs/op +BenchmarkEncoder_Parallel_Binding_GoJson-16 3321 ns/op 3925.52 MB/s 9496 B/op 1 allocs/op +BenchmarkEncoder_Parallel_Binding_StdLib-16 3739 ns/op 3486.49 MB/s 9480 B/op 1 allocs/op + +BenchmarkDecoder_Generic_Sonic-16 66812 ns/op 195.10 MB/s 57602 B/op 723 allocs/op +BenchmarkDecoder_Generic_Sonic_Fast-16 54523 ns/op 239.07 MB/s 49786 B/op 313 allocs/op +BenchmarkDecoder_Generic_StdLib-16 124260 ns/op 104.90 MB/s 50869 B/op 772 allocs/op +BenchmarkDecoder_Generic_JsonIter-16 91274 ns/op 142.81 MB/s 55782 B/op 1068 allocs/op +BenchmarkDecoder_Generic_GoJson-16 88569 ns/op 147.17 MB/s 66367 B/op 973 allocs/op +BenchmarkDecoder_Binding_Sonic-16 32557 ns/op 400.38 MB/s 28302 B/op 137 allocs/op +BenchmarkDecoder_Binding_Sonic_Fast-16 28649 ns/op 455.00 MB/s 24999 B/op 34 allocs/op +BenchmarkDecoder_Binding_StdLib-16 111437 ns/op 116.97 MB/s 10576 B/op 208 allocs/op +BenchmarkDecoder_Binding_JsonIter-16 35090 ns/op 371.48 MB/s 14673 B/op 385 allocs/op +BenchmarkDecoder_Binding_GoJson-16 28738 ns/op 453.59 MB/s 22039 B/op 49 allocs/op +BenchmarkDecoder_Parallel_Generic_Sonic-16 12321 ns/op 1057.91 MB/s 57233 B/op 723 allocs/op +BenchmarkDecoder_Parallel_Generic_Sonic_Fast-16 10644 ns/op 1224.64 MB/s 49362 B/op 313 allocs/op +BenchmarkDecoder_Parallel_Generic_StdLib-16 57587 ns/op 226.35 MB/s 50874 B/op 772 allocs/op +BenchmarkDecoder_Parallel_Generic_JsonIter-16 38666 ns/op 337.12 MB/s 55789 B/op 1068 allocs/op +BenchmarkDecoder_Parallel_Generic_GoJson-16 30259 ns/op 430.79 MB/s 66370 B/op 974 allocs/op +BenchmarkDecoder_Parallel_Binding_Sonic-16 5965 ns/op 2185.28 MB/s 27747 B/op 137 allocs/op +BenchmarkDecoder_Parallel_Binding_Sonic_Fast-16 5170 ns/op 2521.31 MB/s 24715 B/op 34 allocs/op 
+BenchmarkDecoder_Parallel_Binding_StdLib-16 27582 ns/op 472.58 MB/s 10576 B/op 208 allocs/op
+BenchmarkDecoder_Parallel_Binding_JsonIter-16 13571 ns/op 960.51 MB/s 14685 B/op 385 allocs/op
+BenchmarkDecoder_Parallel_Binding_GoJson-16 10031 ns/op 1299.51 MB/s 22111 B/op 49 allocs/op
+
+BenchmarkGetOne_Sonic-16 3276 ns/op 3975.78 MB/s 24 B/op 1 allocs/op
+BenchmarkGetOne_Gjson-16 9431 ns/op 1380.81 MB/s 0 B/op 0 allocs/op
+BenchmarkGetOne_Jsoniter-16 51178 ns/op 254.46 MB/s 27936 B/op 647 allocs/op
+BenchmarkGetOne_Parallel_Sonic-16 216.7 ns/op 60098.95 MB/s 24 B/op 1 allocs/op
+BenchmarkGetOne_Parallel_Gjson-16 1076 ns/op 12098.62 MB/s 0 B/op 0 allocs/op
+BenchmarkGetOne_Parallel_Jsoniter-16 17741 ns/op 734.06 MB/s 27945 B/op 647 allocs/op
+BenchmarkSetOne_Sonic-16 9571 ns/op 1360.61 MB/s 1584 B/op 17 allocs/op
+BenchmarkSetOne_Sjson-16 36456 ns/op 357.22 MB/s 52180 B/op 9 allocs/op
+BenchmarkSetOne_Jsoniter-16 79475 ns/op 163.86 MB/s 45862 B/op 964 allocs/op
+BenchmarkSetOne_Parallel_Sonic-16 850.9 ns/op 15305.31 MB/s 1584 B/op 17 allocs/op
+BenchmarkSetOne_Parallel_Sjson-16 18194 ns/op 715.77 MB/s 52247 B/op 9 allocs/op
+BenchmarkSetOne_Parallel_Jsoniter-16 33560 ns/op 388.05 MB/s 45892 B/op 964 allocs/op
+```
+- [Small](https://github.com/bytedance/sonic/blob/main/testdata/small.go) (400B, 11 keys, 3 layers)
+![small benchmarks](bench-small.png)
+- [Large](https://github.com/bytedance/sonic/blob/main/testdata/twitter.json) (635KB, 10000+ keys, 6 layers)
+![large benchmarks](bench-large.png)
+
+See [bench.sh](https://github.com/bytedance/sonic/blob/main/bench.sh) for the benchmark code.
+
+## How it works
+See [INTRODUCTION.md](INTRODUCTION.md).
+
+## Usage
+
+### Marshal/Unmarshal
+
+Default behaviors are mostly consistent with `encoding/json`, except for the HTML escaping form (see [Escape HTML](https://github.com/bytedance/sonic/blob/main/README.md#escape-html)) and the `SortKeys` feature (see [Sort Keys](https://github.com/bytedance/sonic/blob/main/README.md#sort-keys) for optional support), which are **NOT** in conformity with [RFC8259](https://datatracker.ietf.org/doc/html/rfc8259).
+```go
+import "github.com/bytedance/sonic"
+
+var data YourSchema
+// Marshal
+output, err := sonic.Marshal(&data)
+// Unmarshal
+err = sonic.Unmarshal(output, &data)
+```
+
+### Streaming IO
+Sonic supports decoding JSON from an `io.Reader` or encoding objects into an `io.Writer`, aiming at handling multiple values as well as reducing memory consumption.
+
+- encoder
+```go
+import "github.com/bytedance/sonic/encoder"
+
+var o1 = map[string]interface{}{
+    "a": "b",
+}
+var o2 = 1
+var w = bytes.NewBuffer(nil)
+var enc = encoder.NewStreamEncoder(w)
+enc.Encode(o1)
+enc.Encode(o2)
+println(w.String()) // "{"a":"b"}\n1"
+```
+- decoder
+```go
+import "github.com/bytedance/sonic/decoder"
+
+var o = map[string]interface{}{}
+var r = strings.NewReader(`{"a":"b"}{"1":"2"}`)
+var dec = decoder.NewStreamDecoder(r)
+dec.Decode(&o)
+dec.Decode(&o)
+fmt.Printf("%+v", o) // map[1:2 a:b]
+```
+
+### Use Number/Use Int64
+```go
+import "github.com/bytedance/sonic/decoder"
+
+var input = `1`
+var data interface{}
+
+// default: float64
+dc := decoder.NewDecoder(input)
+dc.Decode(&data) // data == float64(1)
+// use json.Number
+dc = decoder.NewDecoder(input)
+dc.UseNumber()
+dc.Decode(&data) // data == json.Number("1")
+// use int64
+dc = decoder.NewDecoder(input)
+dc.UseInt64()
+dc.Decode(&data) // data == int64(1)
+
+root, err := sonic.GetFromString(input)
+// Get json.Number
+jn := root.Number()
+jm := root.InterfaceUseNumber().(json.Number) // jn == jm
+// Get float64
+fn := root.Float64()
+fm := root.Interface().(float64) // fn == fm
+```
+
+### Sort Keys
+On account of the performance loss from sorting (roughly 10%), sonic doesn't enable this feature by default. If your component depends on it to work (like [zstd](https://github.com/facebook/zstd)), use it like this:
+```go
+import "github.com/bytedance/sonic"
+import "github.com/bytedance/sonic/encoder"
+
+// Binding map only
+m := map[string]interface{}{}
+v, err := encoder.Encode(m, encoder.SortMapKeys)
+
+// Or ast.Node.SortKeys() before marshal
+root, _ := sonic.Get(JSON)
+err = root.SortKeys()
+```
+### Escape HTML
+On account of the performance loss (roughly 15%), sonic doesn't enable this feature by default. You can use the `encoder.EscapeHTML` option to enable it (aligned with `encoding/json.HTMLEscape`).
+```go
+import "github.com/bytedance/sonic/encoder"
+
+v := map[string]string{"&&":"<>"}
+ret, err := encoder.Encode(v, encoder.EscapeHTML) // ret == `{"\u0026\u0026":"\u003c\u003e"}`
+```
+### Compact Format
+Sonic encodes primitive objects (struct/map...) as compact-format JSON by default, except when marshaling `json.RawMessage` or `json.Marshaler`: sonic validates their output JSON but does **NOT** compact it, for performance concerns. We provide the option `encoder.CompactMarshaler` to add a compacting process.
+
+### Print Error
+If there is invalid syntax in the input JSON, sonic returns `decoder.SyntaxError`, which supports pretty-printing of the error position:
+```go
+import "github.com/bytedance/sonic"
+import "github.com/bytedance/sonic/decoder"
+
+var data interface{}
+err := sonic.UnmarshalString("[[[}]]", &data)
+if err != nil {
+    /* One line by default */
+    println(err.Error()) // "Syntax error at index 3: invalid char\n\n\t[[[}]]\n\t...^..\n"
+    /* Pretty print */
+    if e, ok := err.(decoder.SyntaxError); ok {
+        /* Syntax error at index 3: invalid char
+
+            [[[}]]
+            ...^..
+        */
+        print(e.Description())
+    } else if me, ok := err.(*decoder.MismatchTypeError); ok {
+        // decoder.MismatchTypeError is new to Sonic v1.6.0
+        print(me.Description())
+    }
+}
+```
+
+#### Mismatched Types [Sonic v1.6.0]
+If there is a **mismatch-typed** value for a given key, sonic reports `decoder.MismatchTypeError` (if there are several, it reports the last one), but still skips the wrong value and keeps decoding the rest of the JSON.
+```go
+import "github.com/bytedance/sonic"
+
+var data = struct{
+    A int
+    B int
+}{}
+err := sonic.UnmarshalString(`{"A":"1","B":1}`, &data)
+println(err.Error()) // Mismatch type int with value string "at index 5: mismatched type with value\n\n\t{\"A\":\"1\",\"B\":1}\n\t.....^.........\n"
+fmt.Printf("%+v", data) // {A:0 B:1}
+```
+### Ast.Node
+Sonic/ast.Node is a completely self-contained AST for JSON. It implements both serialization and deserialization, and provides robust APIs for obtaining and modifying generic data.
+#### Get/Index
+Search partial JSON by the given paths, each of which must be a non-negative integer, a string, or nil:
+```go
+import "github.com/bytedance/sonic"
+
+input := []byte(`{"key1":[{},{"key2":{"key3":[1,2,3]}}]}`)
+
+// no path, returns the entire json
+root, err := sonic.Get(input)
+raw := root.Raw() // == string(input)
+
+// multiple paths
+root, err = sonic.Get(input, "key1", 1, "key2")
+sub := root.Get("key3").Index(2).Int64() // == 3
+```
+**Tip**: since `Index()` uses an offset to locate data, which is much faster than scanning like `Get()` does, we suggest using it as much as possible. Sonic also provides another API, `IndexOrGet()`, which uses the offset underneath while also ensuring the key matches.
+
+#### Set/Unset
+Modify the JSON content with `Set()`/`Unset()`:
+```go
+import "github.com/bytedance/sonic/ast"
+
+// Set
+exist, err := root.Set("key4", ast.NewBool(true)) // exist == false
+alias1 := root.Get("key4")
+println(alias1.Valid()) // true
+alias2 := root.Index(1)
+println(alias1 == alias2) // true
+
+// Unset
+exist, err = root.UnsetByIndex(1) // exist == true
+println(root.Get("key4").Check()) // "value not exist"
+```
+
+#### Serialize
+To encode `ast.Node` as JSON, use `MarshalJSON()` or `json.Marshal()` (you MUST pass the node's pointer):
+```go
+import (
+    "encoding/json"
+    "github.com/bytedance/sonic"
+)
+
+buf, err := root.MarshalJSON()
+println(string(buf)) // {"key1":[{},{"key2":{"key3":[1,2,3]}}]}
+exp, err := json.Marshal(&root) // WARN: use the pointer
+println(string(buf) == string(exp)) // true
+```
+
+#### APIs
+- validation: `Check()`, `Error()`, `Valid()`, `Exist()`
+- searching: `Index()`, `Get()`, `IndexPair()`, `IndexOrGet()`, `GetByPath()`
+- go-type casting: `Int64()`, `Float64()`, `String()`, `Number()`, `Bool()`, `Map[UseNumber|UseNode]()`, `Array[UseNumber|UseNode]()`, `Interface[UseNumber|UseNode]()`
+- go-type packing: `NewRaw()`, `NewNumber()`, `NewNull()`, `NewBool()`, `NewString()`, `NewObject()`, `NewArray()`
+- iteration: `Values()`, `Properties()`, `ForEach()`, `SortKeys()`
+- modification: `Set()`, `SetByIndex()`, `Add()`
+
+## Compatibility
+Sonic **DOES NOT** ensure support for all environments, due to the difficulty of developing high-performance code. For developers who use sonic to build their applications in different environments, we have the following suggestions:
+
+- Developing on **Mac M1**: Make sure you have Rosetta 2 installed on your machine, and set `GOARCH=amd64` when building your application. Rosetta 2 can automatically translate x86 binaries to arm64 binaries and run x86 applications on Mac M1.
+- Developing on **Linux arm64**: You can install qemu and use the `qemu-x86_64 -cpu max` command to run x86 binaries on arm64 for applications built with sonic. Qemu can achieve a translation effect similar to Rosetta 2 on Mac M1.
+
+For developers who want to use sonic on Linux arm64 without qemu, or those who want to handle JSON strictly consistently with `encoding/json`, we provide some compatible APIs as `sonic.API`:
+- `ConfigDefault`: sonic's default config (`EscapeHTML=false`, `SortKeys=false`...), for running in sonic-supporting environments. Elsewhere it falls back to `encoding/json` with the corresponding config, and some options like `SortKeys=false` will be invalid.
+- `ConfigStd`: the std-compatible config (`EscapeHTML=true`, `SortKeys=true`...), for running in sonic-supporting environments. Elsewhere it falls back to `encoding/json`.
+- `ConfigFastest`: the fastest config (`NoQuoteTextMarshaler=true`), for running in sonic-supporting environments. Elsewhere it falls back to `encoding/json` with the corresponding config, and some options will be invalid.
+
+## Tips
+
+### Pretouch
+Since Sonic uses [golang-asm](https://github.com/twitchyliquid64/golang-asm) as a JIT assembler, which is NOT very suitable for runtime compiling, the first-hit run of a huge schema may cause a request timeout or even a process OOM. For better stability, we advise **using `Pretouch()` for huge-schema or compact-memory applications** before `Marshal()/Unmarshal()`.
+```go
+import (
+    "reflect"
+    "github.com/bytedance/sonic"
+    "github.com/bytedance/sonic/option"
+)
+
+func init() {
+    var v HugeStruct
+
+    // For most large types (nesting depth <= option.DefaultMaxInlineDepth)
+    err := sonic.Pretouch(reflect.TypeOf(v))
+
+    // with more CompileOption...
+    err = sonic.Pretouch(reflect.TypeOf(v),
+        // If the type is deeply nested (nesting depth > option.DefaultMaxInlineDepth),
+        // you can set the number of recursive compile loops in Pretouch for better stability in JIT.
+        option.WithCompileRecursiveDepth(loop),
+        // For a large nested struct, try to set a smaller depth to reduce compiling time.
+        option.WithCompileMaxInlineDepth(depth),
+    )
+}
+```
+
+### Copy string
+When decoding **string values without any escaped characters**, sonic references them from the origin JSON buffer instead of mallocing a new buffer to copy into. This helps a lot with CPU performance but may keep the whole JSON buffer in memory as long as the decoded objects are being used. In practice, we found the extra memory introduced by referring to the JSON buffer is usually 20% ~ 80% of the decoded objects. Once an application holds these objects for a long time (for example, caching the decoded objects for reuse), its in-use memory on the server may go up. We provide the option `decoder.CopyString()` for users who choose not to reference the JSON buffer, which may cause a decline in CPU performance to some degree.
+
+### Pass string or []byte?
+For alignment with `encoding/json`, we provide an API to pass `[]byte` as an argument, but a copy of the buffer is conducted at the same time for safety, which may cost performance when the origin JSON is huge. Therefore, you can use `UnmarshalString()` and `GetFromString()` to pass a string, as long as your origin data is a string or a **nocopy-cast** is safe for your []byte. We also provide the API `MarshalString()` for a convenient **nocopy-cast** of the encoded JSON []byte, which is safe since sonic's output bytes are always duplicated and unique.
+
+### Accelerate `encoding.TextMarshaler`
+To ensure data security, sonic.Encoder quotes and escapes string values from `encoding.TextMarshaler` interfaces by default, which may degrade performance a lot if most of your data is in that form. We provide `encoder.NoQuoteTextMarshaler` to skip these operations, which means you **MUST** ensure their output strings are escaped and quoted following [RFC8259](https://datatracker.ietf.org/doc/html/rfc8259).
+
+
+### Better performance for generic data
+In the **fully-parsed** scenario, `Unmarshal()` performs better than `Get()`+`Node.Interface()`. But if you only have part of the schema for a specific JSON, you can combine `Get()` and `Unmarshal()`:
+```go
+import "github.com/bytedance/sonic"
+
+node, err := sonic.GetFromString(_TwitterJson, "statuses", 3, "user")
+var user User // your partial schema...
+err = sonic.UnmarshalString(node.Raw(), &user)
+```
+Even if you don't have any schema, use `ast.Node` as the container of generic values instead of `map` or `interface`:
+```go
+import "github.com/bytedance/sonic"
+
+root, err := sonic.GetFromString(_TwitterJson)
+user := root.GetByPath("statuses", 3, "user") // === root.Get("statuses").Index(3).Get("user")
+err = user.Check()
+
+// err = user.LoadAll() // only call this when you want to use 'user' concurrently...
+go someFunc(user)
+```
+Why? Because `ast.Node` stores its children using an `array`:
+- `Array`'s performance is **much better** than `Map`'s when inserting (deserializing) and scanning (serializing) data;
+- **Hashing** (`map[x]`) is not as efficient as **Indexing** (`array[x]`), and `ast.Node` can index into **both arrays and objects**;
+- Using `Interface()`/`Map()` means Sonic must parse all the underlying values, while `ast.Node` can parse them **on demand**.
+
+**CAUTION:** `ast.Node` **DOESN'T** ensure concurrency safety directly, due to its **lazy-load** design. However, you can call `Node.Load()`/`Node.LoadAll()` to achieve that, which may reduce performance, though it still works faster than converting to `map` or `interface{}`.
+
+## Community
+Sonic is a subproject of [CloudWeGo](https://www.cloudwego.io/). We are committed to building a cloud native ecosystem.
diff --git a/vendor/github.com/bytedance/sonic/api.go b/vendor/github.com/bytedance/sonic/api.go
new file mode 100644
index 0000000..a2bc67e
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/api.go
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sonic
+
+import (
+    `io`
+
+    `github.com/bytedance/sonic/ast`
+)
+
+// Config is a combination of sonic/encoder.Options and sonic/decoder.Options.
+type Config struct {
+    // EscapeHTML indicates the encoder to escape all HTML characters
+    // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape).
+    // WARNING: This hurts performance A LOT, USE WITH CARE.
+    EscapeHTML bool
+
+    // SortMapKeys indicates the encoder that the keys of a map need to be sorted
+    // before serializing into JSON.
+    // WARNING: This hurts performance A LOT, USE WITH CARE.
+ SortMapKeys bool + + // CompactMarshaler indicates encoder that the output JSON from json.Marshaler + // is always compact and needs no validation + CompactMarshaler bool + + // NoQuoteTextMarshaler indicates encoder that the output text from encoding.TextMarshaler + // is always escaped string and needs no quoting + NoQuoteTextMarshaler bool + + // NoNullSliceOrMap indicates encoder that all empty Array or Object are encoded as '[]' or '{}', + // instead of 'null' + NoNullSliceOrMap bool + + // UseInt64 indicates decoder to unmarshal an integer into an interface{} as an + // int64 instead of as a float64. + UseInt64 bool + + // UseNumber indicates decoder to unmarshal a number into an interface{} as a + // json.Number instead of as a float64. + UseNumber bool + + // UseUnicodeErrors indicates decoder to return an error when encounter invalid + // UTF-8 escape sequences. + UseUnicodeErrors bool + + // DisallowUnknownFields indicates decoder to return an error when the destination + // is a struct and the input contains object keys which do not match any + // non-ignored, exported fields in the destination. + DisallowUnknownFields bool + + // CopyString indicates decoder to decode string values by copying instead of referring. + CopyString bool + + // ValidateString indicates decoder and encoder to valid string values: decoder will return errors + // when unescaped control chars(\u0000-\u001f) in the string value of JSON. + ValidateString bool +} + +var ( + // ConfigDefault is the default config of APIs, aiming at efficiency and safty. + ConfigDefault = Config{}.Froze() + + // ConfigStd is the standard config of APIs, aiming at being compatible with encoding/json. + ConfigStd = Config{ + EscapeHTML : true, + SortMapKeys: true, + CompactMarshaler: true, + CopyString : true, + ValidateString : true, + }.Froze() + + // ConfigFastest is the fastest config of APIs, aiming at speed. + ConfigFastest = Config{ + NoQuoteTextMarshaler: true, + }.Froze() +) + + +// API is a binding of specific config. +// This interface is inspired by github.com/json-iterator/go, +// and has same behaviors under equavilent config. +type API interface { + // MarshalToString returns the JSON encoding string of v + MarshalToString(v interface{}) (string, error) + // Marshal returns the JSON encoding bytes of v. + Marshal(v interface{}) ([]byte, error) + // MarshalIndent returns the JSON encoding bytes with indent and prefix. + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + // UnmarshalFromString parses the JSON-encoded bytes and stores the result in the value pointed to by v. + UnmarshalFromString(str string, v interface{}) error + // Unmarshal parses the JSON-encoded string and stores the result in the value pointed to by v. + Unmarshal(data []byte, v interface{}) error + // NewEncoder create a Encoder holding writer + NewEncoder(writer io.Writer) Encoder + // NewDecoder create a Decoder holding reader + NewDecoder(reader io.Reader) Decoder + // Valid validates the JSON-encoded bytes and reportes if it is valid + Valid(data []byte) bool +} + +// Encoder encodes JSON into io.Writer +type Encoder interface { + // Encode writes the JSON encoding of v to the stream, followed by a newline character. + Encode(val interface{}) error + // SetEscapeHTML specifies whether problematic HTML characters + // should be escaped inside JSON quoted strings. 
+    // The default behavior is NOT to escape.
+    SetEscapeHTML(on bool)
+    // SetIndent instructs the encoder to format each subsequent encoded value
+    // as if indented by the package-level function Indent(dst, src, prefix, indent).
+    // Calling SetIndent("", "") disables indentation.
+    SetIndent(prefix, indent string)
+}
+
+// Decoder decodes JSON from an io.Reader
+type Decoder interface {
+    // Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v.
+    Decode(val interface{}) error
+    // Buffered returns a reader of the data remaining in the Decoder's buffer.
+    // The reader is valid until the next call to Decode.
+    Buffered() io.Reader
+    // DisallowUnknownFields causes the Decoder to return an error when the destination is a struct
+    // and the input contains object keys which do not match any non-ignored, exported fields in the destination.
+    DisallowUnknownFields()
+    // More reports whether there is another element in the current array or object being parsed.
+    More() bool
+    // UseNumber causes the Decoder to unmarshal a number into an interface{} as a Number instead of as a float64.
+    UseNumber()
+}
+
+// Marshal returns the JSON encoding bytes of v.
+func Marshal(val interface{}) ([]byte, error) {
+    return ConfigDefault.Marshal(val)
+}
+
+// MarshalString returns the JSON encoding string of v.
+func MarshalString(val interface{}) (string, error) {
+    return ConfigDefault.MarshalToString(val)
+}
+
+// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
+// NOTICE: This API copies the given buffer by default;
+// if you want to pass JSON more efficiently, use UnmarshalString instead.
+func Unmarshal(buf []byte, val interface{}) error {
+    return ConfigDefault.Unmarshal(buf, val)
+}
+
+// UnmarshalString is like Unmarshal, except buf is a string.
+func UnmarshalString(buf string, val interface{}) error {
+    return ConfigDefault.UnmarshalFromString(buf, val)
+}
+
+// Get searches the given path from json,
+// and returns its representing ast.Node.
+//
+// Each path argument must be an integer or a string:
+//   - Integer: search the current node as an array by index
+//   - String: search the current node as an object by key
+//
+// Note: the API expects the json to be at least well-formed;
+// otherwise it may return an unexpected result.
+func Get(src []byte, path ...interface{}) (ast.Node, error) {
+    return GetFromString(string(src), path...)
+}
+
+// GetFromString is the same as Get except that src is a string,
+// which avoids an unnecessary memory copy.
+func GetFromString(src string, path ...interface{}) (ast.Node, error) {
+    return ast.NewSearcher(src).GetByPath(path...)
+}
\ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/ast/api_amd64.go b/vendor/github.com/bytedance/sonic/ast/api_amd64.go
new file mode 100644
index 0000000..6b3458a
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/ast/api_amd64.go
@@ -0,0 +1,126 @@
+// +build amd64,go1.15,!go1.21
+
+
+package ast
+
+import (
+    `runtime`
+    `unsafe`
+
+    `github.com/bytedance/sonic/encoder`
+    `github.com/bytedance/sonic/internal/native`
+    `github.com/bytedance/sonic/internal/native/types`
+    `github.com/bytedance/sonic/internal/rt`
+    uq `github.com/bytedance/sonic/unquote`
+    `github.com/chenzhuoyu/base64x`
+)
+
+var typeByte = rt.UnpackEface(byte(0)).Type
+
+func quote(buf *[]byte, val string) {
+    *buf = append(*buf, '"')
+    if len(val) == 0 {
+        *buf = append(*buf, '"')
+        return
+    }
+
+    sp := rt.IndexChar(val, 0)
+    nb := len(val)
+    b := (*rt.GoSlice)(unsafe.Pointer(buf))
+
+    // input buffer
+    for nb > 0 {
+        // output buffer
+        dp := unsafe.Pointer(uintptr(b.Ptr) + uintptr(b.Len))
+        dn := b.Cap - b.Len
+        // call native.Quote, dn is the byte count it outputs
+        ret := native.Quote(sp, nb, dp, &dn, 0)
+        // update *buf length
+        b.Len += dn
+
+        // no more output is needed
+        if ret >= 0 {
+            break
+        }
+
+        // double the buffer size
+        *b = growslice(typeByte, *b, b.Cap*2)
+        // ret is the complement of the consumed input count
+        ret = ^ret
+        // advance the input buffer
+        nb -= ret
+        sp = unsafe.Pointer(uintptr(sp) + uintptr(ret))
+    }
+
+    runtime.KeepAlive(buf)
+    runtime.KeepAlive(sp)
+    *buf = append(*buf, '"')
+}
+
+func unquote(src string) (string, types.ParsingError) {
+    return uq.String(src)
+}
+
+func decodeBase64(src string) ([]byte, error) {
+    return base64x.StdEncoding.DecodeString(src)
+}
+
+func encodeBase64(src []byte) string {
+    return base64x.StdEncoding.EncodeToString(src)
+}
+
+func (self *Parser) decodeValue() (val types.JsonState) {
+    sv := (*rt.GoString)(unsafe.Pointer(&self.s))
+    self.p = native.Value(sv.Ptr, sv.Len, self.p, &val, 0)
+    return
+}
+
+func (self *Parser) skip() (int, types.ParsingError) {
+    fsm := types.NewStateMachine()
+    start := native.SkipOne(&self.s, &self.p, fsm, 0)
+    types.FreeStateMachine(fsm)
+
+    if start < 0 {
+        return self.p, types.ParsingError(-start)
+    }
+    return start, 0
+}
+
+func (self *Node) encodeInterface(buf *[]byte) error {
+    // WARN: NOT compatible with json.Encoder
+    return encoder.EncodeInto(buf, self.packAny(), 0)
+}
+
+func (self *Parser) skipFast() (int, types.ParsingError) {
+    start := native.SkipOneFast(&self.s, &self.p)
+    if start < 0 {
+        return self.p, types.ParsingError(-start)
+    }
+    return start, 0
+}
+
+func (self *Parser) getByPath(path ...interface{}) (int, types.ParsingError) {
+    start := native.GetByPath(&self.s, &self.p, &path)
+    runtime.KeepAlive(path)
+    if start < 0 {
+        return self.p, types.ParsingError(-start)
+    }
+    return start, 0
+}
+
+
+func (self *Searcher) GetByPath(path ...interface{}) (Node, error) {
+    var err types.ParsingError
+    var start int
+
+    self.parser.p = 0
+    start, err = self.parser.getByPath(path...)
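+    // getByPath has already translated a negative native return value into a
+    // ParsingError, so any non-zero code here is surfaced as a syntax error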
+ if err != 0 { + return Node{}, self.parser.syntaxError(err) + } + + t := switchRawType(self.parser.s[start]) + if t == _V_NONE { + return Node{}, self.parser.ExportError(err) + } + return newRawNode(self.parser.s[start:self.parser.p], t), nil +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/ast/api_compat.go b/vendor/github.com/bytedance/sonic/ast/api_compat.go new file mode 100644 index 0000000..642330c --- /dev/null +++ b/vendor/github.com/bytedance/sonic/ast/api_compat.go @@ -0,0 +1,102 @@ +// +build !amd64 go1.21 + +package ast + +import ( + `encoding/base64` + `encoding/json` + `fmt` + + `github.com/bytedance/sonic/internal/native/types` + `github.com/bytedance/sonic/internal/rt` +) + +func quote(buf *[]byte, val string) { + quoteString(buf, val) +} + +func unquote(src string) (string, types.ParsingError) { + sp := rt.IndexChar(src, -1) + out, ok := unquoteBytes(rt.BytesFrom(sp, len(src)+2, len(src)+2)) + if !ok { + return "", types.ERR_INVALID_ESCAPE + } + return rt.Mem2Str(out), 0 +} + + + +func decodeBase64(src string) ([]byte, error) { + return base64.StdEncoding.DecodeString(src) +} + +func encodeBase64(src []byte) string { + return base64.StdEncoding.EncodeToString(src) +} + +func (self *Parser) decodeValue() (val types.JsonState) { + e, v := decodeValue(self.s, self.p) + if e < 0 { + return v + } + self.p = e + return v +} + +func (self *Parser) skip() (int, types.ParsingError) { + e, s := skipValue(self.s, self.p) + if e < 0 { + return self.p, types.ParsingError(-e) + } + self.p = e + return s, 0 +} + +func (self *Parser) skipFast() (int, types.ParsingError) { + return self.skip() +} + +func (self *Node) encodeInterface(buf *[]byte) error { + out, err := json.Marshal(self.packAny()) + if err != nil { + return err + } + *buf = append(*buf, out...) 
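+    // compat path: the marshaled bytes are appended to the caller's buffer,
+    // matching the contract of the native (amd64) encoder path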
+    return nil
+}
+
+func (self *Searcher) GetByPath(path ...interface{}) (Node, error) {
+    self.parser.p = 0
+
+    var err types.ParsingError
+    for _, p := range path {
+        switch p := p.(type) {
+        case int:
+            if err = self.parser.searchIndex(p); err != 0 {
+                return Node{}, self.parser.ExportError(err)
+            }
+        case string:
+            if err = self.parser.searchKey(p); err != 0 {
+                return Node{}, self.parser.ExportError(err)
+            }
+        default:
+            panic("path must be either int or string")
+        }
+    }
+
+    var start = self.parser.p
+    if start, err = self.parser.skip(); err != 0 {
+        return Node{}, self.parser.ExportError(err)
+    }
+    ns := len(self.parser.s)
+    if self.parser.p > ns || start >= ns || start >= self.parser.p {
+        return Node{}, fmt.Errorf("skip %d char out of json boundary", start)
+    }
+
+    t := switchRawType(self.parser.s[start])
+    if t == _V_NONE {
+        return Node{}, self.parser.ExportError(err)
+    }
+
+    return newRawNode(self.parser.s[start:self.parser.p], t), nil
+}
\ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/ast/asm.s b/vendor/github.com/bytedance/sonic/ast/asm.s
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/github.com/bytedance/sonic/ast/decode.go b/vendor/github.com/bytedance/sonic/ast/decode.go
new file mode 100644
index 0000000..d54e983
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/ast/decode.go
@@ -0,0 +1,430 @@
+package ast
+
+import (
+    `encoding/base64`
+    `runtime`
+    `strconv`
+    `unsafe`
+
+    `github.com/bytedance/sonic/internal/native/types`
+    `github.com/bytedance/sonic/internal/rt`
+)
+
+const _blankCharsMask = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n')
+
+const (
+    bytesNull   = "null"
+    bytesTrue   = "true"
+    bytesFalse  = "false"
+    bytesObject = "{}"
+    bytesArray  = "[]"
+)
+
+func isSpace(c byte) bool {
+    return (int(1<<c) & _blankCharsMask) != 0
+}
+
+//go:nocheckptr
+func skipBlank(src string, pos int) int {
+    se := uintptr(rt.IndexChar(src, len(src)))
+    sp := uintptr(rt.IndexChar(src, pos))
+
+    for sp < se {
+        if !isSpace(*(*byte)(unsafe.Pointer(sp))) {
+            break
+        }
+        sp += 1
+    }
+    if sp >= se {
+        return -int(types.ERR_EOF)
+    }
+    runtime.KeepAlive(src)
+    return int(sp - uintptr(rt.IndexChar(src, 0)))
+}
+
+func decodeNull(src string, pos int) (ret int) {
+    ret = pos + 4
+    if ret > len(src) {
+        return -int(types.ERR_EOF)
+    }
+    if src[pos:ret] == bytesNull {
+        return ret
+    } else {
+        return -int(types.ERR_INVALID_CHAR)
+    }
+}
+
+func decodeTrue(src string, pos int) (ret int) {
+    ret = pos + 4
+    if ret > len(src) {
+        return -int(types.ERR_EOF)
+    }
+    if src[pos:ret] == bytesTrue {
+        return ret
+    } else {
+        return -int(types.ERR_INVALID_CHAR)
+    }
+
+}
+
+func decodeFalse(src string, pos int) (ret int) {
+    ret = pos + 5
+    if ret > len(src) {
+        return -int(types.ERR_EOF)
+    }
+    if src[pos:ret] == bytesFalse {
+        return ret
+    }
+    return -int(types.ERR_INVALID_CHAR)
+}
+
+func decodeString(src string, pos int) (ret int, v string) {
+    ret, ep := skipString(src, pos)
+    if ep == -1 {
+        (*rt.GoString)(unsafe.Pointer(&v)).Ptr = rt.IndexChar(src, pos+1)
+        (*rt.GoString)(unsafe.Pointer(&v)).Len = ret - pos - 2
+        return ret, v
+    }
+
+    vv, ok := unquoteBytes(rt.Str2Mem(src[pos:ret]))
+    if !ok {
+        return -int(types.ERR_INVALID_CHAR), ""
+    }
+
+    runtime.KeepAlive(src)
+    return ret, rt.Mem2Str(vv)
+}
+
+func decodeBinary(src string, pos int) (ret int, v []byte) {
+    var vv string
+    ret, vv = decodeString(src, pos)
+    if ret < 0 {
+        return ret, nil
+    }
+    var err error
+    v, err = base64.StdEncoding.DecodeString(vv)
+    if err != nil {
+        return -int(types.ERR_INVALID_CHAR), nil
+    }
+    return ret, v
+}
+
+func isDigit(c byte) bool {
+    return c >= '0' && c <= '9'
+}
+
+func decodeInt64(src string, pos int) (ret int, v int64, err error) {
+    sp := uintptr(rt.IndexChar(src, pos))
+    ss := uintptr(sp)
+    se := uintptr(rt.IndexChar(src,
len(src))) + if uintptr(sp) >= se { + return -int(types.ERR_EOF), 0, nil + } + + if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { + sp += 1 + } + if sp == se { + return -int(types.ERR_EOF), 0, nil + } + + for ; sp < se; sp += uintptr(1) { + if !isDigit(*(*byte)(unsafe.Pointer(sp))) { + break + } + } + + if sp < se { + if c := *(*byte)(unsafe.Pointer(sp)); c == '.' || c == 'e' || c == 'E' { + return -int(types.ERR_INVALID_NUMBER_FMT), 0, nil + } + } + + var vv string + ret = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) + (*rt.GoString)(unsafe.Pointer(&vv)).Ptr = unsafe.Pointer(ss) + (*rt.GoString)(unsafe.Pointer(&vv)).Len = ret - pos + + v, err = strconv.ParseInt(vv, 10, 64) + if err != nil { + //NOTICE: allow overflow here + if err.(*strconv.NumError).Err == strconv.ErrRange { + return ret, 0, err + } + return -int(types.ERR_INVALID_CHAR), 0, err + } + + runtime.KeepAlive(src) + return ret, v, nil +} + +func isNumberChars(c byte) bool { + return (c >= '0' && c <= '9') || c == '+' || c == '-' || c == 'e' || c == 'E' || c == '.' +} + +func decodeFloat64(src string, pos int) (ret int, v float64, err error) { + sp := uintptr(rt.IndexChar(src, pos)) + ss := uintptr(sp) + se := uintptr(rt.IndexChar(src, len(src))) + if uintptr(sp) >= se { + return -int(types.ERR_EOF), 0, nil + } + + if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { + sp += 1 + } + if sp == se { + return -int(types.ERR_EOF), 0, nil + } + + for ; sp < se; sp += uintptr(1) { + if !isNumberChars(*(*byte)(unsafe.Pointer(sp))) { + break + } + } + + var vv string + ret = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) + (*rt.GoString)(unsafe.Pointer(&vv)).Ptr = unsafe.Pointer(ss) + (*rt.GoString)(unsafe.Pointer(&vv)).Len = ret - pos + + v, err = strconv.ParseFloat(vv, 64) + if err != nil { + //NOTICE: allow overflow here + if err.(*strconv.NumError).Err == strconv.ErrRange { + return ret, 0, err + } + return -int(types.ERR_INVALID_CHAR), 0, err + } + + runtime.KeepAlive(src) + return ret, v, nil +} + +func decodeValue(src string, pos int) (ret int, v types.JsonState) { + pos = skipBlank(src, pos) + if pos < 0 { + return pos, types.JsonState{Vt: types.ValueType(pos)} + } + switch c := src[pos]; c { + case 'n': + ret = decodeNull(src, pos) + if ret < 0 { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + return ret, types.JsonState{Vt: types.V_NULL} + case '"': + var ep int + ret, ep = skipString(src, pos) + if ret < 0 { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + return ret, types.JsonState{Vt: types.V_STRING, Iv: int64(pos + 1), Ep: ep} + case '{': + return pos + 1, types.JsonState{Vt: types.V_OBJECT} + case '[': + return pos + 1, types.JsonState{Vt: types.V_ARRAY} + case 't': + ret = decodeTrue(src, pos) + if ret < 0 { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + return ret, types.JsonState{Vt: types.V_TRUE} + case 'f': + ret = decodeFalse(src, pos) + if ret < 0 { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + return ret, types.JsonState{Vt: types.V_FALSE} + case '-', '+', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + var iv int64 + ret, iv, _ = decodeInt64(src, pos) + if ret >= 0 { + return ret, types.JsonState{Vt: types.V_INTEGER, Iv: iv, Ep: pos} + } else if ret != -int(types.ERR_INVALID_NUMBER_FMT) { + return ret, types.JsonState{Vt: types.ValueType(ret)} + } + var fv float64 + ret, fv, _ = decodeFloat64(src, pos) + if ret >= 0 { + return ret, types.JsonState{Vt: types.V_DOUBLE, Dv: fv, Ep: pos} + } else { + return ret, 
types.JsonState{Vt: types.ValueType(ret)} + } + default: + return -int(types.ERR_INVALID_CHAR), types.JsonState{Vt:-types.ValueType(types.ERR_INVALID_CHAR)} + } +} + +func skipNumber(src string, pos int) (ret int) { + sp := uintptr(rt.IndexChar(src, pos)) + se := uintptr(rt.IndexChar(src, len(src))) + if uintptr(sp) >= se { + return -int(types.ERR_EOF) + } + + if c := *(*byte)(unsafe.Pointer(sp)); c == '-' { + sp += 1 + } + ss := sp + + var pointer bool + var exponent bool + var lastIsDigit bool + var nextNeedDigit = true + + for ; sp < se; sp += uintptr(1) { + c := *(*byte)(unsafe.Pointer(sp)) + if isDigit(c) { + lastIsDigit = true + nextNeedDigit = false + continue + } else if nextNeedDigit { + return -int(types.ERR_INVALID_CHAR) + } else if c == '.' { + if !lastIsDigit || pointer || sp == ss { + return -int(types.ERR_INVALID_CHAR) + } + pointer = true + lastIsDigit = false + nextNeedDigit = true + continue + } else if c == 'e' || c == 'E' { + if !lastIsDigit || exponent { + return -int(types.ERR_INVALID_CHAR) + } + if sp == se-1 { + return -int(types.ERR_EOF) + } + exponent = true + lastIsDigit = false + nextNeedDigit = false + continue + } else if c == '-' || c == '+' { + if prev := *(*byte)(unsafe.Pointer(sp - 1)); prev != 'e' && prev != 'E' { + return -int(types.ERR_INVALID_CHAR) + } + lastIsDigit = false + nextNeedDigit = true + continue + } else { + break + } + } + + if nextNeedDigit { + return -int(types.ERR_EOF) + } + + runtime.KeepAlive(src) + return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) +} + +func skipString(src string, pos int) (ret int, ep int) { + if pos+1 >= len(src) { + return -int(types.ERR_EOF), -1 + } + + sp := uintptr(rt.IndexChar(src, pos)) + se := uintptr(rt.IndexChar(src, len(src))) + + if *(*byte)(unsafe.Pointer(sp)) != '"' { + return -int(types.ERR_INVALID_CHAR), -1 + } + sp += 1 + + ep = -1 + for sp < se { + c := *(*byte)(unsafe.Pointer(sp)) + if c == '\\' { + if ep == -1 { + ep = int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) + } + sp += 2 + continue + } + sp += 1 + if c == '"' { + break + } + } + + if sp > se { + return -int(types.ERR_EOF), -1 + } + + runtime.KeepAlive(src) + return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)), ep +} + +func skipPair(src string, pos int, lchar byte, rchar byte) (ret int) { + if pos+1 >= len(src) { + return -int(types.ERR_EOF) + } + + sp := uintptr(rt.IndexChar(src, pos)) + se := uintptr(rt.IndexChar(src, len(src))) + + if *(*byte)(unsafe.Pointer(sp)) != lchar { + return -int(types.ERR_INVALID_CHAR) + } + + sp += 1 + nbrace := 1 + inquote := false + + for sp < se { + c := *(*byte)(unsafe.Pointer(sp)) + if c == '\\' { + sp += 2 + continue + } else if c == '"' { + inquote = !inquote + } else if c == lchar { + if !inquote { + nbrace += 1 + } + } else if c == rchar { + if !inquote { + nbrace -= 1 + if nbrace == 0 { + sp += 1 + break + } + } + } + sp += 1 + } + + if nbrace != 0 { + return -int(types.ERR_INVALID_CHAR) + } + + runtime.KeepAlive(src) + return int(uintptr(sp) - uintptr((*rt.GoString)(unsafe.Pointer(&src)).Ptr)) +} + +func skipValue(src string, pos int) (ret int, start int) { + pos = skipBlank(src, pos) + if pos < 0 { + return pos, -1 + } + switch c := src[pos]; c { + case 'n': + ret = decodeNull(src, pos) + case '"': + ret, _ = skipString(src, pos) + case '{': + ret = skipPair(src, pos, '{', '}') + case '[': + ret = skipPair(src, pos, '[', ']') + case 't': + ret = decodeTrue(src, pos) + case 'f': + ret = decodeFalse(src, pos) + case '-', '+', 
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + ret = skipNumber(src, pos) + default: + ret = -int(types.ERR_INVALID_CHAR) + } + return ret, pos +} diff --git a/vendor/github.com/bytedance/sonic/ast/encode.go b/vendor/github.com/bytedance/sonic/ast/encode.go new file mode 100644 index 0000000..1187e30 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/ast/encode.go @@ -0,0 +1,259 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ast + +import ( + `sync` + `unicode/utf8` + + `github.com/bytedance/sonic/internal/rt` +) + +const ( + _MaxBuffer = 1024 // 1KB buffer size +) + +func quoteString(e *[]byte, s string) { + *e = append(*e, '"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if safeSet[b] { + i++ + continue + } + if start < i { + *e = append(*e, s[start:i]...) + } + *e = append(*e, '\\') + switch b { + case '\\', '"': + *e = append(*e, b) + case '\n': + *e = append(*e, 'n') + case '\r': + *e = append(*e, 'r') + case '\t': + *e = append(*e, 't') + default: + // This encodes bytes < 0x20 except for \t, \n and \r. + // If escapeHTML is set, it also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. + *e = append(*e, `u00`...) + *e = append(*e, hex[b>>4]) + *e = append(*e, hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + // if c == utf8.RuneError && size == 1 { + // if start < i { + // e.Write(s[start:i]) + // } + // e.WriteString(`\ufffd`) + // i += size + // start = i + // continue + // } + if c == '\u2028' || c == '\u2029' { + if start < i { + *e = append(*e, s[start:i]...) + } + *e = append(*e, `\u202`...) + *e = append(*e, hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + *e = append(*e, s[start:]...) 
+ } + *e = append(*e, '"') +} + +var bytesPool = sync.Pool{} + +func (self *Node) MarshalJSON() ([]byte, error) { + buf := newBuffer() + err := self.encode(buf) + if err != nil { + freeBuffer(buf) + return nil, err + } + + ret := make([]byte, len(*buf)) + copy(ret, *buf) + freeBuffer(buf) + return ret, err +} + +func newBuffer() *[]byte { + if ret := bytesPool.Get(); ret != nil { + return ret.(*[]byte) + } else { + buf := make([]byte, 0, _MaxBuffer) + return &buf + } +} + +func freeBuffer(buf *[]byte) { + *buf = (*buf)[:0] + bytesPool.Put(buf) +} + +func (self *Node) encode(buf *[]byte) error { + if self.IsRaw() { + return self.encodeRaw(buf) + } + switch self.Type() { + case V_NONE : return ErrNotExist + case V_ERROR : return self.Check() + case V_NULL : return self.encodeNull(buf) + case V_TRUE : return self.encodeTrue(buf) + case V_FALSE : return self.encodeFalse(buf) + case V_ARRAY : return self.encodeArray(buf) + case V_OBJECT: return self.encodeObject(buf) + case V_STRING: return self.encodeString(buf) + case V_NUMBER: return self.encodeNumber(buf) + case V_ANY : return self.encodeInterface(buf) + default : return ErrUnsupportType + } +} + +func (self *Node) encodeRaw(buf *[]byte) error { + raw, err := self.Raw() + if err != nil { + return err + } + *buf = append(*buf, raw...) + return nil +} + +func (self *Node) encodeNull(buf *[]byte) error { + *buf = append(*buf, bytesNull...) + return nil +} + +func (self *Node) encodeTrue(buf *[]byte) error { + *buf = append(*buf, bytesTrue...) + return nil +} + +func (self *Node) encodeFalse(buf *[]byte) error { + *buf = append(*buf, bytesFalse...) + return nil +} + +func (self *Node) encodeNumber(buf *[]byte) error { + str := rt.StrFrom(self.p, self.v) + *buf = append(*buf, str...) + return nil +} + +func (self *Node) encodeString(buf *[]byte) error { + if self.v == 0 { + *buf = append(*buf, '"', '"') + return nil + } + + quote(buf, rt.StrFrom(self.p, self.v)) + return nil +} + +func (self *Node) encodeArray(buf *[]byte) error { + if self.isLazy() { + if err := self.skipAllIndex(); err != nil { + return err + } + } + + nb := self.len() + if nb == 0 { + *buf = append(*buf, bytesArray...) + return nil + } + + *buf = append(*buf, '[') + + var p = (*Node)(self.p) + err := p.encode(buf) + if err != nil { + return err + } + for i := 1; i < nb; i++ { + *buf = append(*buf, ',') + p = p.unsafe_next() + err := p.encode(buf) + if err != nil { + return err + } + } + + *buf = append(*buf, ']') + return nil +} + +func (self *Pair) encode(buf *[]byte) error { + if len(*buf) == 0 { + *buf = append(*buf, '"', '"', ':') + return self.Value.encode(buf) + } + + quote(buf, self.Key) + *buf = append(*buf, ':') + + return self.Value.encode(buf) +} + +func (self *Node) encodeObject(buf *[]byte) error { + if self.isLazy() { + if err := self.skipAllKey(); err != nil { + return err + } + } + + nb := self.len() + if nb == 0 { + *buf = append(*buf, bytesObject...) 
+ return nil + } + + *buf = append(*buf, '{') + + var p = (*Pair)(self.p) + err := p.encode(buf) + if err != nil { + return err + } + for i := 1; i < nb; i++ { + *buf = append(*buf, ',') + p = p.unsafe_next() + err := p.encode(buf) + if err != nil { + return err + } + } + + *buf = append(*buf, '}') + return nil +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/ast/error.go b/vendor/github.com/bytedance/sonic/ast/error.go new file mode 100644 index 0000000..f4c441a --- /dev/null +++ b/vendor/github.com/bytedance/sonic/ast/error.go @@ -0,0 +1,98 @@ +package ast + +import ( + `fmt` + `strings` + `unsafe` + + `github.com/bytedance/sonic/internal/native/types` +) + +func (self *Parser) syntaxError(err types.ParsingError) SyntaxError { + return SyntaxError{ + Pos : self.p, + Src : self.s, + Code: err, + } +} + +func newSyntaxError(err SyntaxError) *Node { + msg := err.Description() + return &Node{ + t: V_ERROR, + v: int64(err.Code), + p: unsafe.Pointer(&msg), + } +} + +type SyntaxError struct { + Pos int + Src string + Code types.ParsingError + Msg string +} + +func (self SyntaxError) Error() string { + return fmt.Sprintf("%q", self.Description()) +} + +func (self SyntaxError) Description() string { + return "Syntax error " + self.description() +} + +func (self SyntaxError) description() string { + i := 16 + p := self.Pos - i + q := self.Pos + i + + /* check for empty source */ + if self.Src == "" { + return fmt.Sprintf("no sources available: %#v", self) + } + + /* prevent slicing before the beginning */ + if p < 0 { + p, q, i = 0, q - p, i + p + } + + /* prevent slicing beyond the end */ + if n := len(self.Src); q > n { + n = q - n + q = len(self.Src) + + /* move the left bound if possible */ + if p > n { + i += n + p -= n + } + } + + /* left and right length */ + x := clamp_zero(i) + y := clamp_zero(q - p - i - 1) + + /* compose the error description */ + return fmt.Sprintf( + "at index %d: %s\n\n\t%s\n\t%s^%s\n", + self.Pos, + self.Message(), + self.Src[p:q], + strings.Repeat(".", x), + strings.Repeat(".", y), + ) +} + +func (self SyntaxError) Message() string { + if self.Msg == "" { + return self.Code.Message() + } + return self.Msg +} + +func clamp_zero(v int) int { + if v < 0 { + return 0 + } else { + return v + } +} diff --git a/vendor/github.com/bytedance/sonic/ast/iterator.go b/vendor/github.com/bytedance/sonic/ast/iterator.go new file mode 100644 index 0000000..03a25b4 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/ast/iterator.go @@ -0,0 +1,164 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package ast
+
+import (
+    `fmt`
+
+    `github.com/bytedance/sonic/internal/native/types`
+)
+
+type Pair struct {
+    Key   string
+    Value Node
+}
+
+// Values returns an iterator for traversing an array node's children
+func (self *Node) Values() (ListIterator, error) {
+    if err := self.should(types.V_ARRAY, "an array"); err != nil {
+        return ListIterator{}, err
+    }
+    return ListIterator{Iterator{p: self}}, nil
+}
+
+// Properties returns an iterator for traversing an object node's children
+func (self *Node) Properties() (ObjectIterator, error) {
+    if err := self.should(types.V_OBJECT, "an object"); err != nil {
+        return ObjectIterator{}, err
+    }
+    return ObjectIterator{Iterator{p: self}}, nil
+}
+
+type Iterator struct {
+    i int
+    p *Node
+}
+
+func (self *Iterator) Pos() int {
+    return self.i
+}
+
+func (self *Iterator) Len() int {
+    return self.p.len()
+}
+
+// HasNext reports whether there are more elements to iterate;
+// it returns false at the end of iteration or on an invalid node.
+func (self *Iterator) HasNext() bool {
+    if !self.p.isLazy() {
+        return self.p.Valid() && self.i < self.p.len()
+    } else if self.p.t == _V_ARRAY_LAZY {
+        return self.p.skipNextNode().Valid()
+    } else if self.p.t == _V_OBJECT_LAZY {
+        pair := self.p.skipNextPair()
+        if pair == nil {
+            return false
+        }
+        return pair.Value.Valid()
+    }
+    return false
+}
+
+// ListIterator is a specialized iterator for V_ARRAY
+type ListIterator struct {
+    Iterator
+}
+
+// ObjectIterator is a specialized iterator for V_OBJECT
+type ObjectIterator struct {
+    Iterator
+}
+
+// Next scans through children of the underlying V_ARRAY,
+// copies each child to v, and returns .HasNext().
+func (self *ListIterator) Next(v *Node) bool {
+    if !self.HasNext() {
+        return false
+    } else {
+        *v, self.i = *self.p.nodeAt(self.i), self.i + 1
+        return true
+    }
+}
+
+// Next scans through children of the underlying V_OBJECT,
+// copies each child to p, and returns .HasNext().
+func (self *ObjectIterator) Next(p *Pair) bool {
+    if !self.HasNext() {
+        return false
+    } else {
+        *p, self.i = *self.p.pairAt(self.i), self.i + 1
+        return true
+    }
+}
+
+// Sequence represents the scanning path of single-layer nodes.
+// Index indicates the value's order in both V_ARRAY and V_OBJECT json.
+// Key is the value's key (for V_OBJECT json only, otherwise it will be nil).
+type Sequence struct {
+    Index int
+    Key *string
+    // Level int
+}
+
+// String is the string representation of a Sequence
+func (s Sequence) String() string {
+    k := ""
+    if s.Key != nil {
+        k = *s.Key
+    }
+    return fmt.Sprintf("Sequence(%d, %q)", s.Index, k)
+}
+
+type Scanner func(path Sequence, node *Node) bool
+
+// ForEach scans the children of a V_ARRAY or V_OBJECT node from JSON head to tail,
+// and passes the Sequence and Node of each corresponding JSON value to sc.
+//
+// In particular, if the node is neither V_ARRAY nor V_OBJECT,
+// the node itself will be passed and Sequence.Index == -1.
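+//
+// A minimal usage sketch (illustrative only; 'node' is any parsed *Node):
+//
+//     count := 0
+//     _ = node.ForEach(func(path Sequence, child *Node) bool {
+//         count++        // visit each child in document order
+//         return true    // return false to stop the scan early
+//     })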
+func (self *Node) ForEach(sc Scanner) error { + switch self.itype() { + case types.V_ARRAY: + ns, err := self.UnsafeArray() + if err != nil { + return err + } + for i := range ns { + if !sc(Sequence{i, nil}, &ns[i]) { + return err + } + } + case types.V_OBJECT: + ns, err := self.UnsafeMap() + if err != nil { + return err + } + for i := range ns { + if !sc(Sequence{i, &ns[i].Key}, &ns[i].Value) { + return err + } + } + default: + sc(Sequence{-1, nil}, self) + } + return self.Check() +} + +type PairSlice []Pair + +func (self PairSlice) Sort() { + radixQsort(self, 0, maxDepth(len(self))) +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/ast/node.go b/vendor/github.com/bytedance/sonic/ast/node.go new file mode 100644 index 0000000..0d37baf --- /dev/null +++ b/vendor/github.com/bytedance/sonic/ast/node.go @@ -0,0 +1,1802 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ast + +import ( + `encoding/json` + `fmt` + `strconv` + `unsafe` + `reflect` + + `github.com/bytedance/sonic/internal/native/types` + `github.com/bytedance/sonic/internal/rt` +) + +const ( + _CAP_BITS = 32 + _LEN_MASK = 1 << _CAP_BITS - 1 + + _NODE_SIZE = unsafe.Sizeof(Node{}) + _PAIR_SIZE = unsafe.Sizeof(Pair{}) +) + +const ( + _V_NONE types.ValueType = 0 + _V_NODE_BASE types.ValueType = 1 << 5 + _V_LAZY types.ValueType = 1 << 7 + _V_RAW types.ValueType = 1 << 8 + _V_NUMBER = _V_NODE_BASE + 1 + _V_ANY = _V_NODE_BASE + 2 + _V_ARRAY_LAZY = _V_LAZY | types.V_ARRAY + _V_OBJECT_LAZY = _V_LAZY | types.V_OBJECT + _MASK_LAZY = _V_LAZY - 1 + _MASK_RAW = _V_RAW - 1 +) + +const ( + V_NONE = 0 + V_ERROR = 1 + V_NULL = 2 + V_TRUE = 3 + V_FALSE = 4 + V_ARRAY = 5 + V_OBJECT = 6 + V_STRING = 7 + V_NUMBER = int(_V_NUMBER) + V_ANY = int(_V_ANY) +) + +var ( + byteType = rt.UnpackType(reflect.TypeOf(byte(0))) +) + +type Node struct { + v int64 + t types.ValueType + p unsafe.Pointer +} + +// UnmarshalJSON is just an adapter to json.Unmarshaler. 
+// If you want better performance, use Searcher.GetByPath() directly +func (self *Node) UnmarshalJSON(data []byte) (err error) { + *self, err = NewSearcher(string(data)).GetByPath() + return +} + +/** Node Type Accessor **/ + +// Type returns json type represented by the node +// It will be one of belows: +// V_NONE = 0 (empty node) +// V_ERROR = 1 (error node) +// V_NULL = 2 (json value `null`) +// V_TRUE = 3 (json value `true`) +// V_FALSE = 4 (json value `false`) +// V_ARRAY = 5 (json value array) +// V_OBJECT = 6 (json value object) +// V_STRING = 7 (json value string) +// V_NUMBER = 33 (json value number ) +// V_ANY = 34 (golang interface{}) +func (self Node) Type() int { + return int(self.t & _MASK_LAZY & _MASK_RAW) +} + +func (self Node) itype() types.ValueType { + return self.t & _MASK_LAZY & _MASK_RAW +} + +// Exists returns false only if the self is nil or empty node V_NONE +func (self *Node) Exists() bool { + return self != nil && self.t != _V_NONE +} + +// Valid reports if self is NOT V_ERROR or nil +func (self *Node) Valid() bool { + if self == nil { + return false + } + return self.t != V_ERROR +} + +// Check checks if the node itself is valid, and return: +// - ErrNotFound If the node is nil +// - Its underlying error If the node is V_ERROR +func (self *Node) Check() error { + if self == nil { + return ErrNotExist + } else if self.t != V_ERROR { + return nil + } else { + return self + } +} + +// Error returns error message if the node is invalid +func (self Node) Error() string { + if self.t != V_ERROR { + return "" + } else { + return *(*string)(self.p) + } +} + +// IsRaw returns true if node's underlying value is raw json +func (self Node) IsRaw() bool { + return self.t&_V_RAW != 0 +} + +func (self *Node) isLazy() bool { + return self != nil && self.t&_V_LAZY != 0 +} + +func (self *Node) isAny() bool { + return self != nil && self.t == _V_ANY +} + +/** Simple Value Methods **/ + +// Raw returns json representation of the node, +func (self *Node) Raw() (string, error) { + if !self.IsRaw() { + buf, err := self.MarshalJSON() + return rt.Mem2Str(buf), err + } + return rt.StrFrom(self.p, self.v), nil +} + +func (self *Node) checkRaw() error { + if err := self.Check(); err != nil { + return err + } + if self.IsRaw() { + self.parseRaw(false) + } + return nil +} + +// Bool returns bool value represented by this node, +// including types.V_TRUE|V_FALSE|V_NUMBER|V_STRING|V_ANY|V_NULL, +// V_NONE will return error +func (self *Node) Bool() (bool, error) { + if err := self.checkRaw(); err != nil { + return false, err + } + switch self.t { + case types.V_TRUE : return true , nil + case types.V_FALSE : return false, nil + case types.V_NULL : return false, nil + case _V_NUMBER : + if i, err := numberToInt64(self); err == nil { + return i != 0, nil + } else if f, err := numberToFloat64(self); err == nil { + return f != 0, nil + } else { + return false, err + } + case types.V_STRING: return strconv.ParseBool(rt.StrFrom(self.p, self.v)) + case _V_ANY : + any := self.packAny() + switch v := any.(type) { + case bool : return v, nil + case int : return v != 0, nil + case int8 : return v != 0, nil + case int16 : return v != 0, nil + case int32 : return v != 0, nil + case int64 : return v != 0, nil + case uint : return v != 0, nil + case uint8 : return v != 0, nil + case uint16 : return v != 0, nil + case uint32 : return v != 0, nil + case uint64 : return v != 0, nil + case float32: return v != 0, nil + case float64: return v != 0, nil + case string : return strconv.ParseBool(v) + case 
json.Number: + if i, err := v.Int64(); err == nil { + return i != 0, nil + } else if f, err := v.Float64(); err == nil { + return f != 0, nil + } else { + return false, err + } + default: return false, ErrUnsupportType + } + default : return false, ErrUnsupportType + } +} + +// Int64 casts the node to int64 value, +// including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING +// V_NONE it will return error +func (self *Node) Int64() (int64, error) { + if err := self.checkRaw(); err != nil { + return 0, err + } + switch self.t { + case _V_NUMBER, types.V_STRING : + if i, err := numberToInt64(self); err == nil { + return i, nil + } else if f, err := numberToFloat64(self); err == nil { + return int64(f), nil + } else { + return 0, err + } + case types.V_TRUE : return 1, nil + case types.V_FALSE : return 0, nil + case types.V_NULL : return 0, nil + case _V_ANY : + any := self.packAny() + switch v := any.(type) { + case bool : if v { return 1, nil } else { return 0, nil } + case int : return int64(v), nil + case int8 : return int64(v), nil + case int16 : return int64(v), nil + case int32 : return int64(v), nil + case int64 : return int64(v), nil + case uint : return int64(v), nil + case uint8 : return int64(v), nil + case uint16 : return int64(v), nil + case uint32 : return int64(v), nil + case uint64 : return int64(v), nil + case float32: return int64(v), nil + case float64: return int64(v), nil + case string : + if i, err := strconv.ParseInt(v, 10, 64); err == nil { + return i, nil + } else if f, err := strconv.ParseFloat(v, 64); err == nil { + return int64(f), nil + } else { + return 0, err + } + case json.Number: + if i, err := v.Int64(); err == nil { + return i, nil + } else if f, err := v.Float64(); err == nil { + return int64(f), nil + } else { + return 0, err + } + default: return 0, ErrUnsupportType + } + default : return 0, ErrUnsupportType + } +} + +// StrictInt64 exports underlying int64 value, including V_NUMBER, V_ANY +func (self *Node) StrictInt64() (int64, error) { + if err := self.checkRaw(); err != nil { + return 0, err + } + switch self.t { + case _V_NUMBER : return numberToInt64(self) + case _V_ANY : + any := self.packAny() + switch v := any.(type) { + case int : return int64(v), nil + case int8 : return int64(v), nil + case int16 : return int64(v), nil + case int32 : return int64(v), nil + case int64 : return int64(v), nil + case uint : return int64(v), nil + case uint8 : return int64(v), nil + case uint16: return int64(v), nil + case uint32: return int64(v), nil + case uint64: return int64(v), nil + case json.Number: + if i, err := v.Int64(); err == nil { + return i, nil + } else { + return 0, err + } + default: return 0, ErrUnsupportType + } + default : return 0, ErrUnsupportType + } +} + +func castNumber(v bool) json.Number { + if v { + return json.Number("1") + } else { + return json.Number("0") + } +} + +// Number casts node to float64, +// including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING|V_NULL, +// V_NONE it will return error +func (self *Node) Number() (json.Number, error) { + if err := self.checkRaw(); err != nil { + return json.Number(""), err + } + switch self.t { + case _V_NUMBER : return toNumber(self) , nil + case types.V_STRING : + if _, err := numberToInt64(self); err == nil { + return toNumber(self), nil + } else if _, err := numberToFloat64(self); err == nil { + return toNumber(self), nil + } else { + return json.Number(""), err + } + case types.V_TRUE : return json.Number("1"), nil + case types.V_FALSE : return json.Number("0"), nil + case types.V_NULL : return 
json.Number("0"), nil + case _V_ANY : + any := self.packAny() + switch v := any.(type) { + case bool : return castNumber(v), nil + case int : return castNumber(v != 0), nil + case int8 : return castNumber(v != 0), nil + case int16 : return castNumber(v != 0), nil + case int32 : return castNumber(v != 0), nil + case int64 : return castNumber(v != 0), nil + case uint : return castNumber(v != 0), nil + case uint8 : return castNumber(v != 0), nil + case uint16 : return castNumber(v != 0), nil + case uint32 : return castNumber(v != 0), nil + case uint64 : return castNumber(v != 0), nil + case float32: return castNumber(v != 0), nil + case float64: return castNumber(v != 0), nil + case string : + if _, err := strconv.ParseFloat(v, 64); err == nil { + return json.Number(v), nil + } else { + return json.Number(""), err + } + case json.Number: return v, nil + default: return json.Number(""), ErrUnsupportType + } + default : return json.Number(""), ErrUnsupportType + } +} + +// Number exports underlying float64 value, including V_NUMBER, V_ANY of json.Number +func (self *Node) StrictNumber() (json.Number, error) { + if err := self.checkRaw(); err != nil { + return json.Number(""), err + } + switch self.t { + case _V_NUMBER : return toNumber(self) , nil + case _V_ANY : + if v, ok := self.packAny().(json.Number); ok { + return v, nil + } else { + return json.Number(""), ErrUnsupportType + } + default : return json.Number(""), ErrUnsupportType + } +} + +// String cast node to string, +// including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING|V_NULL, +// V_NONE it will return error +func (self *Node) String() (string, error) { + if err := self.checkRaw(); err != nil { + return "", err + } + switch self.t { + case types.V_NULL : return "" , nil + case types.V_TRUE : return "true" , nil + case types.V_FALSE : return "false", nil + case types.V_STRING, _V_NUMBER : return rt.StrFrom(self.p, self.v), nil + case _V_ANY : + any := self.packAny() + switch v := any.(type) { + case bool : return strconv.FormatBool(v), nil + case int : return strconv.Itoa(v), nil + case int8 : return strconv.Itoa(int(v)), nil + case int16 : return strconv.Itoa(int(v)), nil + case int32 : return strconv.Itoa(int(v)), nil + case int64 : return strconv.Itoa(int(v)), nil + case uint : return strconv.Itoa(int(v)), nil + case uint8 : return strconv.Itoa(int(v)), nil + case uint16 : return strconv.Itoa(int(v)), nil + case uint32 : return strconv.Itoa(int(v)), nil + case uint64 : return strconv.Itoa(int(v)), nil + case float32: return strconv.FormatFloat(float64(v), 'g', -1, 64), nil + case float64: return strconv.FormatFloat(float64(v), 'g', -1, 64), nil + case string : return v, nil + case json.Number: return v.String(), nil + default: return "", ErrUnsupportType + } + default : return "" , ErrUnsupportType + } +} + +// StrictString returns string value (unescaped), includeing V_STRING, V_ANY of string. +// In other cases, it will return empty string. 
+func (self *Node) StrictString() (string, error) { + if err := self.checkRaw(); err != nil { + return "", err + } + switch self.t { + case types.V_STRING : return rt.StrFrom(self.p, self.v), nil + case _V_ANY : + if v, ok := self.packAny().(string); ok { + return v, nil + } else { + return "", ErrUnsupportType + } + default : return "", ErrUnsupportType + } +} + +// Float64 cast node to float64, +// including V_NUMBER|V_TRUE|V_FALSE|V_ANY|V_STRING|V_NULL, +// V_NONE it will return error +func (self *Node) Float64() (float64, error) { + if err := self.checkRaw(); err != nil { + return 0.0, err + } + switch self.t { + case _V_NUMBER, types.V_STRING : return numberToFloat64(self) + case types.V_TRUE : return 1.0, nil + case types.V_FALSE : return 0.0, nil + case types.V_NULL : return 0.0, nil + case _V_ANY : + any := self.packAny() + switch v := any.(type) { + case bool : + if v { + return 1.0, nil + } else { + return 0.0, nil + } + case int : return float64(v), nil + case int8 : return float64(v), nil + case int16 : return float64(v), nil + case int32 : return float64(v), nil + case int64 : return float64(v), nil + case uint : return float64(v), nil + case uint8 : return float64(v), nil + case uint16 : return float64(v), nil + case uint32 : return float64(v), nil + case uint64 : return float64(v), nil + case float32: return float64(v), nil + case float64: return float64(v), nil + case string : + if f, err := strconv.ParseFloat(v, 64); err == nil { + return float64(f), nil + } else { + return 0, err + } + case json.Number: + if f, err := v.Float64(); err == nil { + return float64(f), nil + } else { + return 0, err + } + default : return 0, ErrUnsupportType + } + default : return 0.0, ErrUnsupportType + } +} + +// Float64 exports underlying float64 value, includeing V_NUMBER, V_ANY +func (self *Node) StrictFloat64() (float64, error) { + if err := self.checkRaw(); err != nil { + return 0.0, err + } + switch self.t { + case _V_NUMBER : return numberToFloat64(self) + case _V_ANY : + any := self.packAny() + switch v := any.(type) { + case float32 : return float64(v), nil + case float64 : return float64(v), nil + default : return 0, ErrUnsupportType + } + default : return 0.0, ErrUnsupportType + } +} + +/** Sequencial Value Methods **/ + +// Len returns children count of a array|object|string node +// For partially loaded node, it also works but only counts the parsed children +func (self *Node) Len() (int, error) { + if err := self.checkRaw(); err != nil { + return 0, err + } + if self.t == types.V_ARRAY || self.t == types.V_OBJECT || self.t == _V_ARRAY_LAZY || self.t == _V_OBJECT_LAZY { + return int(self.v & _LEN_MASK), nil + } else if self.t == types.V_STRING { + return int(self.v), nil + } else if self.t == _V_NONE || self.t == types.V_NULL { + return 0, nil + } else { + return 0, ErrUnsupportType + } +} + +func (self Node) len() int { + return int(self.v & _LEN_MASK) +} + +// Cap returns malloc capacity of a array|object node for children +func (self *Node) Cap() (int, error) { + if err := self.checkRaw(); err != nil { + return 0, err + } + if self.t == types.V_ARRAY || self.t == types.V_OBJECT || self.t == _V_ARRAY_LAZY || self.t == _V_OBJECT_LAZY { + return int(self.v >> _CAP_BITS), nil + } else if self.t == _V_NONE || self.t == types.V_NULL { + return 0, nil + } else { + return 0, ErrUnsupportType + } +} + +func (self Node) cap() int { + return int(self.v >> _CAP_BITS) +} + +// Set sets the node of given key under self, and reports if the key has existed. 
+// +// If self is V_NONE or V_NULL, it becomes V_OBJECT and sets the node at the key. +func (self *Node) Set(key string, node Node) (bool, error) { + if self != nil && (self.t == _V_NONE || self.t == types.V_NULL) { + *self = NewObject([]Pair{{key, node}}) + return false, nil + } + + if err := node.Check(); err != nil { + return false, err + } + + p := self.Get(key) + if !p.Exists() { + l := self.len() + c := self.cap() + if l == c { + // TODO: maybe change append size in future + c += _DEFAULT_NODE_CAP + mem := unsafe_NewArray(_PAIR_TYPE, c) + memmove(mem, self.p, _PAIR_SIZE * uintptr(l)) + self.p = mem + } + v := self.pairAt(l) + v.Key = key + v.Value = node + self.setCapAndLen(c, l+1) + return false, nil + + } else if err := p.Check(); err != nil { + return false, err + } + + *p = node + return true, nil +} + +// SetAny wraps val with V_ANY node, and Set() the node. +func (self *Node) SetAny(key string, val interface{}) (bool, error) { + return self.Set(key, NewAny(val)) +} + +// Unset remove the node of given key under object parent, and reports if the key has existed. +func (self *Node) Unset(key string) (bool, error) { + self.must(types.V_OBJECT, "an object") + p, i := self.skipKey(key) + if !p.Exists() { + return false, nil + } else if err := p.Check(); err != nil { + return false, err + } + + self.removePair(i) + return true, nil +} + +// SetByIndex sets the node of given index, and reports if the key has existed. +// +// The index must be within self's children. +func (self *Node) SetByIndex(index int, node Node) (bool, error) { + if err := node.Check(); err != nil { + return false, err + } + + p := self.Index(index) + if !p.Exists() { + return false, ErrNotExist + } else if err := p.Check(); err != nil { + return false, err + } + + *p = node + return true, nil +} + +// SetAny wraps val with V_ANY node, and SetByIndex() the node. +func (self *Node) SetAnyByIndex(index int, val interface{}) (bool, error) { + return self.SetByIndex(index, NewAny(val)) +} + +// UnsetByIndex remove the node of given index +func (self *Node) UnsetByIndex(index int) (bool, error) { + var p *Node + it := self.itype() + if it == types.V_ARRAY { + p = self.Index(index) + }else if it == types.V_OBJECT { + pr := self.skipIndexPair(index) + if pr == nil { + return false, ErrNotExist + } + p = &pr.Value + } else { + return false, ErrUnsupportType + } + + if !p.Exists() { + return false, ErrNotExist + } + + if it == types.V_ARRAY { + self.removeNode(index) + }else if it == types.V_OBJECT { + self.removePair(index) + } + return true, nil +} + +// Add appends the given node under self. +// +// If self is V_NONE or V_NULL, it becomes V_ARRAY and sets the node at index 0. +func (self *Node) Add(node Node) error { + if self != nil && (self.t == _V_NONE || self.t == types.V_NULL) { + *self = NewArray([]Node{node}) + return nil + } + + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return err + } + if err := self.skipAllIndex(); err != nil { + return err + } + + var p rt.GoSlice + p.Cap = self.cap() + p.Len = self.len() + p.Ptr = self.p + + s := *(*[]Node)(unsafe.Pointer(&p)) + s = append(s, node) + + self.p = unsafe.Pointer(&s[0]) + self.setCapAndLen(cap(s), len(s)) + return nil +} + +// SetAny wraps val with V_ANY node, and Add() the node. +func (self *Node) AddAny(val interface{}) error { + return self.Add(NewAny(val)) +} + +// GetByPath load given path on demands, +// which only ensure nodes before this path got parsed. 
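+//
+// For example (illustrative): root.GetByPath("statuses", 3, "user") descends
+// into object key "statuses", then array index 3, then object key "user".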
+// +// Note, the api expects the json is well-formed at least, +// otherwise it may return unexpected result. +func (self *Node) GetByPath(path ...interface{}) *Node { + if !self.Valid() { + return self + } + var s = self + for _, p := range path { + switch p := p.(type) { + case int: + s = s.Index(p) + if !s.Valid() { + return s + } + case string: + s = s.Get(p) + if !s.Valid() { + return s + } + default: + panic("path must be either int or string") + } + } + return s +} + +// Get loads given key of an object node on demands +func (self *Node) Get(key string) *Node { + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return unwrapError(err) + } + n, _ := self.skipKey(key) + return n +} + +// Index indexies node at given idx, +// node type CAN be either V_OBJECT or V_ARRAY +func (self *Node) Index(idx int) *Node { + if err := self.checkRaw(); err != nil { + return unwrapError(err) + } + + it := self.itype() + if it == types.V_ARRAY { + return self.skipIndex(idx) + + }else if it == types.V_OBJECT { + pr := self.skipIndexPair(idx) + if pr == nil { + return newError(_ERR_NOT_FOUND, "value not exists") + } + return &pr.Value + + } else { + return newError(_ERR_UNSUPPORT_TYPE, fmt.Sprintf("unsupported type: %v", self.itype())) + } +} + +// IndexPair indexies pair at given idx, +// node type MUST be either V_OBJECT +func (self *Node) IndexPair(idx int) *Pair { + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return nil + } + return self.skipIndexPair(idx) +} + +// IndexOrGet firstly use idx to index a value and check if its key matches +// If not, then use the key to search value +func (self *Node) IndexOrGet(idx int, key string) *Node { + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return unwrapError(err) + } + + pr := self.skipIndexPair(idx) + if pr != nil && pr.Key == key { + return &pr.Value + } + n, _ := self.skipKey(key) + return n +} + +/** Generic Value Converters **/ + +// Map loads all keys of an object node +func (self *Node) Map() (map[string]interface{}, error) { + if self.isAny() { + any := self.packAny() + if v, ok := any.(map[string]interface{}); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return nil, err + } + if err := self.loadAllKey(); err != nil { + return nil, err + } + return self.toGenericObject() +} + +// MapUseNumber loads all keys of an object node, with numeric nodes casted to json.Number +func (self *Node) MapUseNumber() (map[string]interface{}, error) { + if self.isAny() { + any := self.packAny() + if v, ok := any.(map[string]interface{}); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return nil, err + } + if err := self.loadAllKey(); err != nil { + return nil, err + } + return self.toGenericObjectUseNumber() +} + +// MapUseNode scans both parsed and non-parsed chidren nodes, +// and map them by their keys +func (self *Node) MapUseNode() (map[string]Node, error) { + if self.isAny() { + any := self.packAny() + if v, ok := any.(map[string]Node); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return nil, err + } + if err := self.skipAllKey(); err != nil { + return nil, err + } + return self.toGenericObjectUseNode() +} + +// MapUnsafe exports the underlying pointer to its children map +// WARN: don't use it unless you know what you 
are doing +func (self *Node) UnsafeMap() ([]Pair, error) { + if err := self.should(types.V_OBJECT, "an object"); err != nil { + return nil, err + } + if err := self.skipAllKey(); err != nil { + return nil, err + } + s := rt.Ptr2SlicePtr(self.p, int(self.len()), self.cap()) + return *(*[]Pair)(s), nil +} + +// SortKeys sorts children of a V_OBJECT node in ascending key-order. +// If recurse is true, it recursively sorts children's children as long as a V_OBJECT node is found. +func (self *Node) SortKeys(recurse bool) (err error) { + ps, err := self.UnsafeMap() + if err != nil { + return err + } + PairSlice(ps).Sort() + if recurse { + var sc Scanner + sc = func(path Sequence, node *Node) bool { + if node.itype() == types.V_OBJECT { + if err := node.SortKeys(recurse); err != nil { + return false + } + } + if node.itype() == types.V_ARRAY { + if err := node.ForEach(sc); err != nil { + return false + } + } + return true + } + self.ForEach(sc) + } + return nil +} + +// Array loads all indexes of an array node +func (self *Node) Array() ([]interface{}, error) { + if self.isAny() { + any := self.packAny() + if v, ok := any.([]interface{}); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return nil, err + } + if err := self.loadAllIndex(); err != nil { + return nil, err + } + return self.toGenericArray() +} + +// ArrayUseNumber loads all indexes of an array node, with numeric nodes casted to json.Number +func (self *Node) ArrayUseNumber() ([]interface{}, error) { + if self.isAny() { + any := self.packAny() + if v, ok := any.([]interface{}); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return nil, err + } + if err := self.loadAllIndex(); err != nil { + return nil, err + } + return self.toGenericArrayUseNumber() +} + +// ArrayUseNode copys both parsed and non-parsed chidren nodes, +// and indexes them by original order +func (self *Node) ArrayUseNode() ([]Node, error) { + if self.isAny() { + any := self.packAny() + if v, ok := any.([]Node); ok { + return v, nil + } else { + return nil, ErrUnsupportType + } + } + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return nil, err + } + if err := self.skipAllIndex(); err != nil { + return nil, err + } + return self.toGenericArrayUseNode() +} + +// ArrayUnsafe exports the underlying pointer to its children array +// WARN: don't use it unless you know what you are doing +func (self *Node) UnsafeArray() ([]Node, error) { + if err := self.should(types.V_ARRAY, "an array"); err != nil { + return nil, err + } + if err := self.skipAllIndex(); err != nil { + return nil, err + } + s := rt.Ptr2SlicePtr(self.p, self.len(), self.cap()) + return *(*[]Node)(s), nil +} + +// Interface loads all children under all pathes from this node, +// and converts itself as generic type. 
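+// For example (illustrative), a node holding `{"a":[1]}` converts to
+// map[string]interface{}{"a": []interface{}{float64(1)}}.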
+// WARN: all numberic nodes are casted to float64 +func (self *Node) Interface() (interface{}, error) { + if err := self.checkRaw(); err != nil { + return nil, err + } + switch self.t { + case V_ERROR : return nil, self.Check() + case types.V_NULL : return nil, nil + case types.V_TRUE : return true, nil + case types.V_FALSE : return false, nil + case types.V_ARRAY : return self.toGenericArray() + case types.V_OBJECT : return self.toGenericObject() + case types.V_STRING : return rt.StrFrom(self.p, self.v), nil + case _V_NUMBER : + v, err := numberToFloat64(self) + if err != nil { + return nil, err + } + return v, nil + case _V_ARRAY_LAZY : + if err := self.loadAllIndex(); err != nil { + return nil, err + } + return self.toGenericArray() + case _V_OBJECT_LAZY : + if err := self.loadAllKey(); err != nil { + return nil, err + } + return self.toGenericObject() + case _V_ANY: + switch v := self.packAny().(type) { + case Node : return v.Interface() + case *Node: return v.Interface() + default : return v, nil + } + default : return nil, ErrUnsupportType + } +} + +func (self *Node) packAny() interface{} { + return *(*interface{})(self.p) +} + +// InterfaceUseNumber works same with Interface() +// except numberic nodes are casted to json.Number +func (self *Node) InterfaceUseNumber() (interface{}, error) { + if err := self.checkRaw(); err != nil { + return nil, err + } + switch self.t { + case V_ERROR : return nil, self.Check() + case types.V_NULL : return nil, nil + case types.V_TRUE : return true, nil + case types.V_FALSE : return false, nil + case types.V_ARRAY : return self.toGenericArrayUseNumber() + case types.V_OBJECT : return self.toGenericObjectUseNumber() + case types.V_STRING : return rt.StrFrom(self.p, self.v), nil + case _V_NUMBER : return toNumber(self), nil + case _V_ARRAY_LAZY : + if err := self.loadAllIndex(); err != nil { + return nil, err + } + return self.toGenericArrayUseNumber() + case _V_OBJECT_LAZY : + if err := self.loadAllKey(); err != nil { + return nil, err + } + return self.toGenericObjectUseNumber() + case _V_ANY : return self.packAny(), nil + default : return nil, ErrUnsupportType + } +} + +// InterfaceUseNode clone itself as a new node, +// or its children as map[string]Node (or []Node) +func (self *Node) InterfaceUseNode() (interface{}, error) { + if err := self.checkRaw(); err != nil { + return nil, err + } + switch self.t { + case types.V_ARRAY : return self.toGenericArrayUseNode() + case types.V_OBJECT : return self.toGenericObjectUseNode() + case _V_ARRAY_LAZY : + if err := self.skipAllIndex(); err != nil { + return nil, err + } + return self.toGenericArrayUseNode() + case _V_OBJECT_LAZY : + if err := self.skipAllKey(); err != nil { + return nil, err + } + return self.toGenericObjectUseNode() + default : return *self, self.Check() + } +} + +// LoadAll loads all the node's children and children's children as parsed. 
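+// That is, it eagerly parses every raw or lazy descendant, recursively.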
+// LoadAll loads all the node's children and children's children as parsed.
+// After calling it, the node can be safely used concurrently
+func (self *Node) LoadAll() error {
+    if self.IsRaw() {
+        self.parseRaw(true)
+        return self.Check()
+    }
+
+    switch self.itype() {
+    case types.V_ARRAY:
+        e := self.len()
+        if err := self.loadAllIndex(); err != nil {
+            return err
+        }
+        for i := 0; i < e; i++ {
+            n := self.nodeAt(i)
+            if n.IsRaw() {
+                n.parseRaw(true)
+            }
+            if err := n.Check(); err != nil {
+                return err
+            }
+        }
+        return nil
+    case types.V_OBJECT:
+        e := self.len()
+        if err := self.loadAllKey(); err != nil {
+            return err
+        }
+        for i := 0; i < e; i++ {
+            n := self.pairAt(i)
+            if n.Value.IsRaw() {
+                n.Value.parseRaw(true)
+            }
+            if err := n.Value.Check(); err != nil {
+                return err
+            }
+        }
+        return nil
+    default:
+        return self.Check()
+    }
+}
+
+// Load loads the node's children as parsed.
+// After calling it, only the node itself can be used concurrently (not including its children)
+func (self *Node) Load() error {
+    if self.IsRaw() {
+        self.parseRaw(false)
+        return self.Load()
+    }
+
+    switch self.t {
+    case _V_ARRAY_LAZY:
+        return self.skipAllIndex()
+    case _V_OBJECT_LAZY:
+        return self.skipAllKey()
+    default:
+        return self.Check()
+    }
+}
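Why LoadAll matters: a lazy node rewrites itself in place on first access, so sharing it across goroutines without a full parse is a data race. A sketch of the intended pattern (an illustration, not from the upstream docs):

package main

import (
    "fmt"
    "sync"

    "github.com/bytedance/sonic/ast"
)

func main() {
    root, _ := ast.NewParser(`{"a":[1,2,3],"b":{"c":true}}`).Parse()

    // Materialize every child first; afterwards concurrent
    // read-only access is safe per the doc comment above.
    if err := root.LoadAll(); err != nil {
        panic(err)
    }

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            v, _ := root.Interface() // read-only after LoadAll
            fmt.Println(v)
        }()
    }
    wg.Wait()
}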
+/**---------------------------------- Internal Helper Methods ----------------------------------**/
+
+var (
+    _NODE_TYPE = rt.UnpackEface(Node{}).Type
+    _PAIR_TYPE = rt.UnpackEface(Pair{}).Type
+)
+
+func (self *Node) setCapAndLen(cap int, len int) {
+    if self.t == types.V_ARRAY || self.t == types.V_OBJECT || self.t == _V_ARRAY_LAZY || self.t == _V_OBJECT_LAZY {
+        self.v = int64(len&_LEN_MASK | cap<<_CAP_BITS)
+    } else {
+        panic("value does not have a length")
+    }
+}
+
+func (self *Node) unsafe_next() *Node {
+    return (*Node)(unsafe.Pointer(uintptr(unsafe.Pointer(self)) + _NODE_SIZE))
+}
+
+func (self *Pair) unsafe_next() *Pair {
+    return (*Pair)(unsafe.Pointer(uintptr(unsafe.Pointer(self)) + _PAIR_SIZE))
+}
+
+func (self *Node) must(t types.ValueType, s string) {
+    if err := self.checkRaw(); err != nil {
+        panic(err)
+    }
+    if err := self.Check(); err != nil {
+        panic(err)
+    }
+    if self.itype() != t {
+        panic("value cannot be represented as " + s)
+    }
+}
+
+func (self *Node) should(t types.ValueType, s string) error {
+    if err := self.checkRaw(); err != nil {
+        return err
+    }
+    if self.itype() != t {
+        return ErrUnsupportType
+    }
+    return nil
+}
+
+func (self *Node) nodeAt(i int) *Node {
+    var p = self.p
+    if self.isLazy() {
+        _, stack := self.getParserAndArrayStack()
+        p = *(*unsafe.Pointer)(unsafe.Pointer(&stack.v))
+    }
+    return (*Node)(unsafe.Pointer(uintptr(p) + uintptr(i)*_NODE_SIZE))
+}
+
+func (self *Node) pairAt(i int) *Pair {
+    var p = self.p
+    if self.isLazy() {
+        _, stack := self.getParserAndObjectStack()
+        p = *(*unsafe.Pointer)(unsafe.Pointer(&stack.v))
+    }
+    return (*Pair)(unsafe.Pointer(uintptr(p) + uintptr(i)*_PAIR_SIZE))
+}
+
+func (self *Node) getParserAndArrayStack() (*Parser, *parseArrayStack) {
+    stack := (*parseArrayStack)(self.p)
+    ret := (*rt.GoSlice)(unsafe.Pointer(&stack.v))
+    ret.Len = self.len()
+    ret.Cap = self.cap()
+    return &stack.parser, stack
+}
+
+func (self *Node) getParserAndObjectStack() (*Parser, *parseObjectStack) {
+    stack := (*parseObjectStack)(self.p)
+    ret := (*rt.GoSlice)(unsafe.Pointer(&stack.v))
+    ret.Len = self.len()
+    ret.Cap = self.cap()
+    return &stack.parser, stack
+}
+
+func (self *Node) skipAllIndex() error {
+    if !self.isLazy() {
+        return nil
+    }
+    var err types.ParsingError
+    parser, stack := self.getParserAndArrayStack()
+    parser.skipValue = true
+    parser.noLazy = true
+    *self, err = parser.decodeArray(stack.v)
+    if err != 0 {
+        return parser.ExportError(err)
+    }
+    return nil
+}
+
+func (self *Node) skipAllKey() error {
+    if !self.isLazy() {
+        return nil
+    }
+    var err types.ParsingError
+    parser, stack := self.getParserAndObjectStack()
+    parser.skipValue = true
+    parser.noLazy = true
+    *self, err = parser.decodeObject(stack.v)
+    if err != 0 {
+        return parser.ExportError(err)
+    }
+    return nil
+}
+
+func (self *Node) skipKey(key string) (*Node, int) {
+    nb := self.len()
+    lazy := self.isLazy()
+
+    if nb > 0 {
+        /* linear search */
+        var p *Pair
+        if lazy {
+            s := (*parseObjectStack)(self.p)
+            p = &s.v[0]
+        } else {
+            p = (*Pair)(self.p)
+        }
+
+        if p.Key == key {
+            return &p.Value, 0
+        }
+        for i := 1; i < nb; i++ {
+            p = p.unsafe_next()
+            if p.Key == key {
+                return &p.Value, i
+            }
+        }
+    }
+
+    /* not found */
+    if !lazy {
+        return nil, -1
+    }
+
+    // lazy load
+    for last, i := self.skipNextPair(), nb; last != nil; last, i = self.skipNextPair(), i+1 {
+        if last.Value.Check() != nil {
+            return &last.Value, -1
+        }
+        if last.Key == key {
+            return &last.Value, i
+        }
+    }
+
+    return nil, -1
+}
+
+func (self *Node) skipIndex(index int) *Node {
+    nb := self.len()
+    if nb > index {
+        v := self.nodeAt(index)
+        return v
+    }
+    if !self.isLazy() {
+        return nil
+    }
+
+    // lazy load
+    for last := self.skipNextNode(); last != nil; last = self.skipNextNode() {
+        if last.Check() != nil {
+            return last
+        }
+        if self.len() > index {
+            return last
+        }
+    }
+
+    return nil
+}
+
+func (self *Node) skipIndexPair(index int) *Pair {
+    nb := self.len()
+    if nb > index {
+        return self.pairAt(index)
+    }
+    if !self.isLazy() {
+        return nil
+    }
+
+    // lazy load
+    for last := self.skipNextPair(); last != nil; last = self.skipNextPair() {
+        if last.Value.Check() != nil {
+            return last
+        }
+        if self.len() > index {
+            return last
+        }
+    }
+
+    return nil
+}
+
+func (self *Node) loadAllIndex() error {
+    if !self.isLazy() {
+        return nil
+    }
+    var err types.ParsingError
+    parser, stack := self.getParserAndArrayStack()
+    parser.noLazy = true
+    *self, err = parser.decodeArray(stack.v)
+    if err != 0 {
+        return parser.ExportError(err)
+    }
+    return nil
+}
+
+func (self *Node) loadAllKey() error {
+    if !self.isLazy() {
+        return nil
+    }
+    var err types.ParsingError
+    parser, stack := self.getParserAndObjectStack()
+    parser.noLazy = true
+    *self, err = parser.decodeObject(stack.v)
+    if err != 0 {
+        return parser.ExportError(err)
+    }
+    return nil
+}
+
+func (self *Node) removeNode(i int) {
+    nb := self.len() - 1
+    node := self.nodeAt(i)
+    if i == nb {
+        self.setCapAndLen(self.cap(), nb)
+        *node = Node{}
+        return
+    }
+
+    from := self.nodeAt(i + 1)
+    memmove(unsafe.Pointer(node), unsafe.Pointer(from), _NODE_SIZE*uintptr(nb-i))
+
+    last := self.nodeAt(nb)
+    *last = Node{}
+
+    self.setCapAndLen(self.cap(), nb)
+}
+
+func (self *Node) removePair(i int) {
+    nb := self.len() - 1
+    node := self.pairAt(i)
+    if i == nb {
+        self.setCapAndLen(self.cap(), nb)
+        *node = Pair{}
+        return
+    }
+
+    from := self.pairAt(i + 1)
+    memmove(unsafe.Pointer(node), unsafe.Pointer(from), _PAIR_SIZE*uintptr(nb-i))
+
+    last := self.pairAt(nb)
+    *last = Pair{}
+
+    self.setCapAndLen(self.cap(), nb)
+}
+
+func (self *Node) toGenericArray() ([]interface{}, error) {
+    nb := self.len()
+    ret := make([]interface{}, nb)
+    if nb == 0 {
+        return ret, nil
+    }
+
+    /* convert each item */
+    var p = (*Node)(self.p)
+    x,
err := p.Interface() + if err != nil { + return nil, err + } + ret[i] = x + } + + /* all done */ + return ret, nil +} + +func (self *Node) toGenericArrayUseNumber() ([]interface{}, error) { + nb := self.len() + ret := make([]interface{}, nb) + if nb == 0 { + return ret, nil + } + + /* convert each item */ + var p = (*Node)(self.p) + x, err := p.InterfaceUseNumber() + if err != nil { + return nil, err + } + ret[0] = x + + for i := 1; i < nb; i++ { + p = p.unsafe_next() + x, err := p.InterfaceUseNumber() + if err != nil { + return nil, err + } + ret[i] = x + } + + /* all done */ + return ret, nil +} + +func (self *Node) toGenericArrayUseNode() ([]Node, error) { + var nb = self.len() + var out = make([]Node, nb) + if nb == 0 { + return out, nil + } + + var p = (*Node)(self.p) + out[0] = *p + if err := p.Check(); err != nil { + return nil, err + } + + for i := 1; i < nb; i++ { + p = p.unsafe_next() + if err := p.Check(); err != nil { + return nil, err + } + out[i] = *p + } + + return out, nil +} + +func (self *Node) toGenericObject() (map[string]interface{}, error) { + nb := self.len() + ret := make(map[string]interface{}, nb) + if nb == 0 { + return ret, nil + } + + /* convert each item */ + var p = (*Pair)(self.p) + x, err := p.Value.Interface() + if err != nil { + return nil, err + } + ret[p.Key] = x + + for i := 1; i < nb; i++ { + p = p.unsafe_next() + x, err := p.Value.Interface() + if err != nil { + return nil, err + } + ret[p.Key] = x + } + + /* all done */ + return ret, nil +} + + +func (self *Node) toGenericObjectUseNumber() (map[string]interface{}, error) { + nb := self.len() + ret := make(map[string]interface{}, nb) + if nb == 0 { + return ret, nil + } + + /* convert each item */ + var p = (*Pair)(self.p) + x, err := p.Value.InterfaceUseNumber() + if err != nil { + return nil, err + } + ret[p.Key] = x + + for i := 1; i < nb; i++ { + p = p.unsafe_next() + x, err := p.Value.InterfaceUseNumber() + if err != nil { + return nil, err + } + ret[p.Key] = x + } + + /* all done */ + return ret, nil +} + +func (self *Node) toGenericObjectUseNode() (map[string]Node, error) { + var nb = self.len() + var out = make(map[string]Node, nb) + if nb == 0 { + return out, nil + } + + var p = (*Pair)(self.p) + out[p.Key] = p.Value + if err := p.Value.Check(); err != nil { + return nil, err + } + + for i := 1; i < nb; i++ { + p = p.unsafe_next() + if err := p.Value.Check(); err != nil { + return nil, err + } + out[p.Key] = p.Value + } + + /* all done */ + return out, nil +} + +/**------------------------------------ Factory Methods ------------------------------------**/ + +var ( + nullNode = Node{t: types.V_NULL} + trueNode = Node{t: types.V_TRUE} + falseNode = Node{t: types.V_FALSE} + + emptyArrayNode = Node{t: types.V_ARRAY} + emptyObjectNode = Node{t: types.V_OBJECT} +) + +// NewRaw creates a node of raw json, and decides its type by first char. +func NewRaw(json string) Node { + if json == "" { + panic("empty json string") + } + it := switchRawType(json[0]) + return newRawNode(json, it) +} + +// NewAny creates a node of type V_ANY if any's type isn't Node or *Node, +// which stores interface{} and can be only used for `.Interface()`\`.MarshalJSON()`. +func NewAny(any interface{}) Node { + switch n := any.(type) { + case Node: + return n + case *Node: + return *n + default: + return Node{ + t: _V_ANY, + v: 0, + p: unsafe.Pointer(&any), + } + } +} + +// NewBytes encodes given src with Base64 (RFC 4648), and creates a node of type V_STRING. 
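A short sketch of the two escape-hatch constructors above, NewRaw and NewAny (an illustration, not part of the vendored file; the expected outputs are assumptions based on the doc comments):

package main

import (
    "fmt"

    "github.com/bytedance/sonic/ast"
)

func main() {
    // NewRaw defers parsing: the node keeps the raw text and its
    // type is inferred from the first character.
    arr := ast.NewRaw(`[1, 2, 3]`)
    vals, err := arr.Array()
    if err != nil {
        panic(err)
    }
    fmt.Println(vals) // [1 2 3]

    // NewAny wraps an arbitrary Go value; per its doc comment it is
    // only good for Interface() and MarshalJSON().
    node := ast.NewAny(map[string]int{"x": 1})
    buf, err := node.MarshalJSON()
    if err != nil {
        panic(err)
    }
    fmt.Println(string(buf)) // {"x":1}
}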
+func NewBytes(src []byte) Node { + if len(src) == 0 { + panic("empty src bytes") + } + out := encodeBase64(src) + return NewString(out) +} + +// NewNull creates a node of type V_NULL +func NewNull() Node { + return Node{ + v: 0, + p: nil, + t: types.V_NULL, + } +} + +// NewBool creates a node of type bool: +// If v is true, returns V_TRUE node +// If v is false, returns V_FALSE node +func NewBool(v bool) Node { + var t = types.V_FALSE + if v { + t = types.V_TRUE + } + return Node{ + v: 0, + p: nil, + t: t, + } +} + +// NewNumber creates a json.Number node +// v must be a decimal string complying with RFC8259 +func NewNumber(v string) Node { + return Node{ + v: int64(len(v) & _LEN_MASK), + p: rt.StrPtr(v), + t: _V_NUMBER, + } +} + +func toNumber(node *Node) json.Number { + return json.Number(rt.StrFrom(node.p, node.v)) +} + +func numberToFloat64(node *Node) (float64, error) { + ret,err := toNumber(node).Float64() + if err != nil { + return 0, err + } + return ret, nil +} + +func numberToInt64(node *Node) (int64, error) { + ret,err := toNumber(node).Int64() + if err != nil { + return 0, err + } + return ret, nil +} + +func newBytes(v []byte) Node { + return Node{ + t: types.V_STRING, + p: mem2ptr(v), + v: int64(len(v) & _LEN_MASK), + } +} + +// NewString creates a node of type V_STRING. +// v is considered to be a valid UTF-8 string, +// which means it won't be validated and unescaped. +// when the node is encoded to json, v will be escaped. +func NewString(v string) Node { + return Node{ + t: types.V_STRING, + p: rt.StrPtr(v), + v: int64(len(v) & _LEN_MASK), + } +} + +// NewArray creates a node of type V_ARRAY, +// using v as its underlying children +func NewArray(v []Node) Node { + return Node{ + t: types.V_ARRAY, + v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), + p: *(*unsafe.Pointer)(unsafe.Pointer(&v)), + } +} + +func (self *Node) setArray(v []Node) { + self.t = types.V_ARRAY + self.setCapAndLen(cap(v), len(v)) + self.p = *(*unsafe.Pointer)(unsafe.Pointer(&v)) +} + +// NewObject creates a node of type V_OBJECT, +// using v as its underlying children +func NewObject(v []Pair) Node { + return Node{ + t: types.V_OBJECT, + v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), + p: *(*unsafe.Pointer)(unsafe.Pointer(&v)), + } +} + +func (self *Node) setObject(v []Pair) { + self.t = types.V_OBJECT + self.setCapAndLen(cap(v), len(v)) + self.p = *(*unsafe.Pointer)(unsafe.Pointer(&v)) +} + +type parseObjectStack struct { + parser Parser + v []Pair +} + +type parseArrayStack struct { + parser Parser + v []Node +} + +func newLazyArray(p *Parser, v []Node) Node { + s := new(parseArrayStack) + s.parser = *p + s.v = v + return Node{ + t: _V_ARRAY_LAZY, + v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), + p: unsafe.Pointer(s), + } +} + +func (self *Node) setLazyArray(p *Parser, v []Node) { + s := new(parseArrayStack) + s.parser = *p + s.v = v + self.t = _V_ARRAY_LAZY + self.setCapAndLen(cap(v), len(v)) + self.p = (unsafe.Pointer)(s) +} + +func newLazyObject(p *Parser, v []Pair) Node { + s := new(parseObjectStack) + s.parser = *p + s.v = v + return Node{ + t: _V_OBJECT_LAZY, + v: int64(len(v)&_LEN_MASK | cap(v)<<_CAP_BITS), + p: unsafe.Pointer(s), + } +} + +func (self *Node) setLazyObject(p *Parser, v []Pair) { + s := new(parseObjectStack) + s.parser = *p + s.v = v + self.t = _V_OBJECT_LAZY + self.setCapAndLen(cap(v), len(v)) + self.p = (unsafe.Pointer)(s) +} + +func newRawNode(str string, typ types.ValueType) Node { + return Node{ + t: _V_RAW | typ, + p: rt.StrPtr(str), + v: int64(len(str) & _LEN_MASK), + } 
+} + +func (self *Node) parseRaw(full bool) { + raw := rt.StrFrom(self.p, self.v) + parser := NewParser(raw) + if full { + parser.noLazy = true + parser.skipValue = false + } + var e types.ParsingError + *self, e = parser.Parse() + if e != 0 { + *self = *newSyntaxError(parser.syntaxError(e)) + } +} + +func newError(err types.ParsingError, msg string) *Node { + return &Node{ + t: V_ERROR, + v: int64(err), + p: unsafe.Pointer(&msg), + } +} + +var typeJumpTable = [256]types.ValueType{ + '"' : types.V_STRING, + '-' : _V_NUMBER, + '0' : _V_NUMBER, + '1' : _V_NUMBER, + '2' : _V_NUMBER, + '3' : _V_NUMBER, + '4' : _V_NUMBER, + '5' : _V_NUMBER, + '6' : _V_NUMBER, + '7' : _V_NUMBER, + '8' : _V_NUMBER, + '9' : _V_NUMBER, + '[' : types.V_ARRAY, + 'f' : types.V_FALSE, + 'n' : types.V_NULL, + 't' : types.V_TRUE, + '{' : types.V_OBJECT, +} + +func switchRawType(c byte) types.ValueType { + return typeJumpTable[c] +} + +func unwrapError(err error) *Node { + if se, ok := err.(*Node); ok { + return se + }else if sse, ok := err.(Node); ok { + return &sse + } else { + msg := err.Error() + return &Node{ + t: V_ERROR, + v: 0, + p: unsafe.Pointer(&msg), + } + } +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/ast/parser.go b/vendor/github.com/bytedance/sonic/ast/parser.go new file mode 100644 index 0000000..ebb7bb0 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/ast/parser.go @@ -0,0 +1,618 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package ast
+
+import (
+    `fmt`
+
+    `github.com/bytedance/sonic/internal/native/types`
+    `github.com/bytedance/sonic/internal/rt`
+)
+
+const _DEFAULT_NODE_CAP int = 16
+
+const (
+    _ERR_NOT_FOUND      types.ParsingError = 33
+    _ERR_UNSUPPORT_TYPE types.ParsingError = 34
+)
+
+var (
+    ErrNotExist      error = newError(_ERR_NOT_FOUND, "value not exists")
+    ErrUnsupportType error = newError(_ERR_UNSUPPORT_TYPE, "unsupported type")
+)
+
+type Parser struct {
+    p         int
+    s         string
+    noLazy    bool
+    skipValue bool
+}
+
+/** Parser Private Methods **/
+
+func (self *Parser) delim() types.ParsingError {
+    n := len(self.s)
+    p := self.lspace(self.p)
+
+    /* check for EOF */
+    if p >= n {
+        return types.ERR_EOF
+    }
+
+    /* check for the delimiter */
+    if self.s[p] != ':' {
+        return types.ERR_INVALID_CHAR
+    }
+
+    /* update the read pointer */
+    self.p = p + 1
+    return 0
+}
+
+func (self *Parser) object() types.ParsingError {
+    n := len(self.s)
+    p := self.lspace(self.p)
+
+    /* check for EOF */
+    if p >= n {
+        return types.ERR_EOF
+    }
+
+    /* check for the delimiter */
+    if self.s[p] != '{' {
+        return types.ERR_INVALID_CHAR
+    }
+
+    /* update the read pointer */
+    self.p = p + 1
+    return 0
+}
+
+func (self *Parser) array() types.ParsingError {
+    n := len(self.s)
+    p := self.lspace(self.p)
+
+    /* check for EOF */
+    if p >= n {
+        return types.ERR_EOF
+    }
+
+    /* check for the delimiter */
+    if self.s[p] != '[' {
+        return types.ERR_INVALID_CHAR
+    }
+
+    /* update the read pointer */
+    self.p = p + 1
+    return 0
+}
+
+func (self *Parser) lspace(sp int) int {
+    ns := len(self.s)
+    for ; sp < ns && isSpace(self.s[sp]); sp += 1 {
+    }
+    return sp
+}
+
+func (self *Parser) decodeArray(ret []Node) (Node, types.ParsingError) {
+    sp := self.p
+    ns := len(self.s)
+
+    /* check for EOF */
+    if self.p = self.lspace(sp); self.p >= ns {
+        return Node{}, types.ERR_EOF
+    }
+
+    /* check for empty array */
+    if self.s[self.p] == ']' {
+        self.p++
+        return emptyArrayNode, 0
+    }
+
+    /* allocate array space and parse every element */
+    for {
+        var val Node
+        var err types.ParsingError
+
+        if self.skipValue {
+            /* skip the value */
+            var start int
+            if start, err = self.skipFast(); err != 0 {
+                return Node{}, err
+            }
+            if self.p > ns {
+                return Node{}, types.ERR_EOF
+            }
+            t := switchRawType(self.s[start])
+            if t == _V_NONE {
+                return Node{}, types.ERR_INVALID_CHAR
+            }
+            val = newRawNode(self.s[start:self.p], t)
+        } else {
+            /* decode the value */
+            if val, err = self.Parse(); err != 0 {
+                return Node{}, err
+            }
+        }
+
+        /* add the value to result */
+        ret = append(ret, val)
+        self.p = self.lspace(self.p)
+
+        /* check for EOF */
+        if self.p >= ns {
+            return Node{}, types.ERR_EOF
+        }
+
+        /* check for the next character */
+        switch self.s[self.p] {
+            case ',' : self.p++
+            case ']' : self.p++; return NewArray(ret), 0
+            default:
+                if val.isLazy() {
+                    return newLazyArray(self, ret), 0
+                }
+                return Node{}, types.ERR_INVALID_CHAR
+        }
+    }
+}
+
+func (self *Parser) decodeObject(ret []Pair) (Node, types.ParsingError) {
+    sp := self.p
+    ns := len(self.s)
+
+    /* check for EOF */
+    if self.p = self.lspace(sp); self.p >= ns {
+        return Node{}, types.ERR_EOF
+    }
+
+    /* check for empty object */
+    if self.s[self.p] == '}' {
+        self.p++
+        return emptyObjectNode, 0
+    }
+
+    /* decode each pair */
+    for {
+        var val Node
+        var njs types.JsonState
+        var err types.ParsingError
+
+        /* decode the key */
+        if njs = self.decodeValue(); njs.Vt != types.V_STRING {
+            return Node{}, types.ERR_INVALID_CHAR
+        }
+
+        /* extract the key */
+        idx := self.p - 1
+        key := self.s[njs.Iv:idx]
+
+        /* check for escape sequence */
+        if njs.Ep != -1 {
+            if key, err = unquote(key); err != 0 {
+                return Node{}, err
+            }
+        }
+
+        /* expect a ':' delimiter */
+        if err = self.delim(); err != 0 {
+            return Node{}, err
+        }
+
+        if
self.skipValue { + /* skip the value */ + var start int + if start, err = self.skipFast(); err != 0 { + return Node{}, err + } + if self.p > ns { + return Node{}, types.ERR_EOF + } + t := switchRawType(self.s[start]) + if t == _V_NONE { + return Node{}, types.ERR_INVALID_CHAR + } + val = newRawNode(self.s[start:self.p], t) + } else { + /* decode the value */ + if val, err = self.Parse(); err != 0 { + return Node{}, err + } + } + + /* add the value to result */ + ret = append(ret, Pair{Key: key, Value: val}) + self.p = self.lspace(self.p) + + /* check for EOF */ + if self.p >= ns { + return Node{}, types.ERR_EOF + } + + /* check for the next character */ + switch self.s[self.p] { + case ',' : self.p++ + case '}' : self.p++; return NewObject(ret), 0 + default: + if val.isLazy() { + return newLazyObject(self, ret), 0 + } + return Node{}, types.ERR_INVALID_CHAR + } + } +} + +func (self *Parser) decodeString(iv int64, ep int) (Node, types.ParsingError) { + p := self.p - 1 + s := self.s[iv:p] + + /* fast path: no escape sequence */ + if ep == -1 { + return NewString(s), 0 + } + + /* unquote the string */ + out, err := unquote(s) + + /* check for errors */ + if err != 0 { + return Node{}, err + } else { + return newBytes(rt.Str2Mem(out)), 0 + } +} + +/** Parser Interface **/ + +func (self *Parser) Pos() int { + return self.p +} + +func (self *Parser) Parse() (Node, types.ParsingError) { + switch val := self.decodeValue(); val.Vt { + case types.V_EOF : return Node{}, types.ERR_EOF + case types.V_NULL : return nullNode, 0 + case types.V_TRUE : return trueNode, 0 + case types.V_FALSE : return falseNode, 0 + case types.V_STRING : return self.decodeString(val.Iv, val.Ep) + case types.V_ARRAY: + if self.noLazy { + return self.decodeArray(make([]Node, 0, _DEFAULT_NODE_CAP)) + } + return newLazyArray(self, make([]Node, 0, _DEFAULT_NODE_CAP)), 0 + case types.V_OBJECT: + if self.noLazy { + return self.decodeObject(make([]Pair, 0, _DEFAULT_NODE_CAP)) + } + return newLazyObject(self, make([]Pair, 0, _DEFAULT_NODE_CAP)), 0 + case types.V_DOUBLE : return NewNumber(self.s[val.Ep:self.p]), 0 + case types.V_INTEGER : return NewNumber(self.s[val.Ep:self.p]), 0 + default : return Node{}, types.ParsingError(-val.Vt) + } +} + +func (self *Parser) searchKey(match string) types.ParsingError { + ns := len(self.s) + if err := self.object(); err != 0 { + return err + } + + /* check for EOF */ + if self.p = self.lspace(self.p); self.p >= ns { + return types.ERR_EOF + } + + /* check for empty object */ + if self.s[self.p] == '}' { + self.p++ + return _ERR_NOT_FOUND + } + + var njs types.JsonState + var err types.ParsingError + /* decode each pair */ + for { + + /* decode the key */ + if njs = self.decodeValue(); njs.Vt != types.V_STRING { + return types.ERR_INVALID_CHAR + } + + /* extract the key */ + idx := self.p - 1 + key := self.s[njs.Iv:idx] + + /* check for escape sequence */ + if njs.Ep != -1 { + if key, err = unquote(key); err != 0 { + return err + } + } + + /* expect a ':' delimiter */ + if err = self.delim(); err != 0 { + return err + } + + /* skip value */ + if key != match { + if _, err = self.skip(); err != 0 { + return err + } + } else { + return 0 + } + + /* check for EOF */ + self.p = self.lspace(self.p) + if self.p >= ns { + return types.ERR_EOF + } + + /* check for the next character */ + switch self.s[self.p] { + case ',': + self.p++ + case '}': + self.p++ + return _ERR_NOT_FOUND + default: + return types.ERR_INVALID_CHAR + } + } +} + +func (self *Parser) searchIndex(idx int) types.ParsingError { + ns := 
len(self.s) + if err := self.array(); err != 0 { + return err + } + + /* check for EOF */ + if self.p = self.lspace(self.p); self.p >= ns { + return types.ERR_EOF + } + + /* check for empty array */ + if self.s[self.p] == ']' { + self.p++ + return _ERR_NOT_FOUND + } + + var err types.ParsingError + /* allocate array space and parse every element */ + for i := 0; i < idx; i++ { + + /* decode the value */ + if _, err = self.skip(); err != 0 { + return err + } + + /* check for EOF */ + self.p = self.lspace(self.p) + if self.p >= ns { + return types.ERR_EOF + } + + /* check for the next character */ + switch self.s[self.p] { + case ',': + self.p++ + case ']': + self.p++ + return _ERR_NOT_FOUND + default: + return types.ERR_INVALID_CHAR + } + } + + return 0 +} + +func (self *Node) skipNextNode() *Node { + if !self.isLazy() { + return nil + } + + parser, stack := self.getParserAndArrayStack() + ret := stack.v + sp := parser.p + ns := len(parser.s) + + /* check for EOF */ + if parser.p = parser.lspace(sp); parser.p >= ns { + return newSyntaxError(parser.syntaxError(types.ERR_EOF)) + } + + /* check for empty array */ + if parser.s[parser.p] == ']' { + parser.p++ + self.setArray(ret) + return nil + } + + var val Node + /* skip the value */ + if start, err := parser.skipFast(); err != 0 { + return newSyntaxError(parser.syntaxError(err)) + } else { + t := switchRawType(parser.s[start]) + if t == _V_NONE { + return newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR)) + } + val = newRawNode(parser.s[start:parser.p], t) + } + + /* add the value to result */ + ret = append(ret, val) + parser.p = parser.lspace(parser.p) + + /* check for EOF */ + if parser.p >= ns { + return newSyntaxError(parser.syntaxError(types.ERR_EOF)) + } + + /* check for the next character */ + switch parser.s[parser.p] { + case ',': + parser.p++ + self.setLazyArray(parser, ret) + return &ret[len(ret)-1] + case ']': + parser.p++ + self.setArray(ret) + return &ret[len(ret)-1] + default: + return newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR)) + } +} + +func (self *Node) skipNextPair() (*Pair) { + if !self.isLazy() { + return nil + } + + parser, stack := self.getParserAndObjectStack() + ret := stack.v + sp := parser.p + ns := len(parser.s) + + /* check for EOF */ + if parser.p = parser.lspace(sp); parser.p >= ns { + return &Pair{"", *newSyntaxError(parser.syntaxError(types.ERR_EOF))} + } + + /* check for empty object */ + if parser.s[parser.p] == '}' { + parser.p++ + self.setObject(ret) + return nil + } + + /* decode one pair */ + var val Node + var njs types.JsonState + var err types.ParsingError + + /* decode the key */ + if njs = parser.decodeValue(); njs.Vt != types.V_STRING { + return &Pair{"", *newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR))} + } + + /* extract the key */ + idx := parser.p - 1 + key := parser.s[njs.Iv:idx] + + /* check for escape sequence */ + if njs.Ep != -1 { + if key, err = unquote(key); err != 0 { + return &Pair{key, *newSyntaxError(parser.syntaxError(err))} + } + } + + /* expect a ':' delimiter */ + if err = parser.delim(); err != 0 { + return &Pair{key, *newSyntaxError(parser.syntaxError(err))} + } + + /* skip the value */ + if start, err := parser.skipFast(); err != 0 { + return &Pair{key, *newSyntaxError(parser.syntaxError(err))} + } else { + t := switchRawType(parser.s[start]) + if t == _V_NONE { + return &Pair{key, *newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR))} + } + val = newRawNode(parser.s[start:parser.p], t) + } + + /* add the value to result */ + ret = 
append(ret, Pair{Key: key, Value: val})
+    parser.p = parser.lspace(parser.p)
+
+    /* check for EOF */
+    if parser.p >= ns {
+        return &Pair{key, *newSyntaxError(parser.syntaxError(types.ERR_EOF))}
+    }
+
+    /* check for the next character */
+    switch parser.s[parser.p] {
+    case ',':
+        parser.p++
+        self.setLazyObject(parser, ret)
+        return &ret[len(ret)-1]
+    case '}':
+        parser.p++
+        self.setObject(ret)
+        return &ret[len(ret)-1]
+    default:
+        return &Pair{key, *newSyntaxError(parser.syntaxError(types.ERR_INVALID_CHAR))}
+    }
+}
+
+/** Parser Factory **/
+
+// Loads parses all json into interface{}
+func Loads(src string) (int, interface{}, error) {
+    ps := &Parser{s: src}
+    np, err := ps.Parse()
+
+    /* check for errors */
+    if err != 0 {
+        return 0, nil, ps.ExportError(err)
+    } else {
+        x, err := np.Interface()
+        if err != nil {
+            return 0, nil, err
+        }
+        return ps.Pos(), x, nil
+    }
+}
+
+// LoadsUseNumber parses all json into interface{}, with numeric nodes cast to json.Number
+func LoadsUseNumber(src string) (int, interface{}, error) {
+    ps := &Parser{s: src}
+    np, err := ps.Parse()
+
+    /* check for errors */
+    if err != 0 {
+        return 0, nil, err
+    } else {
+        x, err := np.InterfaceUseNumber()
+        if err != nil {
+            return 0, nil, err
+        }
+        return ps.Pos(), x, nil
+    }
+}
+
+func NewParser(src string) *Parser {
+    return &Parser{s: src}
+}
+
+// ExportError converts types.ParsingError to std Error
+func (self *Parser) ExportError(err types.ParsingError) error {
+    if err == _ERR_NOT_FOUND {
+        return ErrNotExist
+    }
+    return fmt.Errorf("%q", SyntaxError{
+        Pos : self.p,
+        Src : self.s,
+        Code: err,
+    }.Description())
+}
\ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/ast/search.go b/vendor/github.com/bytedance/sonic/ast/search.go
new file mode 100644
index 0000000..bb6fcea
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/ast/search.go
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ast
+
+type Searcher struct {
+    parser Parser
+}
+
+func NewSearcher(str string) *Searcher {
+    return &Searcher{
+        parser: Parser{
+            s:      str,
+            noLazy: false,
+        },
+    }
+}
diff --git a/vendor/github.com/bytedance/sonic/ast/sort.go b/vendor/github.com/bytedance/sonic/ast/sort.go
new file mode 100644
index 0000000..0a9f145
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/ast/sort.go
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
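A quick sketch of the Loads helpers defined above (an illustration, not part of the vendored file; the position return is ignored here, and the printed maps rely on fmt's sorted map output):

package main

import (
    "fmt"

    "github.com/bytedance/sonic/ast"
)

func main() {
    _, v, err := ast.Loads(`{"pi": 3.14, "n": 42}`)
    if err != nil {
        panic(err)
    }
    fmt.Println(v) // map[n:42 pi:3.14]

    // Same idea, but numbers survive as json.Number.
    _, v2, _ := ast.LoadsUseNumber(`{"n": 9007199254740993}`)
    fmt.Println(v2) // map[n:9007199254740993]
}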
+
+package ast
+
+// Algorithm: 3-way radix quicksort, where d is the byte position used as the radix.
+// Reference: https://algs4.cs.princeton.edu/51radix/Quick3string.java.html
+func radixQsort(kvs PairSlice, d, maxDepth int) {
+    for len(kvs) > 11 {
+        // To avoid the worst case of quickSort (time: O(n^2)), use introsort here.
+        // Reference: https://en.wikipedia.org/wiki/Introsort and
+        // https://github.com/golang/go/issues/467
+        if maxDepth == 0 {
+            heapSort(kvs, 0, len(kvs))
+            return
+        }
+        maxDepth--
+
+        p := pivot(kvs, d)
+        lt, i, gt := 0, 0, len(kvs)
+        for i < gt {
+            c := byteAt(kvs[i].Key, d)
+            if c < p {
+                swap(kvs, lt, i)
+                i++
+                lt++
+            } else if c > p {
+                gt--
+                swap(kvs, i, gt)
+            } else {
+                i++
+            }
+        }
+
+        // kvs[0:lt] < v = kvs[lt:gt] < kvs[gt:len(kvs)]
+        // Naive implementation:
+        //     radixQsort(kvs[:lt], d, maxDepth)
+        //     if p > -1 {
+        //         radixQsort(kvs[lt:gt], d+1, maxDepth)
+        //     }
+        //     radixQsort(kvs[gt:], d, maxDepth)
+        // Optimize as follows: make recursive calls only for the smaller parts.
+        // Reference: https://www.geeksforgeeks.org/quicksort-tail-call-optimization-reducing-worst-case-space-log-n/
+        if p == -1 {
+            if lt > len(kvs)-gt {
+                radixQsort(kvs[gt:], d, maxDepth)
+                kvs = kvs[:lt]
+            } else {
+                radixQsort(kvs[:lt], d, maxDepth)
+                kvs = kvs[gt:]
+            }
+        } else {
+            ml := maxThree(lt, gt-lt, len(kvs)-gt)
+            if ml == lt {
+                radixQsort(kvs[lt:gt], d+1, maxDepth)
+                radixQsort(kvs[gt:], d, maxDepth)
+                kvs = kvs[:lt]
+            } else if ml == gt-lt {
+                radixQsort(kvs[:lt], d, maxDepth)
+                radixQsort(kvs[gt:], d, maxDepth)
+                kvs = kvs[lt:gt]
+                d += 1
+            } else {
+                radixQsort(kvs[:lt], d, maxDepth)
+                radixQsort(kvs[lt:gt], d+1, maxDepth)
+                kvs = kvs[gt:]
+            }
+        }
+    }
+    insertRadixSort(kvs, d)
+}
+
+func insertRadixSort(kvs PairSlice, d int) {
+    for i := 1; i < len(kvs); i++ {
+        for j := i; j > 0 && lessFrom(kvs[j].Key, kvs[j-1].Key, d); j-- {
+            swap(kvs, j, j-1)
+        }
+    }
+}
+
+func pivot(kvs PairSlice, d int) int {
+    m := len(kvs) >> 1
+    if len(kvs) > 40 {
+        // Tukey's ``Ninther,'' median of three medians of three.
+        t := len(kvs) / 8
+        return medianThree(
+            medianThree(byteAt(kvs[0].Key, d), byteAt(kvs[t].Key, d), byteAt(kvs[2*t].Key, d)),
+            medianThree(byteAt(kvs[m].Key, d), byteAt(kvs[m-t].Key, d), byteAt(kvs[m+t].Key, d)),
+            medianThree(byteAt(kvs[len(kvs)-1].Key, d),
+                byteAt(kvs[len(kvs)-1-t].Key, d),
+                byteAt(kvs[len(kvs)-1-2*t].Key, d)))
+    }
+    return medianThree(byteAt(kvs[0].Key, d), byteAt(kvs[m].Key, d), byteAt(kvs[len(kvs)-1].Key, d))
+}
+
+func medianThree(i, j, k int) int {
+    if i > j {
+        i, j = j, i
+    } // i < j
+    if k < i {
+        return i
+    }
+    if k > j {
+        return j
+    }
+    return k
+}
+
+func maxThree(i, j, k int) int {
+    max := i
+    if max < j {
+        max = j
+    }
+    if max < k {
+        max = k
+    }
+    return max
+}
+
+// maxDepth returns a threshold at which quicksort should switch
+// to heapsort. It returns 2*ceil(lg(n+1)).
+func maxDepth(n int) int {
+    var depth int
+    for i := n; i > 0; i >>= 1 {
+        depth++
+    }
+    return depth * 2
+}
+
+// siftDown implements the heap property on kvs[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDown(kvs PairSlice, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && kvs[first+child].Key < kvs[first+child+1].Key { + child++ + } + if kvs[first+root].Key >= kvs[first+child].Key { + return + } + swap(kvs, first+root, first+child) + root = child + } +} + +func heapSort(kvs PairSlice, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with the greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(kvs, i, hi, first) + } + + // Pop elements, the largest first, into end of kvs. + for i := hi - 1; i >= 0; i-- { + swap(kvs, first, first+i) + siftDown(kvs, lo, i, first) + } +} + +// Note that Pair.Key is NOT pointed to Pair.m when map key is integer after swap +func swap(kvs PairSlice, a, b int) { + kvs[a].Key, kvs[b].Key = kvs[b].Key, kvs[a].Key + kvs[a].Value, kvs[b].Value = kvs[b].Value, kvs[a].Value +} + +// Compare two strings from the pos d. +func lessFrom(a, b string, d int) bool { + l := len(a) + if l > len(b) { + l = len(b) + } + for i := d; i < l; i++ { + if a[i] == b[i] { + continue + } + return a[i] < b[i] + } + return len(a) < len(b) +} + +func byteAt(b string, p int) int { + if p < len(b) { + return int(b[p]) + } + return -1 +} diff --git a/vendor/github.com/bytedance/sonic/ast/stubs_go115.go b/vendor/github.com/bytedance/sonic/ast/stubs_go115.go new file mode 100644 index 0000000..37b9451 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/ast/stubs_go115.go @@ -0,0 +1,55 @@ +// +build !go1.20 + +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ast + +import ( + `unsafe` + `unicode/utf8` + + `github.com/bytedance/sonic/internal/rt` +) + +//go:noescape +//go:linkname memmove runtime.memmove +//goland:noinspection GoUnusedParameter +func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr) + +//go:linkname unsafe_NewArray reflect.unsafe_NewArray +//goland:noinspection GoUnusedParameter +func unsafe_NewArray(typ *rt.GoType, n int) unsafe.Pointer + +//go:linkname growslice runtime.growslice +//goland:noinspection GoUnusedParameter +func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice + +//go:nosplit +func mem2ptr(s []byte) unsafe.Pointer { + return (*rt.GoSlice)(unsafe.Pointer(&s)).Ptr +} + +var ( + //go:linkname safeSet encoding/json.safeSet + safeSet [utf8.RuneSelf]bool + + //go:linkname hex encoding/json.hex + hex string +) + +//go:linkname unquoteBytes encoding/json.unquoteBytes +func unquoteBytes(s []byte) (t []byte, ok bool) \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/ast/stubs_go120.go b/vendor/github.com/bytedance/sonic/ast/stubs_go120.go new file mode 100644 index 0000000..bd6fff6 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/ast/stubs_go120.go @@ -0,0 +1,55 @@ +// +build go1.20 + +/* + * Copyright 2021 ByteDance Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ast
+
+import (
+    `unsafe`
+    `unicode/utf8`
+
+    `github.com/bytedance/sonic/internal/rt`
+)
+
+//go:noescape
+//go:linkname memmove runtime.memmove
+//goland:noinspection GoUnusedParameter
+func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
+
+//go:linkname unsafe_NewArray reflect.unsafe_NewArray
+//goland:noinspection GoUnusedParameter
+func unsafe_NewArray(typ *rt.GoType, n int) unsafe.Pointer
+
+//go:linkname growslice reflect.growslice
+//goland:noinspection GoUnusedParameter
+func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice
+
+//go:nosplit
+func mem2ptr(s []byte) unsafe.Pointer {
+    return (*rt.GoSlice)(unsafe.Pointer(&s)).Ptr
+}
+
+var (
+    //go:linkname safeSet encoding/json.safeSet
+    safeSet [utf8.RuneSelf]bool
+
+    //go:linkname hex encoding/json.hex
+    hex string
+)
+
+//go:linkname unquoteBytes encoding/json.unquoteBytes
+func unquoteBytes(s []byte) (t []byte, ok bool)
\ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/bench-arm.sh b/vendor/github.com/bytedance/sonic/bench-arm.sh
new file mode 100644
index 0000000..b47d627
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/bench-arm.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+pwd=$(pwd)
+export SONIC_NO_ASYNC_GC=1
+
+cd $pwd/ast
+go test -benchmem -run=^$ -benchtime=1000000x -bench "^(BenchmarkGet.*|BenchmarkSet.*)$"
+
+go test -benchmem -run=^$ -benchtime=10000x -bench "^(BenchmarkParser_.*|BenchmarkEncode.*)$"
+
+go test -benchmem -run=^$ -benchtime=10000000x -bench "^(BenchmarkNodeGetByPath|BenchmarkStructGetByPath|BenchmarkNodeIndex|BenchmarkStructIndex|BenchmarkSliceIndex|BenchmarkMapIndex|BenchmarkNodeGet|BenchmarkSliceGet|BenchmarkMapGet|BenchmarkNodeSet|BenchmarkMapSet|BenchmarkNodeSetByIndex|BenchmarkSliceSetByIndex|BenchmarkStructSetByIndex|BenchmarkNodeUnset|BenchmarkMapUnset|BenchmarkNodUnsetByIndex|BenchmarkSliceUnsetByIndex|BenchmarkNodeAdd|BenchmarkSliceAdd|BenchmarkMapAdd)$"
+
+unset SONIC_NO_ASYNC_GC
+cd $pwd
\ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/bench-large.png b/vendor/github.com/bytedance/sonic/bench-large.png
new file mode 100644
index 0000000000000000000000000000000000000000..8a8785ec9adce4954f5d507c5e7032ea676907d6
GIT binary patch
literal 87463
[87463 bytes of base85-encoded PNG data omitted]
zI>so4rXb-||6I`A z6|L%6gH5c#-*mK+2wHYll`^}J{hIIESnE1`_^|4VXw-*nho~S5FE6PTJo;i=*ShST zJ6VHAUkaL6g!1aqh}gUxJQ__&MfJtx=TVXy85YFqiOL|eBD&@wqLe^I&oZk8;?evXy=6|FZHj#4kA@@l zH!obcpq}k;vQ~2CBX5sg!Xlie}VqG_Jk{gg~gtbYq@Pwo6< zSM6-Al#X6>+UL*DoM#7;U6*WX6(x@!J9cxTJ+mrOEMyP8pp!-7i)zDOthYY?vXLs2 z)Q?G`f_Y<6u2*)x|5aWOt>#zRWP?g7GeLUu*3{RwgEdj%G9wQkZz($x5EN8YaBp*R z+jxZJ-IvjB%ifVh|L?}E83QqT)8qqj+w_`coe)QJ~$Cq~%Nwi(YPxV@7;IIfi2ZRV`fGG21eGI2 zgDWa7)EeeH&W~NceC3L_zrRrHE;6zN@dIv>o!Vm8h$E*H92`7dC&#RE`SPaI$P_d} z77W*P?w(>-j_S*v%hisg;bmmZWVr(ojBkWTY|6ezZBoXkT}zlId7;^5G*# z>b@9U&>AbubDKYNBnkm z*ql$j-Q+i#m|gS9e6lmQDwKC~c(tso?6%RSq`}AZ7FKP8p3H8C*w`Mlw`+OO-6ag2k54Jq&5=IsZ_$$w{ZkkHc7>g(%2!=cyLKg=j<_X=6@ z6jj^LpFfv=doq{R*2ZEV@xaa#Jx95@YZu+Vef#!x;N@-N_XoS1TyIT&zPIp~DlI8_ z9vG-6zE@LKRo|?#vXXzG(#O~LomRF(Utix;ufNo$AD=XYstozF9Vd@+a6El+Qlmab zZl9jEqVl6hkKVq0`z#>f3?3o(&fHA<*0}Z2O=Inufip8U-@bo8ub|NPbZ?4Le;+KcUqNlm51Dw>#>1lx8ikcihRtEiB-WmvL{iG|nK zU&g_amzO^zDVbN&r9mE0Vmj4bNFA{-qpKUrUlT2Cec!P9WnHGN(Vag-zccAcii?ZK zy7Hru6IImIWX;StJF*>PuO_HPWpBzG+|sR~WGmTO5}f5%-PyOfdr>4(X3to2ul7RGNxA{UVpG;KNh>yyQ!dw4Islopc7nN5~SksQRy zO;Y=)ek(@@i$4=9wN`$NQpmD1VsX()DN4Nj^*PVaZR3cxnAWokk9r29BwWgBY6K-~ ztE&z0ekCiq_K583U-Z4Fm)QKpzkR;--KTct&+xg0iH23(XNy_ubetokV_uhe zGZMFrm97+<{tHUclI9G0C_;lY ze!cwuRxR6l>(h-KfkYu7PsNn5&kyZgpkN#Kr}*c?tO zvL;R2JyNSv4@O^8tNu*0y{vSIb=?f`prkZL*7t*hz#qhz?rvk#mM`zx1TA|9r8bt7 zGunw{g)$YgxVT8yJG;KNa-5TM3j>*;%lsqK`wt$_@*D2vH>@IIkn&IYF@9U~!EzG0 zru^L!{Ta2L=7{W*TKznl=)rdHLtNY|pmDq5`eI{5$U^dsPXgwxLCCe? z-Q70{gaTwM;xu(g$8G6IZ>hhd^AW^wHEp2y)~#Eu0oz;zb$A|&P!F$6ecy*;Lf`GQ z>zJfu6rM-#a2)_bVXDbb5;=xDqn|Q-7~UvHOA46OKScJwbm>w7B6r80J*Px}<_&Dg z8xZK6I+fT^6yDsve~F~v@g8~~R!3vwW1F|_E-+2|f&_G;xMO)BVw>UG@*E{4rPa_6 z7KGIClP8ZHIkMHtZYcLS=gOUN0{rFUTODF&FI~)euWMjuCm_`2nqriqJZ;#GE4~rteBadMRXJOvc$o%_l8!}4$~N} z-ms)oPw& zipt9C$+}y+ckJfQ{qvlc^@M~(B(;uf-6z{?Dk`^p#BVnxGP^^zc=g)fOo_JU$7300os;fqUip-wacD?=IUVJ*lvN2S{Ky-WrUGa}P)1S$OzGef_u7 z**F4M$0PI_txsRne|9-T3O9mp$ zAG|a>BWx*LcJ0~oDgDRY*EGM-NTDZ3RN-hZO>V4P#*j!FR8ZFc{8?UA)z_S?S5#8M zhbq&%W;@eYZa3M<=)rGPGxd#(+iId+go;a3-o}QH>S=Lo1c_^{NzU)vn(^L3$(bxO zqy;pzMuOqa)5X-j$1C?T2p8zA&rp{>)jC^VUj9)hpUFymg4j4zb)Vu&K(7Qw-+}U` z+p!U+rB@rv1KE8S-W*kYsY+YDRzhz{bH(86OL1pD!<3fTMQ3cjZBNn8UWpWZ>5{V1 zm%`EF_dl?eYhCB0dMRXSkuw=3Z2fJ#Bu2>klcedEG;CT`6J0UFr@XR~4ILoFI&$@Z7z+$;rtU zmSd&<%!9GfZf{T|QFm&6n5Ck4^-8`nsHBi?-bUkp+w<|`den100Tnkl$?hF_iCqik z86AdpdDVve8J%|$Y8f>=!Y#7Oog$ee?$GS?m>tcM91%d|>FEH!C<=A-*}rzFjgl~n zGkiC$oqK2LkCPKN76)LnLw4k3{zer3@~Sgs8$HU4pxX-+5(1b4X@(0~9z_kL)^;ik z-8{cI-CGj3wMofbT6#-iVIgukP?j?a{ZLa9GpbvE;dbVa&xN0OEk1ZDd*ji=hfac| z)PA>WhO;7Med!Fah)nLQ)_Pme_0#nC_seb{9Z5(MeA3=M^k*-VcxYq7wJWNsMRhT9 zCiQQ(v1B^;CUN`8&PXhAU`Po1e=(=2!tK7{UH9(Y`!m_qmGI4jyGcppp4TFOlSpg= zhw)@retxTi(cM>)J+T~3tFN*qzXy*xH1$SsQ8tZyko^(bgEY9SY3?Xk8Xt!y&8bMo zPL|`xy(c=df2Q5OhCtRgqnMUy%)cCPu}tI%joaNoF^4gl?j6UH^4GJHJFWmy;m|E*^p2FJ|*xC3ikeo7UjCgK0-rnnvuexj7;8R@L#< zS5vot%*vw5Csdu!1qju4U$aNHLD2Q3w^=0EG^L83ypd|P(thf5*0!nHwf0j7kJhjY z2sE5*vA*m3QBvom0+a|9wtmCg6b44ZX|`G@_lcHL)MRgCF;5fujVR~BtJk#p9jcP`N`2}K zqeMu*mgYyhueX?6->z-man*Ly)XCWC7u?qUt~uV-dm{I#Qa8PM^TzcAQd4erg=^gD zv@4GqiuCz-jei`>~2{$)#RG7>eH$>ZkYb*P&`UOL{Zy>qGmz@-5I?nMtKd#Pyl>; zG6yw>kkSwF^9Q0yzoMd22z=nfK6@kGto*BW?*|4p8K;LEUqC zBaI2v*v0O#mJ~{AY5*(HoC-WQX|p>}K;eZA(9iXC54?i(!&;Dm4@2w;Q@d5VWPn zk5yu18-*>yu7C-08jN%lw(2H1e*8F^T&vYZYXB{xzeJH8{u%4Sbd=SmW$NhTOuy8u zOWNIZDH3AM-At3>%-Qot*-5%=UbcQXo~Y@CqQKj?Im)$sXF>9S=jiINQl~3-BJ%#y z@@Nui>89&7P2ycc*<|s2F;k}EPD>*5otNOYw_2&Y~4ED za7Eb~yv+PW$Hfa5b`dQrSW}{+`Ma+^`23k?d2V#jA);(4?#IbJ-z+jx(3M3tt3 z7SKVcX^zWA%Zf4B0pK~qIaS+~bug;@fUAq;#NSS2JnPI{6oI8oEnUj$D)ohkG; 
z0c-k<^jy#~_HuoND*Ev8Z%K()OUzp~eG?M{ z5WvfH#u6=O@YW31s~vw2X2=8d;dEp?MCQZ#eCSJSEaXK|6T<*(yIfTAGg zSio$c-$8Qv{(Ya+n8TGdlpRiu^r}Tyl-!ciyg1^F`Wu~UJQg9u_aWOl+jdY1+p&|3 zOmA`O_ikoMhVbf=p5mvz_9{T~v1cOhlwb{h<-) zE41#k9^Cn>&!?uWQ0oiOhY=WL30;XvN7gCyV?0c`EuU{~&dki@ypif<-CLq^<3>gO zyNkXmQag9<^oZG!rC)xaqM|}Q-HeuKG*Q(7*QcgSnZ@#G-L(F&^YJ}@_;3qqTp1Fd zdY0Ys7&-qEeLjN6%&6HZ%=C1kHH`yN^!DAmWYDx=Sp3WNb;n!NKs7LXxNj`DJJKh+ zN}@X|VZ3Iw#i1!)06fAs0eG=0OY?1( zA|P+At*vps8#IiT&*BisUApwFOQEZNWyxQ>s0X4MU}4kd&BL9!Vn+kLK@$3b7_}a#qLp0xbEU+WUhed1 z5{+~-UZ#|B@B>K4#b$ylK@=XABEA_pz8vSfUzt44|06QUrqtFXzw z?3X_D-d?pkd1+&@*B@=?&o?qp>8M|(s&f+5LC*3>0*P7k=X(_FNm2RgQn58+LidT#@p!}5s?5UhE*y3!$SE>d-6BdCVgvN{tOj9V>#EHYC^GnFGDvX zP)K&(gUzc04Up{MVwB6ot&R1yzJY-mbQlD&bmGJbpC!?@RFm(P?kgGvyw{p1-4?_E zeB{7O*Geof{QmtLG$b9Fj;70anuX(TKuzTvH|UTxIkmDH)IWdu_|XT6k!|swp8oQK zeUfEC$FF?8@hJ!GKS5<;KFsx(;NA3&@*oUg=r*+!8&1iISDceE&vM1K>}Tt?dA$;HuO!ZQc#C%q5-= zJo;z&XK5(=$1AZ)7BQ>2zZEn;{7f-Yw#e+KLJ4lkv}M1v7CAwPP46yJqkgGpSTN!o z6crUc3ky3B?9nZMSb}+!n%tx+g0Xo7i6uqt(FD5P5`X4ewA*0F`df^m?JHSGjw(eM zi_5w~97+Z)3qq~>$w3bF+&ia`--CjJNc`&L6WMZz8nzHoLf=^woK=s$T$n0v6(_kR z-(3n4mF(UhK;poGIohwX5}m2Gxt0$WWdH4JxTq1?kY^$sJDojIgK_$(f3BRQSH!92t`I!RrT467i!}q2{>F) zt~2E~@{@l3C(eH7ShoT5m^O7}{eDc}oa-WFU|=A=&~Ez&bPW(+Z`!$Y=MuzQ4io64 z@`zEgx-lAIf<3qF%G*uDQ(#NY7BZD>lP9Fgsg(r zp!4ffW4`jn^CR5^R}XZuk#Lb4=&7fi3Xf8Qgd>bylsy6?g%(}rVgHPNE> z6=f_S+KN$|I%FvyJ$}rGh92jx6vBazObUbqFr$O$hwQQohi@y9?Yd7Sk6730{uzq$0}$3bZ=T?y1535(P=d`D(nZM`sX+9 zqSycO0@zYR7$k_Fvi`$ByCK?lypeE;(xLZZw^dG))MEDoqQuHE-)-nWwq2(uwpjN! zG$Dh+`&%uZ(&?0y1+bl$m%mvPA+n0xXQ4a;orVvX+HE1bOUsTqHShM8ybq&JYFfSk zMA449JFOI0_yK!fa9%%)M?!bQlWsp;{|NEgGFfdw|gZp@$pKQh`&$_C9tPZ*@L62FOk@mEfZi60<7I zS$4VKN@-an>M(ZB*Wa!25h4z$V7ss^M5jAMh6f_=Ga z&?%~nfcY7#jFwW`$w`<{nw}5;SG#Bkn^u(ICA9!CjrWzQUOdl3|L37U(saa_|hErWA`HwAlHWn~N~jyb`t z4gHtcrXC|nBD;I&y{;fr^nD&4UJ1q=8V2DTFt;`(w*=ZE3=_VD=BO+_y#>AdvuFF! 
zeM+OpZOOYUhWgDWBou;t-swEZm_feGdO4#Z;9$UGL8x~Gt}IA?Tng6bI2YFdjv=3{ zr!S}mK$0xL{5kohdtP1w4v-*TW=Tuad6t#Q6XB3-T*nHTcd=4uy(e8lKp?1;$wfg1 zPK|)db_mSEU`nx$D3pcj`+vqG!T8Yde*2{O*Q^0Uhlv2rzhVfN)*{$IVRXe%OR*}1 z#t*{Np|G?8KUuVqX4?1!O2ZeowL1W6iui^B0Re=b2)QVlx*BRAc>u=)xIXIR>_&h2 zT6APN3&I{!cJMGKrw{rWUYyHX#lN1A#;nwf!T-ZL(!l$F-vRRff$8M`XCIkO4q(W3 z@?>?&bvmy^x!yOUbzg_>2ry+)g>Y-Nwf@XZV2sRo8Z&!MOw=+h*4EMa>*b{0;6>X} zqdY!MZz+C(yl9R8lV6Bz%Iw)=2w5HLA&*tAUmu>&7vR+Lf2_wBCLS)`ci^&iw^*@H zYn^kpu&8@lZxSpRt8rO^2(!#TszVmiW`g0ODZU>3E$J<0Od4-l@MxTY<$d$YlPats zelwf8jvF@k)wC7YEhR9zOIn`lmpkEvm=oYUm_;9e5ubxzs&-XtZzsodar&(Lb4Uw(v; zO-&`LXVU2MV9Qb71m zkB$WewLX{RZjIg;$^=x2JsIsbWI`QwSAvtQP z?FIP2XlJ2fj)}bHA+bF@y6dx2 z8-c*P)|DqS2)7XAztCDQt2bPis<7G2C;`bMHGe^Dse7q0A5@ zGx3G>@)^Y(rXZ?&ql@OfSwP~p)K9IJq+JbaJxtL2WhF;e!pDz&VMb9bM~*y#or2G4 z>L$^D0&TZsSTYj=>>x|X!(AQPJ9h45fewdCLgdeDTLycJ1;h71el#$6*A|mA2o~M} zN^;SwGY{HZH*enT>wlY~nG}YvZ$NQzadA`ac4zXKZl|;~qXMDIMuqq7ipe=G-8ZB6 zP07E$SRjRwo&A-V^F2nd&y2AN_6MuYO6AFz);AUs-Wrv&ejAJR_GP?q_UtAIl)B4* zhV=~$rhlB;bbU;nBjyM4cEnN*UGXZ?MAzF zv+j*dyLBmJQE{;|!_^Jdq4~3#nhbc3IWsEqOkHq!NH|y|p7&hS4yukY!>p|=7(r`^ z3g4Z##R0@#1;8{ADA!xJW#G$5{4Y7fH`>&%uvFCFwMT0DYpAmGJ9 z{zk4w*E*;JeITa6@yXct@9WWDpHTifKicFEE@}FwvQ+$!@lVy^LMf(=C;XVDlyFYL zQ~1ye`Jt`I9?x(X2TE)q04MO*TWo|vD9=6U)dkZ}Q^B}Ixhvz(E{4Lac#Z(RcEUbf3+o%a_lh z{Wy5=ATr@%DvdS(ad2IGl;)D+a)<)P5SV8zeg|^6FbI2$y#qq7iSC>~-j*W}+!kSux7f0i)Klb;WR5YgY zdGcLpPLi3}nEd#TZM?#9{|)EIthSW5f@6;h%GuPkmz4RVQzCpupmV`^kW*6j*$0s3 zaNJNW{rB1Eu&IQdVf#L&kJD=%hAEj6(_?*_+j|<~ugHcAC53^TfVPZDPo;uK-eNuCy;1=;RPb2Sj|NPhjY{k4V&$z4Bg3SC-D{E=(kCb+)7 zkWazPT+8U#PRO}nZ2MrC?1#R|&((vLqX-NWS}G1WD_B7zTjni2M%2&FRB|J?Bl<`I zF+nZ_0?g}iy#z@;+j=``8qLqGWc6cQTu0$jK$h%XA^eujInMlGihbx#f$84;w<5q! z_+BBbC;gZ=bnlcKpI~3Sa{c;wq$}PxD`(NxE8-)-d*rMyk1|Ny2|}#e{!|potll|% z@#c0p&P9Z1n1qWUp4+a?fhCVAJRksl5t-9`i;>B%k70v`CZ_?d!G7Dq&PZf+sE$go zJ3y!qK~I66huZ3}1`hc<7(2j)9stn+E@yF6XQM)5q5V6WE7Blp7@`YpHXRUQwDL9L ze;u4jqF(K{HIwhY1y8!jYz#>0&(5=kcNQkh6>j&N|IfXAdrR2X)Y4|O@(huIH?D13 zt6GoE3aDyKYYYY*fD`czb+)~S_)LE{Z2zv_Vw1C$*4&5NplDjVT zhoC_TKraDdQ5ig2sr&k%`yGZg4kH$EfG^|9+FGOE-yRX%VQ59)<=JfQCNx+(_wF44 zWge6KVUAF(VZN+Ibtx=FPg51% z5+)znDSvP8-!K}oL6iqYM7JXH?*02O_|md|3?_DONFm1)B8kc2R-69vjMP~()*squvuDhMLN%;vH0||Uk)T9u`gVY`CA=s` z(RY89`RAPE)7))2%Wk<4_!y0APDKJYbmnOUqcPnQcm5a) znl%?d^Q*$*Dyyne!dszR{`|<6?b}^)6N9dR74YqfZ7~zn=7w#Ja0Wz2*TOku0-gqa z;A%g&`z`!9VAeCYwPoj=vvY7yF7`5S$fNlL*wKBk=Z{Hf(j9Uv-6J%X;F8;6-2oj# zP~2c{Pb1lq&3Q16j$ZH5}I?e0=qvQaD*{L_W8eUES5uNk|R&pk6R20-N;lAhl8(;=2=LsrQ^6s)#hkX;RdFRYrGa6ttpnId72WT@)c-~qQF8F_vGZ4S`GcxWeKwIzE zXu9^8s)*ggiEyl+QGkS7ycgkLpK9h%y>(oC}&01hU(uif?~Q`g?*c5P@~ zYGZKYv&({oVQo}3kSbihgtE7rPM|DQXbu(xoM>QVr<|&*z#5=sU%h%&U~KE)6B!ZN z_HshJzprmU+#?1x)u0AwynR6u6^YarH5Ix1Lmp}W^a*g9@Oq0c55<^urBR+w+0OU3n6qxY!|sL)s_^ zpzvT@cOkre7++{EJ@0(at1S4F`I3cbu=(36%8K=8J0tirrrWoAphDW9{!t)l;*-K8 z?}~oX$as%02vEZHIAbX=W99sJD@l>zrG~dj&Kdc~PM+M#AoL`Wayg7%s&-Qb>@&K_ zHwD9!)j6BT9jXjdRHhR5f&1L|{qde&z!=!fx9z4mEibbP?L{7xh=E&_)(#HkRAb4s$>E?(2sF1H9O zG{Jy;0|q4cR^n%1L?z@P^b;?Oili|dL9`Q|n`p*sq(ln_RqnuSd&5UTI`No&q@B*) zxkm7ddarj)aeW5W?;ZGY1Qu({I*x{= zn$MVJosF0_`?_h!XkN~l%=2qUgk4^2lR@E1RTw{L{rcDEwr9j%a-3-2DZE^pXN=|y zw3)tY_TA|ovo6o8t zcM?vn;L?NG0;rl>F_A&78L28=6y0DbAXI=81Ut#j{Izk*j@H^pG0zSQpIzW@^w z{%$?H^uaiDtE%&+DJ>s-y61z5U8uYlDLu;fQf(GF`}8*4hK@}(fx8(&=scKRS%r_Qzj*Vhz)SZ3NB4AC$$6Q`yic_tRs1KwPV z6>M=bf)L)`50nLW>mbL#pbgyZud#w1!l*!Io3S;%`5E&E&XAax@BRsBvs39-zeswB zmzUgX3x-zK7vNK^HAwn!Zw+P7XlbV3VuR>P%PIFF5$)o0iS)mM%{txullf>plC#G->v`O=(bp=?&dv(IglsS0n*KW6lPL`FPO(WvZ=$X4JmyiWwscp6qcuyb(7V$m;43MMIA7J zXRb_S1CmU)A7;Jw3`Qu_4)kxYzI>@4v`@(41s*LDFYJ@Myrd{ 
zlFE+oFmWS{N{C(y7!zn*A8a^I`a=+zKzn;m!-qnsDFjft_=1xK;K(w6?S<6(wEx3L zk9d(_3wR5L0AB`PXjt&(m0-_}TfQ8DNCQ^o0YuZxwFHy9G0OvOVWwN)wjo9|$f&vc zuyaQsH6mmKqM~Y$2T4o0%kbh*JO#kS;iE@wfMy9xpNz~_ilYisPFK6y?S3kLypi?< z&P9Kq+7O^iBE?`W(L)ieLzE{#*{LSK2=1+uh^8;MzHbBHah=I^>I@Phkv-4}v9hof zs#Ds)7FiV~5l$esofJoZBHw>&1!nKE8^6C6n*_izf5AK~!AM8i3Zh!}7xA!V1H+b5yFyO)|gQ&XMu#SPyPC3*R7 zI9z_CS`6-UU)nro2@D}XwzB|+JkZGs>YS05`eeFRw-FTEdacqGjlEPA-Wgf`1r8TXX2dEu zZ&9kZ+I%jjNldr6iFD?nxBoP@#~CIepGV%g{|4F=jvx#aB1Ichu1hEKpx|%WwvBGX zRP6Z*R&dtL=4Xbb&OfKLP?ZGDKj`deXgpO3H7FSvs>rx= zJUT3h*&4M^H;5)Xo$z6x-zgb{CoLsZk01v&Ri0Hr-i#2ne*%^1Mt0TD5V^Z7 zfY8D?ADnfkPoMT^DxL(J0QDP^*_^YOW8Ri|j_Y@iP|nR|&3(cQ4n_?zb#$A-nFN{? z23vMQG@V&?Y@#OUp+ts+Xgf*?V!jG-XbcNZ+EU%4C%f}LaH+Y3OF3sp$ByI;yhGDm z3QnRKTp+T8w5%+=MOEV&T@&eEfU>dIQ;o^t+=tVl4z^W`yd}d7l}CV@H_>y5ul@Py z3J=d%XKr|kQSAY0>QV%VrFeA+rbQs;{p|veo4}(8F_z%97{nZWfX$H#2=U1`oT2k_ z4)ydtjo8#T80I5JQ|ag~V77s5D`ee4!(cFcEpQ0d~u*?jU2CBPf)lS~GJ;aH9| zBiqQGyLW#Kd?ZaW&>2~Z`5G_=C8lYYRaEX(8z#(ku*OI78!;qo%7AL7_f%kumjA_4 zeLgS?J9g|iglSnMj~HVb8XB)+-9ixYY}t2D9>84XYR*>qSSVh#M+3;7pvJPYeoM)G zg1WZcPCoZ&=KkY%p(;=!sO1Ke99H{tBDtkZe|@9{bY~Y9ZayhDgXmFMnID%39}W5o zVNV_XKTrsbG{J6vhE$L=E+82rWgkK3P7u0H`RlojUI3{iPv_FHU~*m4^$L1Z#fvXjaI@*pDA~TZz}O?@@-XU|l0bg-}F?h`2;mB1iti{$L|Y zBv0A*Nxz*q#yu6OD?2`vqs0)J-#Q&KJD}*+Cqi?2L=BD%f{uk*pa=HU%{PIh(OEwL z??m%xVrh2pikeypC{G`i7QGUmTi|;+wR1wjm1UfvMn6v+`~XI;e}$3l+QoY4P{B}{_y4BDa1SajseHfvU)?B;vV#=* zIarwrc#jYcLlC2m8uH>iJkL<_ra*9|V7^pORj2@dLLXKkVXFOWV=1+0i$B&4m;yu* z-IQS!O~$;2Wo|H{Eh$Qql6<4r_08q278|e5IIw6}3&Y4KOJ(QmrBVmlxo9*}K;{1&&W>(_&^FtnJoKOLc0(g_Z(rRWoK{ zma%o;;8a!5apDa;s`v)oHmtOfZ{Hrq3Xu@z-fs2N^{5GvJYfnYl1fomUW-2MtdV&q z@vkm&S1P(lD-b-{Y2Ic1-^1KF_ATuD=R!9+7|waxnP81ceckQKgU=EV>QEytW($@N z`JRH3QeXUKf^LTV+Liy-+qeg-!&l2h_Mcu-S)-M>BS5g?I6m0Y&lVk`5Hb{Dcn%XV zIRLK1Lr>C9xn_7FA|^kQ^Mwv|@*hOJ^(X3=ROB1)R@?FkNee;RC|sDk9>*04D(Ka8 zEw$4-2yOxqjC@9eRdc)@9v&oMP2?t=9Oz>0{Cx>I3bNuJs6H2+}EtxGr_3n?tOC62A_bA*2k zVc$0kc>HSAdf3@vD1uSQzFV5>WdGK@D^+p-l4jQ(I)j)`5hI=dZ$LQ0QKt*efk`LAz3s6eV7P3is5Ep#3&e<2nY*>F@-kkNaR1o!|5^s!yT_=}cjn4SusB;1r^@l{o5jEH= zLP4YeIQI<>QZO+op)r(jWCJ&Yla4M8?rx%EzM80vu@7J|3v&{9REaAF7R7}dEfYGUCEIhW7Q2`%Ufi=9TK#uA4zsrqQq70&0pId=} zJh9|Ua=rW9#Y6aw>c>}g=w2jxdTGZ0_X7{*oX6yd^nQ?6Et)b2!cqJ61vvF#5)}`Z z>EqK+R`)qYyZXAjBh}WkI-l$^o7$QY%w|Ri`z4MpZvshI) zT6*?7N*|GnX<$~TEdPBx_^AmmXVo7CrNLNO$gR#o;S%?tPA3h7$_k6h;nf zPBz`ver8+K9(G==DRbh=>S{wUn`}(o@2OkLN)8LdKRuE6QqbHIhF>r^s1UGe*THOR zJ}Y|uKnK&?bVtJq?(CUMolj%eN%x=K!6JU$+@H+!{_}3NdBsnr2Ud2lJd8W$?(Y6- z$jlFOB=7)`Z zc)9n)nFrfowkh0LpWOHYB@W&i0zQC5W<`*+4%k0GoFzfPC5w!v544&U+!-@Z6Ibi0 zH~KT$#+g>+PU9J-RMU95;!Mz`7EC@!_;4uMfB#vp3M9~nb;dMxciv!n zG7v+GM7#2rfCkD8MJ*y`9ErK(0a|;1EQx$~ys){-r=X7Y+n$OOxj9evhE6v~ZNVlz zWdFcnea8Os;Oxs)##8K&xcBF7W{>5&a^x z>)_?gimWJpNr9lQzMi9q-e6`jC*{%ehv$9cX2aELhQR{oh6lL_H3GoU{klX6C z93V6=FE2qRWBwxE9Px*dF~h-{NEjAy(9b|hq?L62`ci#644?ZUa$J^Db8>OvhjGI^ z`2k{-kQZPkIE#owJryU6i(pf#7G{g7bqFjVxv^@ia5OOe1|EQRb#09?og;oU;VX*y zz19>*TZ3l{V=X~6a^77}EcJAGXn5#+X?Ab(l*p2(>#qy}bBB00O@KbOxf^`J^P#-J zNGbOyf~08DcJW1#xu&7D^@Mwvx#Fc4d#Ll-)K>R-au9PXe|{=A<_e~D-d)gwU9AKi zjxuoVc_k%7XoV5DCFJ;(_r~E}^LK6#uob2Q5W2iDS!BTi3#!D4K9vuAWHL&aHq_N$d;ywoOkCFv7byh$#|rxE2V^ zhCj}t!q~t~2?gu5Ha82)J=7WEVwo4DG`_N){%P0@fIL>19YADJU`z`SvWR9~rzTpe z7Nslx?k*Cb69XtFBia#onD*Y$(NVjjj#>&fn-`asNYQ}4ijU9klW>tV|Dq9V+x|xB zyqn92nS}>Pg4aKXiGdqvQFd{ds5>MY>4#?*3u=;iJ~V&KUhi~xc=%JLg%vFxNLFJG zO?wSq!a{)_=I#6Uo2;O~ncOIVJ%t#V#(bqw`&VnzB_e7OnM)9^;*K8D%#7mKabG({ z&80~+nj_5jF>Ar=IH8HuP25(2fF%Y&-Vy8gaAibR(~A9=<(QseIH4i{+V(?+zS=ek zo{ZRFEZ9%DXu%bh0=|Xv+}Z?OrxGm%U%)yTc-!?=XF!W>lpXDp5UZ1NW}dXx99&tL 
zWas4#z|69jpm{5a%=s4t{dx;5BWwvZw_K=>-C4u_yutBg5O@0V8t3BS?Vy%7_7a7k zrlzM!pBz|q+~r)G<|+95)8TXp5&Z4koB=El1R+aoH`s0PG@FT-f|fwTt1JC!#Vx3m zF?^_|s!AW~8H1Et1e9^&HW&(}Qq@`3B`|D$3M68BST zZgdXixTE1z^lOGOdp&O(j2z0wsNDVfQw8TY966noFg1g^cxysR41SWhul6!CRuDPu z3w*S=E@1nqy77v+2!!~1wB<)89QM%iJw)GT1C~( zY;BhaYbcM^9dKSl7#;I2R0RpmAgN~Yo?7`d2FXG~tH+-JgVx~&3gk1H2RT4i2uWpR zL~rQ@xX2XXT!qRXAf2)QY|xeI!wZq8uixq$AOG|>7@L$X=xT&_|0*$xA{IYopBM!^+FvxfZ??BJ^-WV5u}O+(-Yfw~EIF>)l*K5<(EB$|;tFMKY< zthg0sH_U|+*$&S`oLdk)74R>p?p$ea@D0-_R`&Td!RsIfqnX-nIm1q|zP=6_BEcY0vz8V^dViEc_zEyJOn0fFrseWYb1@X7sEa+DR8;zX@4`iK z0fVz}CJ;v-w<_$+)mBze@cBM3j8!Il+_;vYF5A%(S+4P<5`5_<4RPcoO-VZJxCQ~0 zkRY;<@o@?u`%a}Bc@n%o(cHe~#EdS)xS!x^?00_Y0s9MSjTn;0B?1abJ9FR1b<>AMUP!d z?0My;Mq)*`-+ba zQ=HxLJM%Et{)1OLo7pjkiB=~Wj3&yM0{9C|f_zj@zK<)JrrK?5No6)C0ITxAsLpiv zj|wPiFzxT5y%OUrBXdaW_uhnagYIic(uqbM`wZck@I*iuZcH~I9MsU5egoJOu5c*T z-(RmI!$I@%#CybhDkAoeF&}YOqknE3eCGVuj`Q9^&b4o{Fcw12#FTh4kF7nxdjdDZ zkmYLE!gDwD>r?o0wL2;*Z+P{~Uw$aho`p9LadFkW&ViW;V~fPvdfF^dGU}_gUH_z( zVI+|GT{k(1Zi|z%^VN!5^5}VfPyPN*5GW`&`yrXSZ>;FRIp9&Qe_2WnoHWF)40HB} z`@h+q*F1FMLr$_Xu2Vyf@5h4^lOzIW&CkFWcDk*Y<7KCj3&5O^k>F*-JsaTNpp#-c zs~1;-7=Wt7+{J-NrBWyovcOt|qveC-B+3%5ID14eGN7&NAYkhz6=WhuVag#dFHiHU z)jfFf>H%BR)-f;^1EqiU_dfSy^PB`0L&^Ka7&bJIG>G2xBSbAMCXosdY;ph!CS7^C z>6s5O&y4RkU?*!0U*8FVS82_uN7AlLQ^X|PFT8o|)ICjgb#>j1vKiyX4+oVc<|u*B z29ryf&LuVB(jn+_+mMWo9zMLq184akSxS@0QYTO!cxd3$7vET&#bWF~CYoacnwoI8 zLF6D*EaG}GP@y!irE=#?-KQg~#{E99fe?LEzS|lNpZ<<>k9S)yGN56|!QBW%v4`Xx za-ah=1i+IDM$6E|Rf0DqXj8%f0V$v#fBdMKd3i1H%!zd||1@r;CMJGJXY+e&YMVQR`8A5E%>dD!wVhi$a`G9ILA) z7mb-ApW;%FO0;bFhVaGj(b3VFnVDfY(46?6gQL>fEa!b4wvX;hOt>!wGI^%cG%E=v z7l9fGGwIypyuzE^z*~JV3GIUt|4t|0t(f~aG4VNMCkh23_38&`h)DH}^F4xezTzum zDTEpY`s*ir{P4X=gL=S-o zupz18E*BeA7sC68HAq$7{6r~R3Z#@;LR!(Q1ptG+%-NN?jV~IMCic#>AEc!{4^s(v z>5(z^V=u1~L&Q&>ocNXH3LW>nqT)MUonq;H3X zW7f=R;!Y-T_Df)D`-$azsGW38%ClUb09`Prq!HdTVvzAQg6sJZ6#-QCpX%{zKe&#^ z0>`Tsd3Te{nVc^T{9Um(~`MCkYeK@o9-QkRLmC?IIuv33lcCUw9-QQnYm@ZYTx` z0Y*hqcos6wEkUe34-+B+@U+TVai|=?Od_dpVEx8f9Pyn4S@4empBLw{!Re5|3(J(I zK)(Wc0ODc%f$b*DP-P*Spj?yU#zi*Zc^fAp8cwUcS%Yi(_`2Occf6WWBqkg645%_0 zR!xGE#_egCL&haoMBPKKA#J$M4eR$2JO^OOevF18XI;lYnh3^nEIMAo<}ibxj9cLk zxu(_f^hzu@-B_^W4@WWF!NZK@?T4y{s{n{GQ^K|a-x6U&KsVAF8&qzQ!H;n!J5ffA zU~YMZj6~ciR4EG#hmQb<@$2vSj4F=!Q=i;o+{CB7jRx4chraRqJ48|8E#aFxG z14vHay1Jr?I{9yTOhq|_9rY28hmHW4vSg5$xCi-Dl=hr!|T98O%q zHT}h)65g5IJ5Fy?nu!6iJ{U9!;~9+K$j22x$k#DOjD{RV{_~Iw+1~~I{d%H|QxgVN zz|>vDZ3{3kn>vH?Bly5QbOI!=0E37zG9)HUzgH6X3}GJnpY%Gc&o`6R(Hi0Q3udHu z_+s3~-D6jXIW^C_*_8=M{5kOZf->^J z6=oy^9fVK?2JmU+f(`87G56+81^o)~ZuTRp#Jg>*xNi_{-*};G+7S5XKKg_9CT6>F z2nl(bu;r2whu#-r-pPfB*0)MWrMSC6b0jsLY~BG|0>h(Lji-NJWIKc19`^ z$;!wc36YBIkX1$sg$61m_v5M0=eoYvb^m_%-}m=8zQ^%tozC-pzQ*(USWhrP%(ai* zPb2E;1<6QlQVUrY^4e`iR0ZT=;hRiN@$_$Ob`qd?Hn1U^G$r;GLKs1W6p;0!k$=~o zk7XV3o^tluL|tLq4qk)|1feJ8T(YEZti%fl1sV@z0i<#Ma?IahD8&0i)kaNia74B9 z07~vzvu8i4shP*e$2W(CnZ^6=OC~)xXlqi8^At~>JelCJI!i4uT+Q z;gUv|CtF*G{o)P3PJHIk9W9Jc{MfV@m(SPu3$LFcParZ<9TZ}v+;a&i*w7};|oGM)IU4p^^A5cSW_A6=bx{ssyrn-82byJqArywO3T4p69;{-g1EM@h)R_bj& zT+5AeDRAgvVW3KAc%@^k`)u2mi0WA8tcl;hkG5teWas+8I*@a`Nl@tP=@nD=y^>m9 z&w`FfrIud}W*d-t+nM+Q#Y2=I4sK|QV6+;j*4Ok zw(fk8K84%RA!>PDDTkH5oud-nA3v(%8qiB>Jj|L|VWxo!K!O`SIdkz3J#n&Sm#?cR6>ePQ6 zUBhz5!^4AJ{MHe@>-dXZk6RaVot)h(;3;h6nX7t;@9*-GyG7U!faQjg2W#1nhA`cB zDgH1xc=5#%#~Y2SRZsfjpMGz=pIf{S!CMSn@ff53@h|<0ii%3p-*tN%YH+_2@|H4Z z=$~1aT4GjwR08k?&sf63zwceuxN{GI92IZ>;|1;z5|^5q+OX6pQszb@`;XujwTdW> zrHqOT0(f`PXQnu4E(RZE`Yy&rQ5rub@$SCD^|Bno%BT8_1k*R-87TnX8Jrf*7Qne zZh-sL_wDnU2ME9ltgrY8@$B1k`)-GzQs#IfnYCFoejU`1#lxlriSO1|JLn1E9|S)& 
zloy)_z<&^m=3gJaCysu5W&m4z-0J+0v-AKpEK%xgYa092dKteARtFZUG}bvXM##1P0WJchi!|s_64$TR*8xU#@QNQuv*mK)a{pjzYTuSt6%_?qmx@q>blBL6&{!< z8d1hN*h0@P=;^r*yRfCkl6z-KAiYjPvT@fei(sZ4&d!BJ1!Asaz8ukZ*K>>Aheo>U zn`fg*o__1`pVqFP+`BuxuC+PrIlJ12BWEp^p02sg!m|;dWMIGU+3O@K1W1raYoCh&*I(H z5$^a~;PWXaFHFo-0Coo;PU1MIN4ZTWCsp5rT?~|O+zy%K5=rae>?NK+lg~lC^aW;K zIASsz*^S+{YD(NXh)ROMG?<3DrR`p#MnXcw>4!|LUOs%ELI%Y)W9Y`QqX zoAHCw3dNh7~#?WX+G3q+18Hy&xZe=G< z*EiLGeHJ=-S{V{{?KkjFfsv7Jz@W&({h{#ezYjNVkoBF`=;g0tS(?sHV;800P=phb zE^CXUt>AsI~!FBJT0~Bwj7mJrJ8U-ug}*FE4Ku$n;o|6c==b z!P)WUy7!nDoG`@`z%J%D9BOZw{Kt5V29adO);|F_y+pzL6H2AM(3PT#V+<6{h9H}$ zI5aZ!V|fRnj*hjV)$MD~69;+sN-wN);3GX{fRmkgSXgC&5D5`RPjwEeIyfJ3mq))2 z@#V<6S~es0dT!KsNKWbu16Uj>d>Qd0YmxE9FoRN%ByGT_lQQD)a%3wf>HmxwOdDVf zAVdN+Ww`Y5NTb2qhG}L@Kp%Uc&CxUch0Wmx4H#jlguQ?vG0a_dxBXu&K!ycVe83O9 zEeDXo;O)!6HHv2B`ErEp96#Mm&E-P_bc%Kz_Kk?F5~i)YdbN_#Q{bcYF2M{UTf{oa zD|*=wyf@1=Goz0}d^Iq30C=b%*r~%%Y~qZ__|br^vQzhTv@F^l5Q_xBo#04=7?!dQ zJtju=9~BmorwAQC+1I=BfD2k;7Xb>^PCc--HQTllFw2*3-}bfr2FO4%GWuiQ) z0q;=u`c(20c?|I0h7b|=XCzKO!PZzSvRbu$Gg@U7#RH{{cR*WK6GXjvbqEt7fmj}! z&;F4ERK$MbDbTqKIC{PSA?xhviAg5;|9v=YkQ0y_+~+bn`Ws|{#PK(e%D#R=j03f6 z)Q~_;_fB)IPrUoUA>+?5ra8TJ|4y;Igu@`X4!#Kek0!kXZ{4)qD%zT-9c#cOmuich7D&fN2pck-csNI(A(|d| z?PqM>sZk+3OCd-AWl(xH=&|uD@HCC((+Dno+Do2EpE4bAkZ7)gcx$yiUaDvrJ=TUp20Wb?f%bO{e5vpeNLNQPL&~ zT62I*k09lI1SRdOB>j1wBto=l7Ny{2Jd-lOlJup~6awf32Uc`~$%kJLJ7_UfYtI2* z(zOB-2-2z>02-O?fs$bfFbKT;2F?hMRjqk^Ft#j0GTl~a9HJm#`ThG_4T&0nQ-HK$ zUwC2;nlc_la^t6S0OtaIFrbetg4?~-hfE7irb4KAb$fJKzkAhvo_w6YgzGnCKj>U2Nf}7V zQ9!o6{J6G+kDHzbj6iz5KD4XP{wMP!(7$E1oEcewvTV*D{kP z11Y~0w$1nmmCMVw-q>V58>yA)CNdC9GfhFZK z0K-yyusmGW_~$Y)A$_-Btr(;c)mNJeNLm}c3m_l@nGY1+#4Wpkm)B(A?&YVIj*gD6 z?)0~16-0nveqVD(17a}ThHS?lePKa2+aSdp$cdEbPhF7DZclBtNkVz3z2~gyme|R4 z!*hoTN=2m!R3tXpQ59`Hp0DRWRnp6sFSS47uisxdDkvF`z8eb`-gI(6MFHR<+cFZ% zmw5u9L7hnDRVn97fSv@_AnLhslP8dn#E50+^rC^1i1G&O8zX5S;N_Y>C+97bnj^U1 zTG#m4v157H+gJFhzlizd=VBe#8Rb38%na4ugyH1a;1Ls3Vne^@ncWP9fywPBcf!m*mj%CMQY zR5uv^2o?!a;N$S@7x&><{><%s68GFC8$5wpr^EIEq;D6HEY!fvkVxr#XMPM47O6Yz zrxywZ7oN#TPk%MeGMD@4ey_Tt&Q=PV?SL%>F?#}#SgR1egWQu}qIVb)lk;pSy`{qS znO^(SKrvI{c_=Axg#JYFgd^)9MYP$r>9y@0?Qng2aKjRCWvSVLB>o3enSu9V8&YZa z{S<(vlLHBHr0XYguJ+?7U{mreqWX(ip19H{equny7`lYPCTlLs)uQpJ@NGhFWg@3s znh4HHgfNj>9Y8}bP<`FD&A(cN5nCRZ61&>ju6f7>?ID>0G$k4wpbkcpx&e%+p9}F! 
z5Dei4)!%bSX(oT>y`t1JxPfpi1_2ui&Nttq41nt!+_T?c-QI}A1M&jINgu&t73kQe zm~x1cqu@H_f!>!)`cRbP)eKQs5K2WBE#y>Oe{s)UNYgoNap0eLVcy%qUPZ#75g>}` zUtSI-2vOXx?tY@EsUNRRDk6^Y-_~Mz)4SC3LvQa!Xa+_Z&#*7In6M|)7l6e;G2T`M z0GR6XPuSpQjS7VboJrZ}2j%iSlmDZY+~7DmN-jJK`6g91U^t+{=OKUrZv%)n`VxsX zln+n4k{cxuoRg}1ji+_%UZzojA#eazuhaN7GtI+7wHM>Ou5Nv7B5R4a3$-_3;-qG; z=;o;;(#b!kp|awwEZf;lDRPH-O$CaBHO`dBfb`#{{5z2*wdaL+iHT%aBv-9htt!ENee7n}70em}qLn zCREU*kU;5!#B>>K%~Feu&C@^O0qdtW%YF(mF$75U0u2xJ@C|xeKx4YIbTSWzgd0V# zA7am6Bz5Z|m?eA669H|ihMPM}O@$(T@a9Mx(kc`sn6?+Q6V|Lz77-Q{WSF60!z#{x zrIeGDN(BNS_8@Ey;%!$)!_-9s}DI;%<~GsmV`{ zNii$?odOhhxA`OL`^HS6Ex|37l|hhhGQ!%do<4hq@?n^2rm)xAViFPy5hV{=3(L#; zCrs%H`l-udjuLHxCM5CF$oG&MyBsvBHBbrgRDApPjq(n;#17m;RbaO!Kjhn0?4Ioi zIwVbo(1}}%aoHVPRQXcx9JU7~D8Wr09s0Dai~z&Yv^<FaktPOpgI+NcwArk`AhW35$gKyd4JdchK{fc7^ynz zqgeE!(*;M16Y&tNgI=ebvjM9&M^(=*E*89|Nrj_t;A6|f!>q7O%?H8vmY4nI@4ukF z(6>BA|5nxRb8pegPoPX3A0H4QwJBYaq*8Az}D{aRd7- zr&*w?LSis_-_)_YDcSqR7B){oehK1pdwP0!$5qh9qs;=W=D6GO!-o$}{q<}5^?f-S zRmOkSuXES>;-`He6k7C6j_-EcZbZ**)>-TY`b*15R{+TsV2d=O`fMnKrR9yv%&sri za&;Y4;M%s9!QiKk`T;V&@+SjHY}qj!1CQj z>UP!jI}_0fRD-Vg12v5Pj&k6ajxM4$OelJ*ozRfOpHou(8Ma#fHSZkDY4Yyzo zZd2xkRljjrtfh=F%}i$}VgXPYV~|1`A|rtWBP^CwtRarIifHQk(cyv&q$f{8Pms)L z#SFm1pc)0&L>fH0li=cd#iVL}l}?URg>MGEa;hvyVM^@ssgEMIlAaCWjqtiX)pSwxou1fg_O?~RmOTF?RO z(M+OwsqSb^LEDbs4#$C<#2PV!h2<;aMIw+{p4U+wPC6Fy5TKpT0EEB*fAP$8a$Fx( z(ED1wmgUNv9g6+~t0&z1aaF(ah?kqtGN)|U#>wODxpn=CZ(z8HD>KjrPkn6&T)#{4 zK^xCRETMm~F&_3$VBJM&Dd_ zHZ?J+h9fp9aadW`Bu9diD|ofT_Ns&jkH=7-Ko+|B*!=F7Xd{@8)<${VFTa{JmZg3-<(@TFJ*WZjWogiN0~k9B3I)Wz3v22d1X!F?I1n zMG5V_scc@Cgwfisej%d7tK0h4IDl=nsNm&#kI2BJOOK`C!-!paHbo%C#5T@+!xA)7 zSF?dsCYo5Cy%Sp`wg=bQgAGC&^1xAD6Rp*p#Z2;pNzop!wJJw_yl|i3FYl#uSOCq! z7=!PpB_`wMqJ;~Isd|u*aH^uX)oep2V0zs9(49aP2%epXrIT|6-@9x8TLrX@zl{5* zpi=*VH52YFL$haL?p1X2$1kI!$(grO6lXfDO>9zfg{g@QnK->0^kxNGBy(Tf36Kvf zgdk?mY!(!)jDhCVRQynt4Ka8e+!smO$&{OuBhBKyeeOv>CsM$MAfn2x3E5^mz909| z0enwY&z`Z~syZJQ7FH^?#<4+{0Qcm8A;hPe8m$h*Lc$YI@-{SgOJA>*azJI;T>*<_ zg4-{o8z=u@wAa+s9K0VsT@JT}diS~XJ;t5e^v9<_?dVhJJF?+_kgQ7{*){dL6><9) zbVMH`K`Lv;rl{ltU|bbiGD;EyH$^qE7Z(;55}m?yxxIr67`qxU_PlyORa0SL`qxqS zAXNm^r?Y4+F^nscq(0%)h#4tEE>#?LuR^ssr&X%zb zbm2&uF0%>TyuYkp*^D8lxOriZ^#Iteg#~6{GQ?wdOS;nd>VMwD?k^<|;x|L1%mBBf zrdFjnZJDj--=A%ZpN;F`FF0=`mbAiK=>`&EIDv4ucl9tF1g8iN%!xM3w}}hlB>9m3;zUvro;Q6 zv`1t7k97Npls+AsTUB~1yF|SA;qNrk(g3PB4P_;Cqx-A71G*J?Ghkc=A zdHh9jwzU=OJ#aGkmczoh^pbSdP6{*Y(5K7CT78U5gaI^8LQ*&8!AV6hdO2-hzJ9%S z^6y*mw_Xu|jM6VMJT*p(8>?6hSoSKXHYZiy2`Sxv#8U__vL7#!?NV`eY@t}Phg9*{ zi1nQFZ+F-RKp90Z5x@V0RNYp1c>p7!*=p8v6EzwY%~({869RlzgUrZ(dYuR6xNru{6F&!vprF}AbT^STf<@C_}O2}P_nij z!q1n4f~^t3f3hzhyGllqbsT6=?cyW~V?i8p=F?*>>Ar-1;uf<>`bR2VzqkjWKQ80U zr|kb%Vf7=|xQMCG$e|lsEs!4*9sO!)UY4&qOXhO;8<~}vv%6KVmW&!2AXx!@>$RIV z6Az^30@B=op;{<1x3RGE&a7)7V1;(? 
z-FuISn_jX(92P-`46+FfKZ6h^NEkohmlUgY8?)=6F32U}Ile>xL>BLf0-vObgR6qCskLLquBl^cDv54qRD zP@v_tr%1pz4*?13Q}7`fOW;Ezj2m%C@0VGca>P(6QKy&gQfQMcsL`^DCY@#Y+w4Z1;u{Lo4#7^UW^bfcx z8|mK1D-$p+u=hqWBy6v~5|}o8cJdam;RZl}VQs{t;>ZXQ5~(Xh=?Z)&4qo~l&bbd2!uaC#`vTjOik0l_K+E!|L&?@Vp} zhE(jUuvbB4@jh*szZkQaFx>P5qOL8$1K|e6yXvezu?V<;B_J&twg<@{qOhuw1PE3n zqxC)1+lQd!dNfk6O7a>~B9nCiuKLK6-Ltr*j>Tu62zuH6btb+msPeIf@vSHCC?XM< z#8D0g#sMAy!*z49JyLt|rvU0)1&;$kMO3vJzE4Ux>2;}wr|BDdU1Gr!_t?>NO~8cE zu(P2u85oXeLQzD6queN)4+AvGDqvR`lG|A{IU$BRy$4wgFh1bzJO{)`^mo8)L7K8g zci&ggF{IsRke3Ua>s5X?G^VOA-@ELZQb1qw_h$%xk^NvzI$)ebiHfmG5vU)!ai>$H z4~onHf~&CUuSY~wf&KJ~E^9;Sp6B*WAY#jjXtWAoJUIFIE+Z=h-`7*fnjk z2a5JC6s%B|&qu}p7SPf5+>Q7GL;$T5BtBr1Yu_~u8HVpE^y}5-kTn%4#$1F!LmrR( z1xkUrTvQ}aVKx9KsFxwCfq}x52BPAz+Y8H1EZ!bi>0HCfMygl*2B+qdM>S!Re znYzHfr{e?wA)pdrlJ!4<8Rnt^)&>I$3#l@KzsUrFe%iVKZ6bPluW0XYqQ<$!Ncj8NaOcnT zV&Y2RWNSh2JaE0ckjg=$b`8jc%$%bOI62W;yKtu!IGT78nhfrkPOIvQ9oMt zHZsr{1bAaKAN%ZGa?Ts=@6s2Lm;yqGHZ!`4q(S2~%=>FJqRJEhF8|83K@8BdU?eMK8 z3YWKs+`c_$Dy0nkEWXJPAPY%wN?LzB(JFC|xQ2cMFr!#T-N-dx1NsUF|4j+2C)irU zF8}T~ZQlhQAO}%kQuL=xFyQYC1h|3_oR4AEIPKj)_|VnAVKE;0e&sc(6(U#pv`_X4y2 z_;HN@Hk@{SSB^>}Jse=2BI~${v1Y-#qzoZW4f2-&#Mo*mF!1Xd z`J%)F05?5w>=Vj^LXrF@nF(T2^F@!@Kq2CI$w8^*k!a_s5`1N@76j|M9X8nOtbGP zpG(j$%yal;aeIb;=RN;d3xLTQpIY`sBt-iM1;qei!U0Yz)aBFb5^Vi`B(-7lpww;v z>WSQR)LeX|b-<+pZw%DMtfO1@=M>ms;+&=gvuY)DcfGx_)nB!BTvlqbWJYdZ8m#AR zzdsZ(%U9G4PQQOR5dDcmlAbYkGWtVc0PWUsu{;W(_t85InD8*=^Pj0)_|)t4X7p{!ft2b z^gwGwG@v-m%*@nY(D5IlcFT6N4WD7j{|eoOANdmy`T8FPm7GuzQc$5i#^zTyQr^DZ zD_`vih=E*>i7iyaL4a$03zulZBwVhjhW>BD^%POIp@z75WLv}X$3w1#gJ;yqD$mix z+$GzkNvarLB%mZ~EbR5Al9JDemnioN4ST-QMS%=FlWbn7Nb)Z%2^@Q7hm8-(2PPT8 z)^Ht_dNL-q&J`0&<&>|1t7uxFo=~vmWjB5wMsxQ$pmT&64i!y$U`55-Sz#VZf9;!) zKStftx)Si5=oZA)LVv+F#0cN{zjAiLn(FFuq9l|wG&G)(@I8Cge+%Dp$l^6sbl$$q z=%9swa-$rqDo+K_?x_$wT!>5{y(nvgeUBZGV339nJkCJLkGfM;Tl?Dn#S0hS6U}*9 zU9DGn`0!z9dMIX)*cWhcS?ZM`A9unB{x>#NWxeL(i)G4LkHKiDGUt`h zb@jL&U#+YYMLz=FTppgj)%*-iellBn~qKms8Kj@jDlnFiteqTWH8`^qT{DNcH5t|wC6%=K6|L?;2 z;VYhObLaNrL^H?nK%5q2KJh2^rx=6p5eH6BwtUEzXiPL()gH*K&2Y$4&jEFl(dm8_ z^Nbgq<{D9R_6l!N5V?qx8j`I}JQfS|Jvgywdfc}0zEuRHdNMJ$TeGHti>GLD(tAtW z@i6d_EHJVLCw&N1+6$p4$F?C81EO9AobDbC&Dr$v!GpJJ_p0vg_rZg z&q7O$PIm_W4Y1t26CXoWCvp_AUahcRQRxD0<7M9=av3BK1q@Sc7Hq3~VD!T9wS%be z@ZCnr!(2pqJ5-M7L~8q;k!`VnVuoY4KQH{6&Xt)`-4+3AW(?S_@l%_ z85N#Q3!uLaIjuR6h(m}SWDE$YDp278cv8r8Is_Vn+D6P4CE?n?2)N&q|3JVuXNo+2 z8jyZA1tpjtcub7Zf^n-G-Hi6+Hc2cG;<(@&4TfBRq#kI8KWI-TU&&QC$ys>7^zO-( zl9C|^xSKsZ`R0E~?@9WA`iv}>>%NWRj-ffhsv zPEJ++V|KPFo45&oRnQzQPGWG0&hTl5gp3Kh3_0) z2cr2821r4XRc4V9@_G!_)ih!mp|k+5YC3Q_VSPn?zvo%KWAk=uZ65o5a2sd^$_s(l zq^qaL8(M0!4LajBifB*Z4v2+>0;YFI&AW3x|<@6IjzHs-D{X)}fK$Pcn>v`4DPkx35L z`0%}WLJ51=ci4$StBq?#=uXGr;Gh~7Oy<)yxveP@SsP14pf9lF;sRt%YSjvGCNPpX z33|7Ef6K`=(-XM>3{5Ebz}gB5N5&tQ&@+hG%!tS>z zBO(<$^3>J7O9R8$CnQ;CVgjuU^-bhJgF+QXa;WSOR?>rY2~ln9-Rg#Qac`FWi{lS? z?TmI@=dJEt5|twG`TO^0&COg8Vwad{vc^Sj*g&f0EdgvkWEZgC77#5IyhsCY{KC^~ z9h@4#jqDN7YnId2xO#N(+8Y(q=n-KRzI~5UkqS`%VJ833?yOC9wwuC%dRP*{Xw&ra zt|$%y9PN&f>TQn5>wN)S2ns>41yF7K13_i7U8KcwA5RZz6j(4>_Bek*N6MJ$ z)Z5X)+nIo9_Gpj}IWkd?H7#zQ4G|c=&a4>WH!T4bx^f$8+0{XDe;FIovpl%Y7MSo! 
z0XzZ4yjf8loN?9{N|ti+*X`5#Z~6f>&m1)9+rMr5GXhIhxer< z;nUq_w=h>Q`kCok5>k?Y_qjz-*TU?D<6)EllG}mqA_j`lC>`_51=#S?zfUz@2y>eG zw_W+ObEZSwHDJ77zJGT$2sLg(f9#>+m+JiIX&A5Y>0?6Px`tzG0M6bgz#FJd)Tyz) z$0l-j+a<#2-6`oJe4JHfxq3qIEEqo&Nu}T1+}wXbWa&%2ZG)XUalhQpW^{|E9vKS( z0(f&J@NHC-|5zI?rgzWkH6?&Idi=j->sh)d_LKCtV_4dx@X@2Rwhs9#_10MgTat$v z+QO{HHNZEf#(YEPjLpuffK7uD=9ljG4uh8FVV{{}u>P;w{Ez(Dkep~@`Xt|^%@Q+% z=~~mL&!3?`-HBgiW7yuRWLfQkMXGx-QlOGRq-l*DEVa60J9BjdKeM!UEc0y+c=GcK9$eh(#zg*&zUkmRXs8 zhNn+z)p)r4r{_(}typ#6Zfu-<`|3Q?{P|iZ4i*$UySTV8oVjLMUw=5}(^Yg#gJXUp zJB|UCpSACSAH073dZxTag9YLy>3GMU_C%VzRkjxNbvx0ppfU7lE$v-mK%LI*qZ011|Abm2+0DgBG@CcLoZ1mj9uSaa`L**N{pb!tA@LN*0s*dsDUvba$Ginsm3tI&mTvs?QhV^ai~3yIWCQ?Z zT^yX&(DM2LBF^M$2;+l4IN3$Yk| zWtF)X|*JZ`p>iqQS0${FH5)zr} zHQ&#Bx8G+6vF3IV|AOK9;K6#t#;R~?>1k_oLUlvB?b$Hp-IXY~fRgz4ueXy>kUsK4 zpTO3SLRSe@x$ky61${rLaPpr_XsD|0004%Z0sJn*{=^AiVAa63?@Zs3b1&TE$}$EX z|4O+HH~j~=Jd&zrvKmw|%b#7jaue$@t{dv|waJsXb_e`WqjiHWz$bWu3XcO{F7yk< zZ{93Vo}64G#6aq>mO;K!QX}Od?^aV&24vdR-F*)5G*yg?MKLOmY}Ex^G+~k zdx7gb6n=_ObNjIFU3U2R@yp0eE+VuYo}W4#U#ytq7;&WuF>VO9U_n*99iQrM-O)6# z189tCz#~hlK=imLmR4DNbEia8om42AEhSycp}}cR_xRe6`hUcP%)B#p<*gy*_5Se( z;h}zl$j^;v9$=2u-u)J{79o9J#oZ@`shB<&VT(ZNi%i3=D7^17vkAMy)G!k+MC-e*Vi_m}3|jh379*gaI7kQ=L8r78dfB$lMkO6S`>3`HeZp&W>L|ECXI94a^|R(f-f@Y*dTdPJ zkdQ^92Ojz3*^^s@Featcb(gX4Q9C_EQW8F;QD!n4g)STlnnEG=j*jQ9UtfTP-n6rm z9>BUw+uGVX#pYj)rL%OMh{z({y^@C8!xi5?taX(cnP($$=4gywk~yo7zkfV;xJ^Ov z?GQ{wWgv(akL7CqA9^g&f$%Mrc69LKE{S$VxJ%dF^X0GG+76@ndyO_8s1ytAR-^z& z*$NjTBkETh<3cqL@*jK^?{XYs6nAuVtWbPbUe1DS$NG2sbsL!5Dah6zZE_cSgE4@gXcUF6lS^ zo2!uv-GdUc?l88ApT}~-d!yyc8(vsrvmPoP`6*LtyK`jTl`97VlKW>T$qIgqfdI6FO^)8Z_%28(4?$U~_eCnDrs z5p#x12;0^SgoNTLAQp7nK)l=uG>n8GS6qRqF!E2N$Bx+8t2Vc=FcAO4D8@A)%+t3G zK?#f38!#*_E!P|8a-a!{{*CdztE8kBBV#5yXeoMyr>G~=Fh&XZ!(FJ|SW;K6RJC|x z+0XL6LHz$mOO0iX= z^pcb_&fyndetbFXy?bM_9^1v8%n&yK9bO5EW=l&8dAVtY!r^^z^MX>two^cDG?N+? z_wGQ5d{p8FvcO*4(PsrNY*)a%-VRv58xN6iT>NIq-wMQkO zan6^*C6f& z#)TRgDgE=V&->1%67W=xb0GEw90Ci$@%tlKAc&s51M13{j69}}6)tcp6xpgS~wnVAc51LC!ugLIUsRD{@gg@C|RxF7_O zfiUJD0{-uR>5|mu>KkD94t19WcIlA_|dqd%Dn(UT*vW(L4uXzaz%*2})#)WZ$pj5xBBeW5cP;~TWU{(oJhCbjkWb<91 zl`xBDC*a(BXInq%^d2%(%X1xGp1I%Fm+L2HR$oSOHB~@?^lM?`3j_55znmNonr2?)hfFK}Aw_+&)BBeyIoq*k zyr=d|NVWh2R$p!iO_X6z09Fv&!eIQb5<2`g5m87}x&)Ab&DC3HVGf*Q^ksT}$&PIw zv%$Y)nBiRQJ#ZuGnCS?av zftXOt2fX$M6RNw|{ZXgH>j;67TaM$KHEP)k970FK4U6Ab*bC&ee>wNUL1? 
zrUai3$QEDV`821n);i1H!J!1Ba5ljEWR=^AAI|WEM&=N9%QfSUw*YgeF~|gGZ*O0W z1!E6MDI>pU+4ALfBPl#9(~o6ssZDOQeuxif3{w32@LN9!i(!fL#y43~SEr>DfS|CM zAW*lX`!Y5$=|XNN#7WCcTwlQA)ue!e5U?=|4ZAypRtidDCD50cz#fQ?g(&Ql4I zbXfv+hlI2Q@Ck$ooa)yY#2k@j?j)>e55cAwKR&bcGN1f;dIy*JOuI@s2ZH|X3GY{@ zAc^d1pMezk`t0m%^W{VQ($$)1=ID|J@H*SO-81EG1Bef37g#}qJP9%CmqLmiI@En# zQYa?|22=|=(VtME50jrihhlv-E(A5Vr$dL*RM|i>oOP+vtaXu ztXBL!p{}8^>*UGY_>K<)1G5ovbkN1w+~K@gGXx1?k$szP_0tGps_R(%ri* z*d?a&@bZ4a1T$vOp@UUYQc@V2x*f1^zU#1PfCr^uQ=tkSD@KOTKoNsJaCa#O(tIHQ zfCHBBB;3ft`_3+74_yY4$e%T8i6>NI)AzIFZIzR=M`>^6=u28{D^3P!;+M$SM@i&e zz+(q=IP*@S$KbIY@B|biUB>5a>60g*IxIM}w&B)+q-7R>FC{pwB)I3w$;nl?7q6Rd zn7x0(0)p0s*c`s7*+8nX7#UI83IhjVG)p&N>W6^gk7n(s5*%tsidloL!3%mb?;?Z8 zDAGZXOshR{5AO2IIREfIIziU@L3E1z>HylhsdUaze@V5W_ExT7$VyWAlZp?028TeX)W zs(}$GeRO?s@j6`M^Ch@1)*0P{P$sI;`b%HkDn@w@sb?@$KsR^p%9V-vzcpW(7`fNd06pL4JclblU2^v*ZTN5DlCfD{7^ z?nUV7EPZ|b-)Qx6AyJ~aIw*{x7&wg12r$R>!h61XGezg@!gLQ@G+H`3J{aW_-#D=8 zgPgA-Q>^wZ8yg!O{4z2!GgTiF@Fkuv;-)7(UrV>K^}^8q>im?Q+nysy)eP$r$cw-F zO8`E~xD0Tn3*39rh$uV35EV#j)D6<7G5-9lIeQ2Zu?$ZTY2=E;?aY<@(Dv|QZ=BsR z=$CPz&#z}P1FiIAg>+PsCsQY`XT|;Qv3f~&#`Ws|X9E`(w-yxzBI+)qUcSY%R(JpY z*$T}ZSL(wg-7h?}H83^}?dQ(F+rp^p3rvYL!@Zdxqd=Xi5QQUZEnL(Sj`maT`g&%pQ1(f;Qp~j>$9x@C@{@}EH+uc9M#})5t&fItkU3~Q0QSUc%4_>sJSoA)w z-1$5q^O&4)rB!v)92;OzT+7|e($bHKnI*X3{o|x0VMIdnt7Xr`6`J<&p;uv{ zdw7B2oPJ9i;Sd!pXa%f*H*XFs`P3voq9aybStezMp`lUQ33D9p z)W&fwUi<`)A1SAyN^${LPyK)Pvx0Dvn*AQd2TMy&S43|mXp*24=y81a2=BaovMW$R zP*5-0Ic0Cw|Ji?TPtd6d6hHIha-_$G)xyH6*S@W;dhsHm-Q5bB5%PLto=idezo&}V zpJ_t#buY)++HAXsAQakBC^gH`2%^_sKbJ|<&Df1D|K>o9E@g+Jcu`??x;xsCI;_Gv z=985LwQ+Z1lXV7`%UQ|19_;K~akH^%X@}A)&BpzyFRQBNO53&5l{S~-!~6Gvb{|JS ze=f#76jgV$!r2pASrqUHx)i7Vg|H8f-mqQhV`J*j->?DZnmKo_GFSr;p}v5_Fzr|S zMubR2)?CKlhS%J}UiwJ6(1FcRgWhelK8Pl^iZe3p_O)x%d=>eHs||LQ;2mJ0qeH1* zPoSTkUVfa&B#yIQyB+ek(jHAchBOlerA2P7+4mEm3nZ1_96-505D3wQk29BPg_ydX zY_mbzS3nJ~Ari!b@B*|P^mSx`#K40-eCy~1r379}2I#Ibrk6;Z1s(04NeVY>)7-$bkkGJ3oaq$JHDRyBnQTQc0 zbL#JLPxA{1?A-0-;3GG&TYy30Dtai95EdTE`|Ax3#7FoP-!HWP_Kj{&EGBg^*GXYX~L# zyjX2ARrlajEQO?R!=bX7xOV7y%Ru&yg#J22X!hT+OB;#64&omh8^h{)iqEzY$15b> z3>+n-IaI$bug^|FYG>k^mKq% z6YNb1lSN``P&Uq|M#3->pEqPnsyQH^r@iPbAF!jHe8)Zl9LFqCl^zJ_@y1Q<=t9T> z!*LRE3lFUt1aQsKv~)1c2ol-~%o=hAKE{=z8a&xg@K#m7lah)tnoRw(OFj)4eMVkD z{N1E1FFRbexTIL2bD^Xe1lI>)=X7ECMH%W$U&#$2Dr*38Ttv&)gFe3;7-E#BAfi^h zK?KP+&M`uTPDLDzG=Vg3{XGy7G?R>L`HDrHoYTOsP)iJupWz%*4Z+f##8oDD)`e&k z4K>qM#LCJF?I<%&|7lUbZkLvp5|4Jh=K|@>P`F$|quqnfd`6To;!r*_!CmSjbGS`2 zL5D&dF*kjE8Thl&Lj|7xLfTZ$K%B_JAO-8eNl)OVf01vlGKmlG8ylN#pfHcqaQ%?8YWvTvWE9_)4XF&p11sGYNFt_ zVTwYJQvZlsS6h1;{uWAZGe~Zh$er9k+=14GoiWtT7#kboPFP!!gxksqN7n{uRJ2av zU;^gb`R?6xE-o&u$h&BWi5UR=CwV}t?|8(YuJY5@c)m$i92P*YI2-UG)?;yXHG5N2 zQ(AU*=!Bb<1UHsB9e1UrG3`b*z2zZ@8=S%L@nSnXC+BKJ#9UuRt&eYKZBn0{==`!_ zp4{^HG50Sit4(xNq?vs70fXhAXmlb|(kAHPHw)d?Q~EY%eiT1(-fPS zX_}mz+%-5Tq9zc1N+sdd*5I~R;~lRz*BNln*V>_N!`y-PYdcD;hnQ|kI|{AF4-CX~ zKwu1^__-aaiJ-&Cm;&IIfBpKsH*K5H(K7XM=U}QV&K_Sl(w1xQYa8KZ!{&P3+WO?p zo5xT5OwzOwL6!GfH2!v87TbE7&OVh4dW|P>w~4#mF&obL0bwioS4Jl$U|Y({%2Jtw zjqwEip~Hu2UQ|_G@Xm5Yo*vq4^<$rtqoX1ckm-tu5zl!Uo{=Gr=MGHvGA0kAn%**4 zJ0Q!4dkp0O_DeLoKQ5RD$;rOetl%t-k8$Z#x3|s+jRHEyP?{$ZThZ9~p*&jvsNsCw zxc$reSezqTQP7wbdotBOulY{ zyw|GqE|FOLrN&BnZ^+-9r!ZKvH{1EdXx_u*J=C-{1`v4$0LIxU{h(!bwrtk8y3gYs z2T$50fQY9j0DOAC^i^DDiI+FyJy6T;*=lOjTHpBezdoQeyYsF2r%#`b5zih-6c--! 
z?6Yj^ljR?0pJn+PlQDGViCUP3!-=7QrMa3ImcPUkVEBQ1Kt!9SDd6gfb<+s~15@Ra z9h$iydidCGS>8`)IOZEZhF*sRFsBSu3OwWU|M`>}LHLx42|6n1gZ<;Z=zOF9J(tj{pAJ3nzyDKx z;$`>mf3IAbew3#VV|M=iUr_n~-~P}-L@lZnI@NO!o6;m;h9@d^8jAJ#^Jfm$`h5*Y zB`CcCrt7OWZ_Y+%K{q^w*Dajw%K?_c0f;)50Hp@v-UU)yj>6@R(4}|ecn1A+9zz=r z;pL*`^4`wNbI{SVvRa3*uL}UOyU{nwQ%zH zkB#je9rMwIAZ)9oV&H|ORo;gcc@9So{NUxJxdE`?%(L2rE%asb&)dlb9$`jrddm%k zz%>x>LE*nZ9#>U`nmkUFt=7iqg6W{a%D7C=@K+r&lvoLj0I#A5**;!^LIyz2MC0I( zmdWeAm*4X;r2X-%O5+KOgx=hgirDtt0UfP~6|QFogodniM)r!Or3Iz8}9*Xg0X=aH4fp}D23?M!dTyVaPN_$B36ul0WOf2 zcjWniHGAT$hNKb%Z2OeCa z8{ywzHG@Q-zYrOA>{)*mB_|e=0|GK=j{E0t-xl;%ikLR?TCRh1kzy5y#lw8{`eUKW z0WdvuBgcb}(FxC%FM2gZ7G8#was`qA(62Gxg=aG+1Zt z9|X{r1z4#S+eLX{4)W;fuSy9CRos)gMq%Ww8ryy<$-=WU2vA1Z&9R8ju_yikO5USu z_uh!Qc=<9{JucBKGSzx~dCH}D^Y5ZR0g{Pc$pvjd^axNJEBc{O|L#xL`?Gnsov|Cv z?`6N)LnIMkGl<*@F}1~4r`GySG*s5;=aM5ZTulmB-?GJv?@!>HyLT|XQvbxW@B-+v zk3!HRdd3BlEw7#jCK{@|1D^#*`>?eoOi&`Aw(I%iJsw2JI7GVu5yHE2)b4P2uVVLB zDM_+s)3dSdqiOFPjk$WY?Tk-c*|+w2$DRB@@sQi!A2c@IIV|>FZoO-p?!^Y{-e)R3 zd-=b++hm2FeLxS}IHrId_+=N?06`W9z9t7k>bF8447pq#FwtgF-zaS5~p|a=5>OwV%fCQ1u)ZS29 zo|cZk1Q?aZ?&kfyxuR2t#fmufD+Tf=7Fh~y9$p$R|Lnk!o54_a61U|IJ;MhQbXrJj z%3qRf?n`0D)E`i=pJCy)C_&4N?TFvOmJPMIJ_{$9cV}mMj?8K#HJ#k#kKvxPQHmT) zVA+a-6}9LJ4A#9TlXwHt252HS68(CtMf;}6uU~gH1&=)aa#}|ZrdsS`AIxx?iwX?P zM;Rbdbp0KLjTOae3Ki7U0Zg}QstpER7 ze8zV!6}Ep%Yq!;xWWT~Ed{NxD{kw_jB}vx&k5C^lF^Wq{wgWX0k(W2Uw&VW5>xVHC zvrx;vMgWl9)~%#EQuuDTOm?y7(8<2bzU?xJuc1{tk5Vnh<}B0KBEE6f5#7bR%*`z= znQOPsaR$=_;`4k&1TQdI;G68!W08U9va*Pl;KCaST8H302#5a@=sj>?c!M{C(q(d{ z!_?vV{LJ%-6v@^217-%rRMNA>5lj*o;`tC4MAml3b1@vC1FQ0SRfqD7?Tp<^(goi` z3IJ{Vp6d@WKnQa!Rv{&XQ5mrjD{j5hXjuEO@8-lL$92*v*;5-cY1U}}>&mV3Dmz?G zlreohY%}iO9Co zK-&Rpf8fnhN8 zPB8WOlTG?(UrZdh0hLCWV~=%E`$R%g?oG`*m)+ey9L#G=xg?|DaZIgfc--;9=2{ch zk&np(!drHXXMSa2df|-CM@0f;QY7kwsXhwOfZ}8A1c_7Y86970`Pfi^7^544A0m*( zBddn?y_oRm)9?v-s%1D{lMCGpAc;lw=yxvw#UULW*7STV6EdU2Dh;0q3#HmZbOgBv z`5iD9;R3i)8j6rk7wtc51@mk$(3!2z!Wl6@%f&5TDj>@lfQxyUYo5dC)%LGzz8mAI zJT^02Z;Jkw-Vn;%IouL6@x;9q?=E|AI8WHr+-6xQD&~pCP#pMH?XdxuTu34nZ>DNVg|7&c5iR;L3?L zw1C#krXfHI^reIEL=L0qJ-}s%I3_>fF6i`*oRuGW7L_g|q&m4&wYhn^Ct6QNZE|yO zR~J*%*P0v8qODPE9t8Z48zDgG7pb}a>A313^lkZjioc8eJ`?-L3pg1LGN%8%XPRp( zyM5WXkoYWL45$_haw8@pEY;dp8R6hKF3@Y$XP&a{%CEw-)siNnSWJuae(lqD$a^Cn}J5wYH|2(NG5TX z<~n^~C%Xql`t2-wj&k@fiyA1nRZ?;h`cM?R((5i=z^Vmq{|x#B`@UNKuDJMkq#e$p z3=42bcs%b=k3mTRXkjPtVhJ|KAG@O-EbvOwU01>b#hy~MfE(Y*G5H%+4K)GGN1VD1Oue+W3-9K0RKPL$)z91Y7UAS6{I6r=zpkC?4aw(n7`ZwCH&OmLZjq>^@KRFyNEpn@=^k;9cd_t z0f)YmdBy+3-kV2b-M8(d7t*8&86pjYGBp^IP>Mo{5G7NIgjC2Bg(gE1DxyJ#GF2iO zN;DuObI3doMTQ0%*vF~+et*xq*LwHfYyY$NUTa(L^St-{)aAOa@AvaPhvPWTh@3?;FP7uhu807n@dcSC@-q#=m9J&4giv4J(>uQg2 zChp#Z3C{H+X5d+8Nk8hGDxVCI%({)b>y+b{TsOM>Dbct%Fv-5FW|qT``w!a_qo4oy zk8Cd`CZ_59w}PU63l+^22Nyz@x&{*_&)r=Cg3&R&7S zT!8ULi1`4NK}k0443_z+a{2vJ#p8C{)uk|jF5Y4UHl1eg4>UPVoyeQutk*J7#k2jM z=Vt=-2&tpMcQ7#kqG4406?9{>P}VKqW$zZoe>efd!NeyYfWC`&jk%Z1e&_MTLHQkz z?dsQI=D!_tidU>2OYl)KgXAHh#SO{b5@cO&^uW7Nz&3_|T)(z(s&s!`{crV@82`5- z{qaQ1z3f|CU8MVew6DA7>9xR*=@T*tpcocYf%Qv4!h>JFn3QCDJ|nOwFeWkd*Pkmm zLtQ;RF)1kvfZ$%U@_YMds)fX45B~0UKv5+*0*kMlnHR!;*jJ!FyYS1Ey?|J8)>;P# zrJQD8n(A9>3>~ODshJ@Wpsj7cApu|Q@Gi^C184gW|J!W;8---vSgdww(B_=cJ3XXC z=*O`S({?C*_zSDBJ0Aj2qwYVz0OR!AKoKyzE7)s< zU{Hh-QVnR(*JSnW|7rm!t+cgyBiYfIaivZGOsxY!l`-<9%M^u*9(YzdBpmgL5mUj(ARZFLSF{YK zn#fY6x23`&(#x06A2R^Pna7#tFI85BG=*k!;KoM#?lj|c`An1MeEI;>ZVY)wp;See z`Eb-TX|}~TZIcfc404`Fqj?umoVx1Rdes^q$rXw0#OmQH#uYIGqlHrn8&D9GF=1Bw z&jHnTOHm9xMTg`OX%BUi>v~V`Xti429(EajDNL6e(|0K#h;;Lg>F(x zlf?^Qj0GkLqo1JGUj|#n-j?8lxQzX2$mebFY}(@N{8WYwH$NZtoaX^*8bk;;fP90N 
zYZ}9~?L1HM*QR2O(sNuv@H^E3MtW6JU&SIGanehFJcZHcsQVIMV)@-#gmr` z(E4LF81rVAjpeY>x_}pwtOX!qKECl4pwMu5pw^i1n-ynXu6uWDBM-#T5q1XeZ!7KZ z_D@Tg>y6DgTQUb8KynE2ejz#3_f8iD(0Q)lwkf(JY9m`)b>bGyQ&sJ@1__vwx=y*} zmG0qPHCHc`i_n=SVH;zm-qMIrlK-PM!2w=a2@Mw8kwHl7bnj}DYm&;u@Cwk)dgM)CF7 zhy80A)b381T!$=ZN9dbFmv+}6i5)_1;7`#Q3ycoO!wJ)NPILtr zSyKf_3N&Qe!I< z1La7j=Px2T$=U(Hmc~}qolP!V!Ap-pLUF#O#jQsGTg0`;a`5(aK7eDeZ8p5I5K5Q# z?|1n!dEp2PNlDGak+E*dmW3j9CSuv{ckm0?4aOCGGiK768F0fnkMfT6@eK|9=-bz1 zw>UI+;Cl^$r)DNQB${f#V+COb6AYeSLqLJ?a0a8dKJEUHf|B0lBU*S|w^&)-vzC

vC;enj&_?Y$AW z&N})aD%=rA1tpRULcduajSp~_z)eM7!ZgrknhSLLV4=?`U#@jB8aEuETUhz=T({AC z)=|Ik1n-e|+J+H$y1BAuWr|2>_2;T91R8R_^z`PnG4}yBJPL}MXAv8H0CnLIE z6k_%z+2k2FV+an#0>FW{p^;;l93DW}<@nGmhxy!Z;UCUHj)7~;jl_cBMw(YJy9dZ6 z(COC6KRs6dG#i6@)a&8a)h64ekjF6xHhtNPc2D@|P+Wb*qWBF)KHfMN7_iGJSmL!! zRli}*rz?amKvl|q(h8ai8iGv})q1eWCR@u~^OoIClc%$6(v*gP`d08Y{q(li3p%Vd z{Pn9U%2_m?DBZk~*4SKD^nMx9g-i&OhD8}IX%up=;bsBb8zL2N9(rzeK0YzULYms) zm$O^r;kS9$9|v3!$Q_KYJ~}fPdfM{{MO?H3P)qH7?z)!Il*<3pdoUt-wXG)7ve)1r z77+K#NPyt=jkjj;JSV+dZeT3Vdk+Gvn%PIb#<;1B&4$BR{j>;79yS8e+yFLb(a`NI z(QX*$qh@6Gvj6T1BNcW50ZC(7V=UkWh&O`)BmO)?Gf!5`C_zX~r2+=lNGpnXN>Uc| zg);?m(cZBM7y%we5K)1g9^L<|qQ&JV52o*Iaj}@QDF6}R+9~0xx;fT}|A-D0G%)=F z4vR$n82z{cRS*IsrXlVk_(j(uTFo+SXliUkeYE1;bK0kZo{QY;7}}FdGLANg@I}S@ zt}1+T57Jz$f}V#nKl4r5g>zQ%X0w5wv7@Bp^%JD6^MPvft3uCt%;z~DOI(+ z$hk5BBq$0%3xo}?02$uZO`y9+BWU>cb?LS|BojHC3p)5Vs0zb-w;4_>?=V(yO@wV2 zf)!j#E~~jm;lzSYjbq9C!;k{%8RGNkMKb8W2q5gGI-G8CkY zR2c4risaR$r6~Mp`^&4M?=VWC$l`;AQK7Tw^! zv+2i3?_qhC_OhAfO;%1&$L;2e;y*qB_b2Ri^??T{BM%|#ynifmIexnuFkjlRLmF%` zC!8PRWEf-xwsPTYY|>%FnqVdd`SxSpYma*n7!s0wLi>LKiSF8|lI#Q=6AJC1k163n z!hIGs2xhwV_db17TB`MZs|>{btF)M-Y^nNH4?Qp5d?Dkc<)K5%K(u<8q}tdp+hVgcKFt_F%tB+lw;>IZPpD@PiqQtt z)hAQ$X4`RATb;_QJNsg%(rsBu;Prq&SfhS@W?^9={hmh3slE6l8O$>kjj6)TQiS>mAoWRcPzZ%=bM`zbu!7OG# zK)~@Ao?}CIdwndlYQt(CJnYAy%){kSBa%XH-n{8~?w4%1XJjY5=XIX>E#sS_>(J)# zF3JUff0h?CHJOY}Vyc-iR^SV>j{xE1QP|8FDmycwmud6~X&aa1@3>A;lX2bW30haL`L@hcMKA@Z&AC2fs-hkFBKrZ&-`j=Q9V)+ z?y$3j7EWVp%Fc*O5V`BSM`qr|C<#rJAv%CT-3qAYl2p3x4Ui_Y?$`o|hBpg+OjP<5 zk?wem_@!`vMj}2}?{eXm5mGV4?Zi_;8cO>q$j8^Ix10%pQ8Lf`?a+Qg2Dde08iP6> z5vFpbWSv~q^AIs&F`M|iWyQtCK42&`zHQganB|pNQ{;ZKUL=1008`Y@kJI2w>Jtz^ zI{p(FTBL0dyipmCA0r^9Br`EjRhz^ffoG&ac`9%W9~!?zq&@!|ZQSRU+@(l#GKY9tvo3a}z>FKO=vkmHxiS3ru z*tLs$;?b8OJ_C15hs$J(2o1%yuX)aATh}+uiRi&y@)+{!!IluE6GVKSAN6*&eCB|Ff+%|E;ff!K=GtajL=pTQ?4 zA+hjj41N>Qsi*PqT|kv-Y~^{3u`O)&Ds7W$3)uO-YRzgh>P;?;7XhHs{|-XU^OK*% z=f0jHhjXU#RmObl&l!t(P50Coigd00-GH<>eS^Z!{mu-New z4_A1KADe!_4U=X>02t@EbD`pVWxg491SB8DQ>K|6BG;(HOZ*l`^Rc3MKZVceLA$Y`=dz8 z)$a@)&!cGlf=^9O{9$-uyT(l0G(#D-N9*i37wq;t2|EWE5-(*q^(3RIV=jNt)d&Yd z8Y`t@@p?TJ)*L3)(&?vzlPKm*ES@tpbH=Gx^)2o$W(fms&xjU1ymU^DmD2Ry0uOYA zfNho=eP=aS+9rFErAhx2Ce<;Hx>B)P`>qa+lQ4?k*DX?Vf71J7GAL(bP*7s_P7s?0mequRUG4GWsAZyUq!f zlw~?h%e~?yG?yLR?98#n>s*mt?xyawVehoO&Pg0{+XU~LaL2(pj$Z;tH}p7`rNUeQ zn@S$fQhh1A2qW!WU$3471zS)s72q-lvp-$xgL&HLmp!AbCx6bqe9i_|0pfg#U4+^YHgome^l*7c`RIhUca3j7?D?*w7| z13+_NHlm*+=Er~)&1gto@#6__#cd`kP*~o@AZ&O04q95;VRYEaiqA5R zE0d?S#bfU8+{4f&Et8OVw>MX!6*W%57h}kNE_Kp6oEr#T^ru2XXORyr+;e@^wRA(q zb~hhoUKtr~ysu%}o;W3jWrGOTU$27ln9OMl^uNNafNkPe{#`k5U;XXSppxd;3jz0d ziDbsKK(D7p571XH@*9NNnr=SNP>|G%@b)(!pb8qoU=81<86jVV_k0|k<=XBi`c5q@ zRe%7Pnx^?mjLC)}-);mE;3!SI0|W#l8Yw&=BXeIVqfwUu^7HDvRY6 zr;(OuD;fekEt~BFL5j_U_@4ZFHbCCCpv`@3^x%noQv_fJa+V;sQm{9V_e#)iIu)~S zosqKB%5sr`WPA#33xmu9ab2@;6nx=7j5vfDw4CyLhOP6Lfiz-ig~#7=IQXKqbHvye zy1e&jtTZvH8ZLJ!q*U$Ly(@c zd#QYw&!?~(+vkAC9fl`5^YJRJ^=r`r=4}#`gb8u)93Ge0o4#+mWPss}iYW5S+PuL8 zt-aKX_>eb2Blet|>)&)1e-w%L8}KFrhZ#rT)Zjcu?fR1+ueD52Rt;o8!k#2Q4c1^u`G;6%_H!HOJfc+ z>eGRAqpZn^UuBWTnc=C5g`=zblq8PMmmCNKre|@N_^} zIC}@*rs6a$h9ys^xJ6`?$kQ3j?`HXg+>ZG;O;A@4)9NJhAcuiQ`}IH}523{Be$ANt z;bvYR5>gQ6BOS{8Ns&xN?gLNWmbLH1LGiKBMy%9)D}3gTf$DP!+%b#egWdzOwZ@!J zHVCi0tTA4BEKq+XjLjib$eQ^1iMBwwVLXB@P#3e)>`4sFNf5p-q% zNSbF9kcfS8ir?!j3OLI*&wwK5^2H_YzLECXMe=g6`GswRYSA=j+7eE{XDKgO4J?(Y(S?SH_WdrYQ}!jN}5 z9I|LrMXUP(H3)prb%csP%xsS?+)}jZ*Ll|*Q(91sJCoI)nB?^Hn0PvStPH>z8V^QNib9sGMj2t9))Reo#S2%(C1edz zY9;P$H)rOX56D*!pN|wf>|YnNqDSxAhFiN%kG0KUJk0t2am##;*}98EQ?FjXeor+6 zko(P;UCH==3)PNJV`ooz2Y&X6)BhsMeD#lNI{h!+P5s*67vTl;qkrW8|HJ>6Tm+X5 
z(BBG4N|qW+2*-Zf>z;kVa(iv+SEsop7am&F#GEqN(Dy=rSN7^_VS`#-vA=Y`Rd=2; z&?__PF-fbg9=%X}| z#8P+0Mkko)Wa(GCgj~HexT9-N#3?=f#Zrkok&`-gS5}xXDJ}_8NQ+xwt$IpviC*Bs zu!jfgE7B!${GVUo4iN6uZhf}*mQB{pYN>c*v5}1${@Op{M#KhkcH03u4IYVBbzwdJ ztZB?cIj}WbRVs|l3-Rbbe#^*<3*MITY^hcT84TkF0wQ#3s`qY7{8|UMm2!AA)%WQ|?^jh#%*m7ar(zZ*BRYKr|Z? zZLiu-mT)=h7Q-_x0!rxd-rtd?tP|E%;%gfqHAD}45H>q-p)N26i*6bJi{;BlyRmYOhlGkm=dC%DBZr|d+T2b*Pz(1hSrF(FV z3N$$*(eIW356$Y1u+4-q0xc3J3niKbK-66Uo6KIo%Aeb}1_d_wWh*D6|kivlFl>>znhk%K`drna-XuI~pTYK3%4%c00#859u>uf7J6 z(nnxy84n*H-`BKi3GcaywjrD66?6=T+a#{PmGS=Bx;167vppvD!_K(`YCD0?ZZw7j zqUKUn-y2i~!>&W_Ib$9#_-gN72>>oZW=P!!+1Xdki1RB&w{ss55;2q83Oz8uPiY^3 z`%OSr%qc)1;03lBA}6q5tSy3XeOTGZk8KhIbLf;;F@Pajf)9On! z6mHrUcI{d|I#Vhf+`vZ%jyxO84rqIQk(SM1teA;@F9U*JSSnV?8@w5Cp7Cdb=f~vP z#CKf|OcNJ(%KA;+vW3Qc5^Ew8TU)%(xtvsYHcV9gfl8NFNrFGe#>#WJTR0$P7~v3b zYD9xV1??<`|ix4x}I=_A9^7yT%n@-j;O$hI1#v+fbZy)wE$-|-zFtS|#Y$L35 zh8=x@%O)=@q{eCC`MncF=&J$eD*T105tJ7`5D?%#W=E;IfcFI#pi<~RZNhBk?@cMbyqjhJ-y>1M$bp*WMRTvMtjIahv5hv~qR-00Oaum(Ea9R7 z52f>{PjoZvO&HCt9dmKLL8D0a0=NTyyu2M(d zgc~bc)?=Qg2nx?Y^h=JNZ`m>1^A6P_b^u$Vprw^O*tCEsg-V}IGlXQ=|14kwenXC- zm{Jj@6+jq3svEd=Z@M*-JQ>IpQ=V~I^ zL5nvKwu`k>SJ?T1uF#|s)#-t8+xAcjzj9Zvbt1i$QTsktuz{HIee6j2ro|f{e ztP~U!*5iJ>0b8nJdU>862~4J9=Bey>scdcd{AW=pG8q^$az%GjR8}U6{ub3QS>uEJ z(9qOems9)*bLw83tWIJ#_e$8JTY{cLhNc1;+hCw>MEJc3A_GPJRv3K(q*#KpY5|~> z5Rykcc(jh~x-Y$Q`iyNJh9@U(TL_c4YgOMN>?JD*9R^GwjnVkiqmeBP+D(A9F1Rtr z%JYxaJ`_N;o@|=)aV3j&UCQ0y&gqjPOJS&rILgU-S%J1YAqr4GHSOpE4tDmJI2;tv zHER}dvIm#MwCd}41{u!ns*oCM`f~d4iYz9S&PHCNPuV~Loo%we4@(o3E}V!jw4zCM z=gyrvSty~+_Pw#XQ$}Vp^DeSHsx1LQy1{KEc07e`_KIvwB;}*&E(IA?3~huPrljy8 z+v37Z*X6X|#Fwdp7=mn>NbO$I)s76Jh;_IHYwa`mheW^Y>FN?#egmGwNK@uOm4M!k zV7dLGLev2I=MX(YAeLwtkQ!ch@Y=q(MHQZKwH*blF+!W#5gg0WNztW7T82*mODk-Y z;FQB(61ot2t~YoV@Nw4mf2v73tT21=yo(ieYc@jH(t2if8e&8RrjQ&T`zNKKcX_fC#S+@Wd{lN4F z3K7#a0&b_hk2D}2u|?!iK>hiBpaiK3RtR2?@0_hiU&$+NH3ym)LE0I=B&YQ(x%xpyVgoqXkw|Up8xvdQ{nD| zx2nH?F7yeq+b{HeLv}~%IhKj~?c4!pwrXO7NvdVZ-qb_0fD6rXUpjOwegoFX3(1y(?TkjQ+#?_-|K zi-n}?8nPx+SP7TD%W`ko%mB?p)2A!^fkE}X2uE6i2P+0Tgb?~pyh75y0e*=5u~Am> z3*FHMIE`7TE-0$I#j@U^w!$MVsMvUCAqRWHP8+wN@)eV|TRaYXwbtz`ObY`n!#S`a zZ`OX!_j@~zv^MW@?BOX`i@LeMcM#kv-2TlvljEURU*a*OLc2Ne>uIoR*Zt7@x8415 zEhFa7Z$~zV#UM_y2v%w}*gp#d`vv2LcT_M&S**u8aiJi^S2(G(sv4W5U<`E$xooW%duzOB{`zs7^U1pGKZQ|f(b?N!w`?f0y*X!Prp zd*F}*>~zk@vLmRq$)bCz^2p~gJ6>zzQW2%X(!o3QWhv7{MW1dKw2NyI#_r7V)tTa4yuWorP*@_%$2|$(>jQ}#?>q5zV2*x{& zog`{f}-R#M#Ln_9+HUg|v!{}DFJKG^kc2VS>W>VI0Zl>DgB4y(xGCM#P}w zVpV^rVLnaT_JhR4NLVg$_b7*7LLAB_xycy{08}C`ec6S^&P;xp#74D42pJqs?{JUl zN?|q+B+?yx=$ojeM^P*B7cCju6h(&L$b#E$l=XBVx5AouE$zeGzQpYuz}YYq`uv4> zv@>FV*W7&iL8z*^(OB&!WbtOoi7;5v8WSrc?4O7~nO7hQa<0yGkSXcMnDO^$2Z=X^83+wH@67^>#D(`VS(vz{|= z)R^IO-%V>xf}+^gXRQdOQ@<9K-<-wrS5O0yi{16rX1maK5`pk;HF{hQu_Jc&=ngX)qNQ69oKXvgJ2MVnRN8)2x;iT!48%~KMbL0(sP;M(RybFCQHNu6rDx*Tm-XLm z!m*(VQNt3TCp>w1kq2p#9nN2xOR*4C^SMW&uewIQ`*)6>T^5dGOZ|St-bhO5 zC~8fPlRGS#p3qUh<7gR^2e)|r2k{L@p1qQP-!V+r(x&#I7Mc*c*WaZi-gXT0p5o_P zIQ9a&+rksNZvuctKXy=xrgj(w>O-_sar#_7o!rx&^mTClAJ^MqcIx39X>0M)j3LxN zTCgUi@|_gWt=ft%SgnY=!w}Mv!{cxre4n;x^GK1(~SP0n?wRqz;As_LI+-B_+GtAD{+1q}!E=B=3J>%ona*%pYko6a! 
zhTga3hO8|&5<3}rF`{rMsbGdYw9M&iKK*?R{4}5bbrfgqd|8i>h{y$MeBm=W+NLrA z?@wf>eW(mjBo{!1{M^kEbF&sZnXn|J;P7pnpJO@SxLeKSx)8qS7>%gT-iSm1{az1J zN(U;i8Bu`uDb5mu<~N8+mDm(W^A6c9`3NMJ@*I4JHWQmys`4_|?j35?K-b$~IB=NJ zCus-A%s_+iZ^7;(CuIU+hH7?5P26Wn33e5?mBeI?vN>OD_G!@lYPw~fnbi0PBja0> zW*jChZBJA=+c$2t_;4bx!u#ujNjssSLcQd`yTpBA{tVfpi{Gpb}Lgph5C9*ne7s2xKA7og6dg$m5uS&IS-YzNIK5M2(CSK+O+kyKMS2f=fh)T~|3NbW zlnFe3SmR78${)2_ul2lRI2Wa7Z*$H@uyWc>EN5bl!dJPV4EcNB&qcp~xe#EJX)J{x z1phg^yFYl?gG0o%Xj877w&L(KJV~9XdyzR7@blX$OQ>Uc+CI3;X4Nh5NyBS^Tj#ps&oC1@wR=|kt|b1f334S*<9Xf+`U2@z;yORg={Lk#+d3Blxo9v%S3 zlG9@cECjycd!SI*28nCniuGe(fNeCsMpd;BMCIB<{#KXVUnb>SKY_lxj>O}DO_wpr zQJ9v%z9KfIo#P;%wDCoBI80Py6=htcgc_WMZ#=c?7CZYoY`f$a_QfxZ<<8F+Ei;$NZaRPQA{F=q6;f`+o{1vRY5;ZwNSfW+a*ppezj}l$My&(#b*(jDWO=fy zyGPdknMb1%(1o5qO#{4{fUeFKqcAXH`7vMBZT{m8zk|9HmnjEuv6)tGwHGYwzq7CN z<;%e<`~6Pd*K5fUT3O+_7D>?pV|*xx$yX6y0Pq_IlW*e+@O)Z+2kAK`NVy#sP83lg z%+!6AwHCnU%Y_|nI2dwE3@(64w5eW+5raHne@XTP$Hpet+sPq07$l`v-?Bjyv`Q)8 zYLy?tekI~6wA1xK<*0J{XPX@yk9hf2BM+bU-sBCGIdLMoow<%l&>y#hqCfhF;7be4c^4d`}dTKf!@+;jm>E-u#1 zSVrMf47Uq90kAYj?RB28@n(T5rKq~m9iX6VcYYR_e!R0CO+C|O_9=%WARI0~wCuZ9 zEBj=x1W+8TLWlI19ym~4*abZA;_Kd>Nn0huB0sWT+u`OZs?kUAyKvTe|59CU59Ot3 ztza#KkhC2zA7G*bz$<7C6Y!2^-djhPq|crBW}&o1`!0Kc>a`u3TU1YpUtS%nO8ZOHr;mP25@scIPj#5#A&F41J>wruQPJ*Q7ykl zP-c_-)a9fQsI=ODD?$9Oy2PPFL^{DgMMaU7;9S z3UoKbgCCyjGP56BszqChnG`j4;L3`yPy1_;%D>sbv^bpO0aXL(plA z2htqIxqWXd{o%LfzR1zUMWgg66tOf3>`hQ%%)Fzsrg1+F3J8#{Q9segcU{1wq*DBm zEpPA620J-~WgIFMfqq=V&(6;(Zg6KA6Ejr&X`_bbc4CH#zzkhaCCCHCo@+|nInm5A_~)Gm;v5Ol@{Lo2Sr?NYW&{5=pwS%&eyy+%V%($A{nG;oaID_noHZ8O zhLkIe^|UZ0<@VUmfM<&~QVk|NxF8WcjLrtVBygre5-h=&7J{R%Us_szSB~mNKV!xs zt9BgE_jcy)srOd$Xh(HC(#n&%^$)c+l6}E@v%ucbH1mrJxJuTYdrzFrR(Asd<*@1z z2q*y}pjHq8RlEfa8s$$aav*+@F6QLq!~)9|7%yXqg1ip?QtRO0gCb_m*!tAx02DK^ z73Wf|Ng|9SFdLqSbN3!u{fM2>jhi>$VG83?sf3!U)M~a(7n%STI6QRnJS#LQ?y+}+ z9qptmQ${(G0X9 z3$IYl0|Rk*U6R)?c?2Fnl~!=}9VVkkqCa`@9I8*Z4V(YF>UgXjWaLZ}rK|n>-z9F{ zuL$;mftjs3eWCm~+r7?Ck~ zJFc`pu+btB`C(&Qv)#=PMy^^Gw+xkb2SCs8PxTFj3=-*Q=U;BQ>(f$G(bS{U2~YC5 zDq@Q7k8wLy%tI_+{?hi6*-j&)h;2@W=ia`38+D|Jbz@q8wW+cBkDB&mmX&H%vJxA! 
zug4iKsa>^E3988o%qsn~4lnd=VP^)yYikQF}8#lGS`{vPi|_LNUSkrgJ9^#W$9pxu{YeBBi&cuN9o}45 zNXzG}JCUx)FIMv(XRdo*dVc#-CGKAR847j6vpNO4RHm3%ytxy-c}OGPJUHN#%Vs(F z4hUlE<$jf4!+_k!Sv7kA!!htc$kyl|Q#f|ZMUCx^3HD#&?jXSyTnf*-l-SRG@mlXg zhIhs6{-2UH*;O<=~msuO$Z?k$Lu>C9Pn+yTaR;oo_WJxfq<`Nv8Q*wi-HmG@YP|Dhzny6}Q$XGM;<Mdb3@W(S+Fn%Os>UDkE@ zC%1_8^Ko;<2Z(VdSljUEOL2X>wL&=Nm#*8j?&gJD7wLc3Ks65iiJHB>i3w}SSlqsg z8Df=gpPq{wwcbrFb++HyyE%0uMFaheaVDodadbC-N9$iS3olt-9zO54r11T}&xdxc zj*UV(n;4I$=$^m2f#US(p3TC#Abf zm_<+NPe@1d))H<7i=G&3yj%Ru|z*Q&G5sNE|u;kh>1U~-*^^RuS9 z^>3{`-~!7o^{#I#d=us@=6I{uvz1N|N9fBf)A$!#TJ8<-Txdw zOP%9RE-$CKoYZg@M`>hJ8CpNGpe{jf$mQgo>M%{B>2=eQ&=uA4^P*9fswS?BES+AR z?5tug-H?}6yAY@NN0)*A{_ObF#GO<^>^-(aN^9B6fnis0d_m(yMy)^YWqS_$`c@yZ z;aPa6JsOR#i+8v|=)wKVn0UvY-7Ck$ zjWm?+JlZDffwz}VXOr4_VmCgCs;bm^_H~0}_doVP#{m7+ni@5MQ|!Ts z+5-Oq2lq@Cs+EiQUa}kYv4ojOS)0 z)CanuE2aFW?rMJZTU8fKdIK7d0hff*G$M(Oh)h)Ljljqnv683TCU{*Ktc3I1TU^D5 zw(}qJBQ*ReTeIg66RiDux$(<$i-#c&4hbhtK#mMQ+lYX<5GUS+JeYyKntrGT>YS!Q zO-ha*jwi0*xu@l{0C+Kw5{q|j1hV|-z!}%GCKH1Y}*8vrdbyn#z{BWY8W0(pGTG13EfhEf*zjZ+hL~W|5;4#%x!s=kO z{a4WYRl;YSyg91+Iuq{SCcC%p*Fd7tO5Ch|%ibh-?bg5QfJ^f{k8J#gW4Wg$u7IqR zE8&qCweC(?VReGuk&0+mT11CY-=z$|i;w~PiQaY<>>UemBV-j0;m4j2PbW^){9ou- zG*V~vzgmDP7u54ic!M(keskX<-LhZ(|OR?$gzhG9b!x* z6r%i~W**%=1A8Q1Vq~0*JW$dD3gYQs=65y2=hk?LQ?HupioHiZvf>ZWM*9+M1Zl=K zWctC(j(JtN%ed}JbhM*hC<18j1}*a}Ha6+vs;a6!{JXbMz+oH+l0B+B%z$raBrMS{ zL2nDd^iGt*)Tvr^sz$5=&CmaYz==ZrHz&e}Pe4EmJqffotYHA8{Lt2LELpM(#VK^q zSWph8%JpvRCL!HH#Uh@QcT6R0YY-;RHJkTLz*BnTs~$+m46LIxH?*94X5I!K6tk?=WrsDKqah|dHv zk|YH&9{7y^22HLwqodpQK~jk}7~`QcHFx5jUqgTpmlu3qK*{|oDp*I{+!=w@v$4w_ z+?vGkSoLu>r|yzyJt%qguH7>sLCa_7Q+K;cpb^*!iIC2XtGTJZ0klCA{pmqi2G`j= zjmk!`1!!S5@U%Cigho+c2+!{wa3&&JITui9`7rA@thLAE4LNBUN=UTu*k0-n^#i6@ z{ZU|GOp<{Jeb$=2V0C8xo$CfUg#qM5DrW=O1Bx#$o(@Hf&v>G z8}FF27$Lh1yfFcK}Pn;iwE_62q>FDksq_&-uA8HX40ahj!~>l;5{2pRf_j zPt*k9JDqXNK#zkSI9nXsZfs+WyD!S9HFgY4C|He&5O8N_F>Vw!0>2EFD;NZ$#uuu= z;Zdw;A|_?_A5)mu{?isO(3krAOvm7v#-j&&>1h;%@sG53j=m?&OaLyy!C*DNRJ;#U z?O@IQ0TnByp4sK@PjVAKp3RK?*p zoX??t0_a`3QhVo4l9d;XtU$n^K~^Na+{k1#Y?$<{9AraR*8?WRep2&8`5Z zzouC`_Bq$$#|9CXb08Tv_~=XyC|G;OO&kGW7AWW;)#DCOOUn`4d!*wa&?}t99xVmX z#aAJY8G=M&7-DoL?A|Is#C^4OzLL|+Gtyv~L6)!c{OM%s+Q!O2t5pibcM%qN^;zS1 znzR16weoWGL0u~0WOr9Q_#>7KEQKi1zkQvCzP|olEiZ^pf|S1R)!DZhK7|l=K{^Bo z2sGq$TKIQQNUb=i)aqI1`{xi^>qR)LI629n-~h%EKyvVoj=+jWjBCmXNOmPWIWY{I zHfbI%8dO5G*oCA4YDdPpoRK8!IuetA@IX!C9u-S6d+UK{U^(n#?pJAG*WoyZzJmqb z6mK6C&0={s-;3o@ZngX>L0uF5XR0onot&No$G%udJ(^(fY>Tvd^bUI;e9%4lnSz$g z05f10%-BGC=`FTSssO;eJFl*>%_c~;G_soPJ(OG2wmZ)_Xtr$L^n(Xq*7c5#7t3z* z_tkf{DF}*l<%xwsqBx^SJTPRD^JAm-?j`D1<#{e zTLoQzXkO-z)jmt!#T>|vPEcKaW?Neu>O@N8Xba~RX6?#R_zG#|#s7Mv-cXW=aWx2J zv@Cftd9J44%wfk`T~!+~;cRhSESY&1&!T7SIPyT@Nq&#>GbWgmP*LnXF9(9(w&M#R z>a0$fKWo-Y`;1RT&wbTpW1Lkg7V;n>Y8F6AS~|1ptT}r^-&J&>xaRV=cEw2VKW-Fa zY#h~bCp_FbG2!FvT^prc`jO^+wmnLCiGtS?x?vd<#KHZxh{JeZX;TO?DZk^%^Q8De zodky)CXC0IpxU#>627$^iDDZ7d+4}27H_W4Cy_3W=VNFX0JuWwL@T|klMKr|R$}C& z4P{~XLGZv@nwtK@X$=G2n6x^8_(CHzSFUUZe=}8B8JdI_cF>|@s;%KKQetcpT_!0g z8}JvO%YOUbPU}lKcP&;Ad-bo6erCQk*8ILQ^qd4<2HQs5ll;%GB93tJuPm2K3?^pQ z(`%?|2CwDTpKu>mJ4d~Rk}g0LBEda*;i~s0WwT+qScsH`P*C2*_ieWch-6)B(%WE_hn^7p@YLX3%aKgj2TdNHz*N^_d|KO zdZ;)W=NTJDv=AhA-LLxR+Xd0LgBzB$k;88Ooo~ls4#!ku8#w5rUC$=B(@t(u#29j} zXH83LPI1Nz8zAL$1?5_|JN5{&_{(!gzJ;#F{338I_yp(vN~zxq3( z?k$8q3hjpI`t`NObFcn(3{WraXi3;iiTLkNtbe61I!J1St2^73k3Q%_(qY8!NxGhJ z&qg}Plx=6tyi4Zd@_%+#rKzbV&QE9apZX%612{d{H+Ky&Bzj^4tZ%Dx>~9x zh{{5_Z~}RPc?jAPA|y@Sy13AW7198$ns|7plt%PMJfe`M-YYMNLWn_6RGz#qEoubf z0o-pJ<;mZUsKO>uq7mK<0R7S0Wdo2(LQCX@vvLCaFNw%PqlvHIjYeVp^%Y8YP=nKY zw!BGGHB8|B%uc4kk5_ZdI=or_H*0 
z;6S!$RH14g2E7ZGMgclZwz2ITlThotkkGIn63yNb(yF`^>M9h#076{U@dRC8Fl)$F z5lfASAgJNs_3)Q7lh^=z z{bRmzhHteZ<@L=v20foEr*SlS=OTwDu1jabm%jH5BDHboCfu;~c^tNBGzCDg4Mt1~ zDz6douB_a=*EBW?DF$}T*e!W^x7j08rK;T%^~lT(#vUq&B`Y>}q0@XYJ=b|X|ZRD~<)~7>dds(bM zBrn1iB~Xnpu>Q0(gOO&Eh<0G~m9b}*v><%?bU+SW6!jET|} zJCfw@>{qpsdceB;pX!BJmlLzfW;hWhEWJ*`Bu(p;{uhmry8@0P)q0iUbJEn}H(lug=eGS`Ee{UyOJ2 z<9WmdT85DLAKJmoCRjEGaJazgDr!#P@0f2>7(D3zi^5u^ul*F-gqf-}a2sj`v1*0& zV@{CqgP5dDg)K2-rN>UQP2#09kO z`k@H^4pJs>$t7Ty=W+E8q}BI+c)(9G4;Tm`23##%a_XIH6Qnu>Gq^(1vq;7oodi1# z6Cukx0_#J7kYrFEQ2zBXSXYr00 z`+3x#X2Yqu?Kt z%ex$~k@|e?Ct*3c@6RK(-E&My{0FEKN3px#p<~!p{`7Bou&?4LmVQ3ST0%#)Mf2bHAy#U_`I+1k%}ZMj*>bbZXqasYzMStStMjBK{nzO_i*Au2Ua<&C=Q^(kSVkFVCAD z(a9^t{C#E?pOk_n$i_R}qbI!P58d$gt{oj7dfb4uAQ#)lHuOB-)Z;i0##1&;a&Y?E zzfxR$E7a{z5><{Cg%0<0mo@3id?>pO5gAw0p!vERgYvvSok@v_%;p=%VuVxXtA7r4 zNH>!FX$B9wPsiEe0~#SObzJg$jH;@ZHZ8(fCNIBN|6x@hJ^8zDNZX|UynJ>+6Aawm z{`3e`i>a9D8MzT_VX)kJN5C!QUt8Uz7Qc8)^XP`TiHV8ka*^jXBt@T|wAhrMm7Kg& zx9YjNXg93tU{4M=Q2e(+5*PN#gvUdEcDOw=Pqr?IgI!jO(kyy-Uo?rZ5YR|UV8Vjy z^4cuvHf&ebUt2LSegDLU zeNJ1VBa2|4ERyY~xJ6Cvymj2t8Eeo5j67Z{ENG*Mc0PnVn8nP@jPdlSikP_gfps^` zAMeHgG3xB!C)UscaaT9Dm*#6|r}p%-OgLayKU+<1Q51_LGjj})R3>ipfc_OWt)bj( z_zm9Z9yJ2|eB*DBdlIhCl}=~HLYA>Ekq4fe>D_x4WhjcqJ6k2a7>vEmyPTvbp!HJ%c3U!^a3OFMOO`IZZ`NBh8KiO7EqauVQmC# zo`Ps>fq{Y6iTu z768`kp1G_Z%=uwTUK>y6t96Q0=6t+4_q4BXL}AO_tqhe3Z8m+JyjH?1!M zs7X^vke^Fc*iK>tjybp&`7;xWw7nIkRb*#}(c&$a#0ywa?Y)6@%n7V}A|oD_>2vb% zInL*Xb}*OiX*vBB^)kNr!jML1gyrq%d(cX(#ZFj89`r@jaWnF8ZRq8NVbw)+OCdx> z9jr_{W1d9d)3Tp^x$NV^H_e;sCu^1OJr;d7&=6k7*?hUd;e_mZ_H-wIdy^3GqBseR zrvW3hCLX3bT;~J|P7;vK&J`ZhK2Pq5J9NIWgbl!Pfx?W3F@>aBXwgALOl$N2VFGSmVegxvOc{{F4Fqti(6NQ#vm}Et;@%yQ42adeZ~wj z-K4GAC#L_BISHt?B+c;?Tq+njacCH|oeTj@dW>*y=v)-s+-8ZJKZ_iDf=l;hg9YkO17 znNSy_^r6`h546tZpS~gSSBA-`1EPevj4!aQfT!a^g3;Y2FIt{@o^(EX=H@_=q?Riu zd%C{V{)(e(c-O$SfZUUSimD<}9*4r5jMJx#p4%3V!gjG2L;3U^pl(Nc_meq8=) z+x@Bq5 zngoe=2`a1usN8!$ri)X;1yg92+2;ZON=N}`Hw4K0!<`-Ah}s&U1e6dfw;1XT9ImI)AiI?C;*6eGk`l-Pa90$irylV(`#Q|o zj_EmQ|7fEmo`~S@HB5k4x5HMOBkTIz$l8yt_-*i6oB1%dCt~-afkAt~SYX(er`k2< zi5I$l6zZ}}TB8!GTFCbVzzLvK!6oa~%_e4!oLtHOhh8vD!Gd^Rjo~lXzBdA>&9o{f z+?BKl`1;jlNzL{4TS_U<6ToEFio#84E_tp;<1rV+_9x~Nb`PWVQxGTKwO^m;Gd8+< z{xp=qyVlhh!%{_rSJ9qD%&zJ)7r?K{{)%NSlEY}7JzgE{wf{~A*=*@;{ zpy}1$i_sV z0Vf~~33&+RTujJv7x0|_3<>r<}55k0-=53A-EVPc-00sAhf zHl4l*&x3?v`!8eY>gOQ2Q@)}N2iT%MzN8$4=w#eo^t0$YXe^sNbbqaS2*M!6IIVlf zOAe|#a;EnBB%7`Hvj8>~w5$|I2?vxqZ)YGkXuJ8z`XBW67t8~b#Vry}o7>D6%hYzf z+%LCWXh#j8ODHYgq3A3}c6EZ!ISsm!{uQXpjJs}C7D()PIw#RAqm*qyyT1FD(~|AN zv(R9QdAuq-xL#tuw9{g=hNN&N_hIN#zC))%P6QLUxZY81eS4}$YT-a4hAMDezgH@b z9>ZheaRh{|L>E;CBM6{RG0J+h+P#@UEv`>|M5fZ?FsqqUPCpMv;Y%Uf>fX-AB>Wc5`$S6B81dj%dpu(5Iw~qt@!cx zIwG_Su!TN@LmQHlQ4OAb5?ni=*9mzDgJWm7xnn)ptHahb$3$0u`t)f}%`IJNw{Fe% zb+DVWydmE~=DW|3G;f^&?j-bwoABGPqzZdFr^jhcgyi|t+u&kx(Jy_sliS=IoUkyh zHMLLY+Tcd)Xuu$+KZaJ+a;ERm)0=}$0u)?k&(`f}jRH4|>4ZtsrukD*`P%C7{y(q` zD+~wdQQ@7JoRyrRDObF|0kC3h5B!A+8?jUgqNgOic6N2+kZm5r&FeIKzZc(X>$6rf z{5sgM)5mSJX}Dl-aK+CCyl-capK=X#cJKc1O&2) z7Zv3PIiX$UP#Sy8!f16ekjHkcViWE~ zix!y`EkdQY{h5rK2rWN`u2QIdH-19$4lE>>`FzC>6@7;IgN|sn`g0gWc6fO z9x1EeEq&NjBfk++JM-u~y0VQGV~ePo@cbj{Pi;Lt&YKO7Cp!q7-!9B3ZQB-@bj2fk z$1_+DyUGfT|D@iI4o;kv?azV&tNuWa;_Kcz?(}B3UbwKCmJ%Z%-Lrf-dU)+3owJn+ zUk>yR-F|Iwu3?bpP;AoB_wRxFiO$RUcp^Ig219Tex%S&#i;mS;yUJeZ@Yjz=8s@Mb z6)WtCa{Rcg89ft_g&f|{aN}7#9uoh)_zIQ$g=pY{1vT7aAG_?_D~AWdJUJ5XIHc03 z@X_t)uwhyqzy_>g3?7)fTuA8Yedl9e34o!`tY$}i!8@dCa7*-a*eY#-@7;&B4i!s85y5HJsjZt}i>;Y5in(eMGYOfHw9)rs7EQ(S&qp`k1l4tKNUoavgpIwg0neWF81}8 z{e|is!?9B^scgA{M{4i(e>Q7L9ZyXaHPdtVn)Rl3LcV(P@hyqR46mQSf9q@maQb-P 
z;02fikmvU&)bx$}4)u`Pxt|j>pV|7TjnwB)u(}r$cyTGt#AO9{%jkFMf4R_iQR%HY z-^N}Tg+gKEz-H-%7BcjuT+X3Bt~p<5Ci(^_LTQBzP1r+gvqu-V{v=9G zn$2T;F=GTeoD+eSMaxJNfRqs`@fE_kr3wlry26Kgn`DTsf%yEGewr#u-PJIEfJd$2 z>_1UtTRoaftI~nWz!^#as#QNKTW4=oc`_9z>T-K4uZxVEk?0)m1Uv>?Q9cVJr8>eR z8C`vSb=&r~TOom0ZQz}%SkuO6Sg7zP5kv`zRcj^{CVt9qe=Bt8H{b4D|K65Y2{dst#MAH>KSe092Co z$c+4(=SE;doIp~?7ipjv&co>>~LsWn1*>>njEaWJnk z_iY8~g8fP%wB%>)T3)OXSq*wq>^|*71ool=BAoz4TEn#^kq6Zd#(H(5itiZc>Q36w zAU-$bgYXe!NaR{E$c7^53-@q`qz_MBG@c7W`eBqS{>wHn$lN%5f-t;7VeeOJs)TkJ zp&KkW|CrugwGZnScor-O0wJ7W4~7T8$h^oV<}$X@fWY${=~ii%Isd_N z)8R>+h;b1|oSIZslK2q-(GRr&m7vFsN8|kSn+;Ts3lOZyL4aAjVS_<^p-QhEL97tc z2?$3`Mz%PP6ha*&vgG#jtJ&aMJBh*c+eEsD*kTJHj2w}{Uoz0qDx;nNs9-*nrFqIC zvgl3_xr#vXv zJYc0daq|j_-&HacInG2OAOO;WKDi~kQjmibH0*GVBJXGaq}-(i znLkh?YNNTr)sJiZeW{6ptrhu}f!p`LE{kgp0BcWd5##%IbRZut$)hB$*wCPo-(c_f z*|rzz!wD$;;Ls79GsV~8tpnrEFY$5r{_qFY)m!j@(Y2G=2K7RaQk4R!`vR$`K~ZfZ z5PEKf@0WAtlYmtR!0!t@C$=nsi^O~-BG!!UbQ_dG3?C*hUL3q68e zMpk;!lh?6myXAXPG`Yff|IGzxUADqr2o$sM^$L{yw7&=fd@e+CC~(T4@O}nY6dfbD zDp1)E41i%+iHWpJ9^^#OyVx}sw%6aVno9jF+7asMpsp&#iIxnO19Ak>Z6N+0nV1E# z7Czg2u%lIRR#7FvSeT2+f}joid>ImPn|Sr>wdP8}U!0l-33dL(BeX#~-cJfxbx!!6O8~5|)waqlRftsx&Vc}D0I-B>AO}tajfrkA-Fk=hRu>am z7s5Cd-rIoaCK|0C$mUe3sdD%1?0gaD7@;GPMHvrzfl(0X3cokf-NiZXG5_=JAB_!z zR4ulgJCCEvlngcZU4H)2|2)BbIK`Df3Ic9iq`s+RL9}IwfCEA3nDFcE=AQ=cCrUF1NwXBROT;9+iqTh}_-Gs?q+Ntw9`%LZ= ziw1vy(?`BD_YzDXHsW#(XvZXedkIesbdBDZQrt$v!`;aJ425U@`7pn0ZO0k5e! zAO_|1*ypG0-aXuPCLAjwsg;GD);+xc&KNQR&dC9y;`~~x;L3fIqNT@cu=V`&W=h#N zVLh?1{BhbI26H?^UNgHgpz%o3H&%S%<>x;IJ)W7WyN8Di?HYy+kxEWi4dNLxBV~*| zdlpRN^3(3L{=ZWfUu!4L&TkifE->``#L1HYm;{MAL6P}lF*sConJC%FGlRTUu>1wT zoDa}H*rAyYD}@H7270#h8#pxnjc0vv9q_L5?#VN>d!q#}#VJE}DOaupcRhUc$QeQz z+$0|Ck0+DXgc@t7(P0lAep9*bU2xz9>n82q%THnsj0G($EiGk9A5p%2r6+?DA?MV& zv}g62q#HY%|_6sy|Q{K{;H8^biE5m=ZQ> zzH6WP3VaSYJFmf-7lIfldQfylHm0;r&RnZwo+zh_?0h$K3hMpWcz<4GIU8Um6=NvJ zU=m@9aGuZf{!J^F8_Yos+kKc?ALta6jKRl?cG93#F!qlRKgYveot#kXp6fzp*>}BIqV&Fx3WJM_7e|r{pn0M~W$}F)*5~K$R*1j|Bn7QOlwA680C-LaaaA`AqqQu!o0B?cSRNrO-<&mzQ9_-{Njp0Q< zKnIJ!nONZhw!LHZNWhex&`+}p$?6W7RVzPr3G01pb&a(E%H=7DI_0x=$RES}oAQdc zpa;Pj*FYhYkS>w3;8APvt}|?naraR(8Q1@TbvM(HKtETpE0&%HX0un zJO#_PF6VP4C5nQO^4Y8z1)TyaVcXRr?Bil4rG4dZc+|aS=Kx1>a7f6P|1Q&?0uGNK z#V{z?o!)v4PR^7BdkhW-CiFVm+AXdJ_6iBf6uR|HK|f2>Eefd3z*@A8j7~u{%?skR zK9ZG@S!2D93_CE{cnlQ|6~9=QUJ4%1AIW1WmI)&0)aDPY2z%gQKwIaC&VgE`6(fbm z)sydmtUv$`CN+7MzSyh1y0khS3svZN%-eTV=Wq!mYEUXIU9*M=f~k;*pv+ zUCVV07%d_?y;-Uqn3_0mt*2|ex!BeOg3(wRe-K!rO2+utya3H313K2{u-bi}DaBtL$%D!EA55GmUQwUA|7 zwX*?AN-_w>rUHH%Ss4WoJmtZ(Avd9F-Y2Mynb-p|?87?+fojY1ura@j0pj)XZK?96 zTS77e+C_;FiMR$zsuW}oL~gI;MvHlQ>hutih^xeARa#gGq&2huH4*~;wJO*`TCRB} zIvUc5_OPTZiyKWfm&S(ww9$z&-zOtJ;=%Xs=2L~JxoixH$Z6~p(ECJR+XG4%pa3X1 zc{N`sSZnj<&B;qtUQCl&RQGa|l~r;U zpUOjLC&|He7R4r#-4{Bl8a^={J9N101vVWNHa51*eR-6M^avvy$)EzyrvA5Mqw_#T z0s^~_eg+#+#ZvPZDcLBtKZ+=R{Pyj>ofcU0o~L--E5;XQT`YVxY;N56#%Ih6Gqbc` zn!rN@LO+zS0K8;0&1dq|ovg8*Es$aN=Qw)0LRS7&M&YY%fNG_R9K?Y&;m-@(+;@}v zx}-InG;DrLoaVgZ_eoLFE#?v_#J3_!0lip_%4ZY?tgW6M@%!yT7Mk6gU=z4YUu=ht zFfa~vAWklqFo9$aRVt+`^cJzgQTXS-bF0l9Bt0hA(AXg+JTTA~+1zoa%#F7**Uq1Y z2YSn?l^y!7XYenpN$kWPHrI=;P`>Vjbsf}{RQJeU|LHKr{E^vcSuyNe;@@>j{1ivlziTKXe0O_p*jR?vM$ItY5`-d(?>aCOnU zbr$g~EDd;(gP7bdhzV6cRXuAWiVq>BW zsseD4t4Gn;h0G}Lp8)I$Cl-%N1>G}%9Bjs1G3+Db!`~LVWhw0h{`6_8Unzrgr0(`d zji$%uex{ z$P*zp0jxRlPOW)3c(Z?l&(lK6g_>N?l-)7hi$@Fa7LOKUu?LQll9JF|EWtf^VI(D8 zbG^9;k%1-5osi@b*H9K@N7QQI}@@s&0xT>PAEdxtb z7BE>bJwh?HXXjLqvBuk6x#zEDAS9~5*yS47F*12RlJo`fe)I2hJNZ9G(wT!0r2{f* zfve9wsK95yb_<0R4eqi0tRc2v^$QYLj%>W+pk|yep>xf?@!YnkX3&5vz2=PGOF%Mr 
z`ORwf?a_&WL;76bo^NibWdOiJ9!>B9Yqw0wf>!~NDJvRxbVP&0zKcdasa&wqSvBI@ zpCWiwrc`pOJPa*@TMhAQY9UyCrk&ztz35*Nt!4Of*8Te`fXmcF0(_v zceJ&WT5@#Pw6YYSw#|GV9<}+QTT;||e=aN36LlU}<`D+ZifKGIQ|I8z7L`HFj(_eg z>U#(4X=Om9=>3@bfr3@&iILZrXJ%b{`XFCkwZQD~plV>yFvHAlRa4oddp0o82-}2& zVy?*cx}0q}t5IDt56^X6TnVKe-!FX_e^ir8Wb=S`#hXrPM+;V+EzZZ~I$PBv0H~!5 zPy`HOkOgQFS?c$ejCtcMwZWTl5#bY1UjRAM{2A%_rG3|#m?=YX#&1gL0JP!&oM^)c zZ6uD9wDALRkp~VUnr>;jdC|=S{`nh!JW&V=yjX;|S6p07jlVfwdRk8oS+O5z=3yX` zTOdJ7ZtdT%s0#KqUdzBy$|}1f3R}L9*VgXDEazRzwUY%d)OHJ8)K3dA$vg>#;OF|h zHnm6HEdU7SNK0IeldI&^EjoUjoyqfi+!ajMW4mG~_|wm2w~~`4A2>2r1`U~EFs_2F zG06?@k;F!w=BOPy1O5G+rgE3lfc*$ZU8_!6@yP1ZcEoH5FoU!V-Q<0b3qW93fRi9~ zkU(04J$oiIlO|0%{o+N~uU}HREI+b)BrIplY#+e;3t+EOFVtB3Yo!2goH^ryQ=R`y zh1Oc@`y%3vBF14SwX^&Pm{)WUn|jp^U?2k(i_o=e0+legdZ7&&jZ&Gui-ydngR#-T zST_K3-K4EO6Sz)nd{X+lp=QIOJEy~d=nM9yyZa3-Wn|E3s z@t5hkvvmV~4}3HUwEsl|xs+S#%YM9(x%$SA+uc#CGeqg9^N;^dz*!9A?d|>dMdKE@ zu7*Z;V9v(g*eZ1qlYz-|MNs#sgr=>j;MCnX&I~P_*u2m$+di~6q!;rM(`ch75Ez5-#J3ra)>;U5v zIEg{i$2b|ah=y*ISp3RmeFf1f?LpF877MYi;Pnd;D>M!j^B}ADjVpUe|{BWoJklenDu z=t+HkKELEYhTVzZ0Z@^Skpe6zAS|#hhg@+&w0g(qhiA*|*V7~cjWf{x1e@IMJnL`{ zZ9Dsexex?WS3>fUoM4BXx#-df76r0~=u`u9-BWy0Q!wtxx>cB0`TE_(S!klcK`%k; z51?T)6e)x@?i=YcVC9wNeYjv~9^*LWOz%>=ki?4xg_%rhcybqt;#Ta-0L~_Y#=;+E zSKG0Pf@lsnR7L`tyj30f>olngaBL3lcyzAp(Ibr&=Z3r{7uqvY(LlM0-B-=*5EW*V|33;lSu~*qvpeOGQGH1<9MBgFXc*SQ zoYub?`jXaYlHndhEx~3dr`2e9v*1QeJqKtOrIFVbn$Ayx%FRoGZGFMW3=8EDDzr^H zIy`saGC6zhTo&{n%IR{i%}L}RHuX#`Z=>t3mSBY&SEN%f$o&lNGQZ}D$`6FWeNcbn ztDRLLhf8B@t#gL-H~NvY}eQ$YZ(!c2{@rwuwz!vGJ|0Y{q&Z zW_KacxsE}ayvVf>S6Ge!3^*XZawX402$Is#is$sxl-uTI4aL|j@)F#gf#E9Jp0O9c z9OM*|Oa*^II-@pc;Vnb(EX=nA$O<{|Vj8OU(Re@d+1+%U0CQZEQjl zy9bF&{Af7LT2HOJZ<~7~RKF6t7RNrHRNTGqNRLz+3hRdlnr@lJVOyru%9Uy>(smq4 zUzs68V|3T)CqmE`@=y^gChX7z^`rGrz99EnsFL%cZIDCZnt~hQ=_|uhJ2URwvppoJ zQP)^-FhvTsa-IO)5+0jp@9unj!6FW@;^(2r#>}eFr9Y4|(ouWm0dR|Xi9*!Q!=pf3 zWN_&0LXnBT@2=V_37Nk{3LLaPN*hGT>7N^ti4Hp$UD36)y`sP`Ewl7c(4kjPGun^t z`Y0`eZ`vfdr$KnyRR%n9%g}DK`z$WA+RXpn%CH-yTONh>?=Yg)j!l z#n3iPmdlV&Lu1ROcfLu@d|`r$D2XzW*gj-*Ru6vzNeB!1Stij6!DnZ-xLnMPKM&9&b* zOCrqnJ#N;XRduU?`ynxYXezj=eP!o0l38yu{Zn(y1a+kLoO z{pp72u&9tAM@~S4Y*|veE0654vALFX#i|JXD$@d`APHrkf#8Ce&K$7g{EeRL0IzV| z9VnI=GGS|Uf7GY9-EpsOngjPC=@5>3N4C70iv+$yhE{8inL`#rEd*@L8JqAY1>(Rn z^bGy6N1oR1lC%I~Ted85PoV;bOHCTReaXotQD@#)-(hd*yK-lsw>boJV+5;UX7DZ` z46zpvdJNhtbb1gzuuNx>DY^l7`2y4NH)(5z$e?WJ$M!rKqyyF7-NwD0y0X;*O$}SC zLJux?H}+%uk=^^fAv%8ih|Fr9vi8Du#yo!7n>t5-RMz)D3|xZLg0dc%3>_V*qA!buq#sGks6Zr`;% z*~@dwf$%H=w+7d-im8&f7t41&&HLcdp6BHvSx_?1B+95P+Kcng2=@mk|a&^UGB@Att^ZU{**qdmiJ z^*kB^rM^GmQ?K^;>+L zA-Sg|^MEYKX5R^2eQ;N_b@-EF@v~4g09g}(2{H|3f?L-e`hIqHqxtjY`8`ob9*h*{ zwA)3B-AerR?t;NV**keCot;=;fV^4meI`_{VU6#(?(sUO8;05B8B0`5!YZf^#`|_s zE_2I&_U-Q5>&Raa-}EUqC93iLnXrS)Wh@RATtW^out`@?c8pEC^%vhXCAV}r?}4rt z?o{y@ZFd!0AuT;0-3uakq<$-Omk@+bnla<7ey%m59(uM)@G`0$16v&X;Ss+(%t#^mkwb;wx{HmeL8_)u(cZn^Mh{4or8omf#T7;|uM zj^Xvg@IX5asbLAg36`G%f3~=%;J;pKqf+DXgKkVK%E%)D6TOn6g8+rLB$gsnvsLxc zi~R&%+wg9oQJjQyBn5`%Q)upwMwBgm-nz6^^|zaJ%E$e;iNL5l?v*KX!=m}iO))A# zv?LsjS&PwN!JD-WU-_2DUstqEKodZVjlbHQ`AY8gaag*Yq@4%A^v_;Bir$x5EI;K1 zjD?uIsUb^;d@AqX4}7>+w!lG$&tR9mM)`WfjVSd(2o zO#mQY`STAbWSP4pBqm`n)Bfx0rxtlj<=dV6f8GGfgc)PM!`!*|z_%}P*Y`^7EL#w| zg%;1y8f^$HBcKieP5j!8jRm;G|Ji4-ouB1nZf?C z1x152b~db_XBxYE_pRwAfME&eL6Zi?!_G#x&(@Uiu=A#&jsy*?XbIpKSFH22l-Hq=3b3i@1@oCgeoj$!B5++}yWBL{> z7CR+LEKWp;LP7-KK@C*$rWS$RXvKUB2-h=NPj5EiWsBwKU%D>#=BINhCPk?+iVqwP zqRa?x`d#d%t<$GsH_qtLbOmS&{*^-$FJER-Awk)DY~R|yUN|9)-NPaz(I(C>{r##W zOPRJnC}VyJJZ7 z4GR{6!ZER-9~ec|Y+_*p9Ny?!5i5fAhaZlhNUJ+xF~MqE=Dv8(o>ZG(g-1$G;s4y6 
z89AWM2e7gH&6kWAgh~(?33#uxucemk-KC>*3;+_I--eK_MNJ734%rgf;?o{X2qhkEG*7n$VL%DKu@1XdG z3|aoAD>Ql!RY#3KS*i8^ils&tQA08v6>Nt>4sj-T#;8~7og8odd0UqO(F=@ioex`x zi#Kqp1h>GJ?(a|h|9^nse+QGeMqNhq++RB4aiqi}c#M0r)MRJu3wt95A2n4il?-Lm Giz#Dn{ZL8R^MWW)f)^>9oQz_3H*>-EN-ht*h(P zZCBpjzqN6*!l_g3w@H_20yiFO5#AcGN$-qCd2zdUwxD*u>}Z{4g`b}J?$@;>vf;{m5O@9x|aRW}*BRBvh~dgJuw`@1KH2mAfLcZofW5*s`c zH=92kYjWn?x$NEC|9%Z5taMQ+{MVP~-O@q-5DppXh`@k=Q%4>>eoV1nXd69!7}a(z zd_zCt@#B{)-T(eL_dw^PpF8RQ{fc|3kQ?yt*W#hQ1t+s+pM?N$(X& zQhl^UlH2SLp(@K0r%!LWcI_JJkVQ+t*EhO`;iVpl`MGWvE^LdDc6LayKW1jOZ@4i| z!kdnUCedqop7ba@+ef#~Ws83A^N}p=r7R4DZLpoJ%(-{@Iouw|qIQ_T~Q5X_1F5zh=12nk^Wg zu(LZzPD%Uy1|?@QV~p;(*IJo3Ib>W&Q-4O&+rGuGs9~X5SUw4RPg7A)GaEhPkn@Zz zU0mRqr5QD zeK>wc*yv4(VI}|mkN&e$AL$C2+OBEx?VEbpEj!1rSztTAosLu&CFcK-S7k$nUH|cf z65;8VA0O|~n5-;L3z?F1b8UuJ7kXU}EcdRj8Wy;Wk4(FDx&0aIIgvt%zm+_aE=O+A*yw!PAwDxDz^LLXH z=ff^iRVdf6iqD^i_Pdwe?Gpcp=Af}>&z{ev zK0d@&UH#37t!g^l5YuiY?%?PcC1y^0G3k8qcWa@!rt#d~hN=FKWFF?8BP|DZ3TPd* zu@S;`9r2iFj!`J}UX7G-6(W(5kySeCU27YeG`QB8Yn$$rm|s9vmYQB-(KT^NPp|Gr zeYAF|_Yotym71CwzU+c~bc2c2WC0e$nY4o@ap0!AjPdy*GehmRk-Q#afX3p=K$NYT;Jf%;Bd zPJ#32CDXS@jvR@ZELjf0$7*_Zf|7xOp({17y0%u#K;`Py{SrObdGO05k*qEe$KdnO z{BV1_mV<*sQ0EOQDym=;H#fIR_o&Rw%<}T``wt&}Tz24V@j(YVH zQOca@w+aJMf3@4*=oh-I49l-Sd+}mpYU`QPr%xw}+ifIGj9=8!I_2OXiWPbE=+QP# z&hu?)`q&7mGamfhYMM0cl2Z>x4Ghk6{rEYo5-WQ;K`|IxoxHoi*)g5ssH!SGjuS6G ze_(U7nz4z=ujWLm`X`4S)3>V2@ADn0IK(t)U8>gUl_fDvYVS?rdDWlioWIY9acta^ z_@uM5*V@uJ&|8LseFs$ik__Y&X0HMGmsa$PF7lEZ|JGm^zUBO$+if$F9`__ z9$H_UTn|#PAFK?)XaCBTF1z5?M^pNWnv|FJQv!3FG&9NS)-r%Xo< z?B8$H@z%6*Qsv0p_7iugMZ70lQ|j{d{cs z5>1+#UGnqhJ~~d>t9uG&XJ)QX7B93Hx~9*bI2c_oJKt^+&M@GyFfmlT(3AS^MLBPr z=Ld2+*|Hedo@E}^*cWLqHV3g{?<%j6ZzrG}qd3+ecYcf7(bz&O6nMmmz`%5+sF zYj=o(tMj_q$&;I?n1rri-wMsS&yQ_sAC$}JRdsn4xHp2IkMG{fisx1;s?%rA41_6h zicBcjbq$${N9RvhvMr$&KIL*ht>NWDdvA+Ec?9ZT{i56V>M&086Y_H{7b|bQ#1SoE zpUqic>GPx0GxiG#qF!5DC}m_~(llK!@>%yrts*Vosh=>p>>D4yOEHwQ{M8M9}))V z_HDBV1lGHV{V2iE&3pbSX^ic7-IK$Ccwgmxj9a#BxpnJShGm-&t|$t<<=C-f*s0qx z(xc~qXfCR%9y2o9RqU})X_l3V4RhwqnMJ^gYhPbS+28cU9uCsZC#qaUh5bPJ7FrHz z1sxq$G4qBiKkA|?HClaEM-3}|#nE_f-P*{<$Jd^186hG`wmeF)-1^zI$Bi<>^$7~c z;LoOn7fDGEQOa>{-GF>jiu616fd^|Ngek|!OL>$+?56sTk!%Xb#oZSs#`RX8qc{2j zUQSO><0sj~0_0YIUrac%y1JUFB~bKdB=!67F!_ABZK2D!q{H{kabELL)1DCp&7(w> z#Repx>h0SsS+gh)mAEC$;#SP)v!!cu>5BK8Gfk9;{$XK}6861Bhk=ptQ+0J2R={i7 zg{Nt}$m+DQ@pH|)6Fc}XF~`m2I}QhtS)%T^&3xjGN!>6{N+~LOit{P$GIsS*>1>0H zmcUL{ajR-SI=NE-)2Ot2czGRhJtd0+R1>A^-bv;R^shSEy&e>nl^x~uoI18JaCZK3 z$hki~9+O8NJa|A_)<0@*Z~x-uOIESF1|H*wU#i5>wl6e%0XUs#xzm$z!MIOU4z?2r2V z!4R3Qz|>YBS{Bh;PoJ{$gncV=bHwdAhrjHOo!g)^K{l3^n_9p~)W5~0B~?|`_gJj` z`}fBK!&lndE@yvhpl?yIo;Po;S5;S6XI?%hX_(jj)-*8eJLf?o(%rjvzkmOp;V^Ut zhvNF^o7_e8t6vW9kA7Dp%4*G0%9}x#1W*K4O>$o4NTa8lftB6nJIoL8@fi=*gzwq6 z&sa2yKy<;`yL=W3ds(u)<|lg_`O}MT)|(AR+m>J{ZUD|RpPs-)d#^9|vWeZ;DX3GP zqLEtt>C@%8k=7sL4?lkFL<4*=i?<=2-&MRfnXjJ6r*FpsOaB6>MnCrZTRj^^p1Z_V$nU6ghnDR=Gl zstV&#s;;l!%gy~^aY8{+(Ou86j&WGQH!DrQusKD8f5DjNsAEYCzl20v520(($l5u%cY7NR=`)d zfuQQ@l+h>yf`d(o(!4YacJ<-k;Z#?46zin90i_EwIS?a+EVR(6AYWk}d3Q@%q37^oVkkEY#rAtW>u+v`Z> z4^wOxPVM^zjRzWN{DC@MjTqECw#2LiDbHB#4$u=0{_GcW8L ztXfmTMP>{5LS#bYXOtbx&9b6^VcCYe(bNj`uK#{%SjZrC^GV8^wbunw_fSb6Mnouq z2Lh%Xt_fwAGG37dksG{8(CfwdPFu2NQ5Oxti@XDO8M+vPD!k>1mw=|BC_q^R$yNI!|anLzNom?C1J^5q>u9FRc!X*LqW@6F|!!$b;gP8jr zHk2$+6lxc{A4cQ+=uanRR>%A{D=V1H06aICOt9TBkcVZ&4kg$`(E$wP^tp3~L%Qug z(o{Br(ajYw1_BLAWyVR`-%L;nRRcwcTVF9JN)P(W`0s#}xzV=9Jbf7yYSeJ%th?Bb zl_0E$00qS-^+Ua`TY!JNp@{v*;};4*O2A9Pip! 
zcH^~v;o}biFzrvB|E3fh`O$og6%4_>`-1+ z_2sSU*@&p?5uzURR-svCao&G^@s(Om6gd4lj@l1x0l>xYHY3rqsc%eC>5=u@=0Ipnp zBvWwQdbHE8=G!=3BK89cgOR74)z?bZzw4Zk_P@gyFLW{pt<7bCZDx181iioN! zrlu@e^<5?#?8eu<7yBtA>fdxPD<-oOx17~hBgrtI+4j#5OAfm0BW>I~JgMP7dx||= z6X!0Fg`T~wjv^|8lMPJZ!!l|n^EGI^BP*$G%bgKVm%q^6Nkv75ZQG7&YqOY|npSeW zrS+qv6aUjU-kEz*N5_KQkN=C0v||YU(+DklO*J*+pzd?~7zK56oTMhsm0r@7x>?W? zr@^a)&Ua-`V|UweF4Xm?s3?Ggi{MvkY7gwq4WK*ZqlNGtQT)5-SuD?wS2l*6|0t5x zc||n8j?q-}{kbbyGM zcfzf|WT1vlwNJEi-1onIy9vZ~exm!VqT(HTg>0*i2WXG>D~tBIO4q9(MZWi=dt{<{r zM%3(YIL3|3l~J^MC zO4#;TJ(AJ;wf04-Ddy?ZI&7#TY3Ifznf$zF!aX=_|2+6sL*FvAMxD~2YSmj3103)~ z!q(t`Il(3-y%%I~CM&REfav<6+JG+G{itJETWa)SK;em#^U~wwbr3n@RD1YxR)fAVwyOl3f!<0yx>oQR77F6P zE$9m`&&P~3KTCZ1vOq7azNSX>{7i_upw7F4AzYGnWs59hlkY_i+S|n)`QaTd1_BOB z5J4kg@jN-XddMPeR^$7(K3V>8$SFdjSMy7UzYVNtTXp3%{oH6R&SvN}7j8eM_Ge*I zWqob6JJ52Eh)7f3hw;eEmts1)H0(Y7-}mJEtu8xbKd>IU+SlA;6sJ0Tbpd_iORA=D znVh5jpWnZp9`SgLDi{_KVczvsm|KlYO4zZ#2b$2*aOJngM!97HD1)NL0gw+Pm^bJx z7viFhpEwcRTYPs*yt_#K&{Ru$%N#8JoiuyerMYo3n}N}!-pnaH+$lb`JG|2(TGU;>#pn3p>1BaDq9?iFl@$&1|=L- ztIC|tD6IV@U)+%tkL|B;@?;pK-piMFQ|%P!15b6zPxeL&z4Th^@`@Z(&B5W3GI{v?`*oC;O0(2(3<&3C;$2RNh$*8 zb-8C<`?3$TTf6AG2)ZxV-o1MT3`@Eq=e}fHYL>-r)X%YENlZ*6lxH^EqA;u>WD_@V z?Q!8dRMBgMjD>$l%PD&b_5cW!c}uctr1HK?X?njz!M&`TpyqQQJ{*n(&9Uk@-do}& z5~cjwuryXhZfT>h;p&K{kl174eLOrL^9HughjhCg2>$%}LMR)veh3aW+o+xoxH5Dt z<{Q2yu0!9%uT*b?Msgmcz1MS=1_nVAlqGCgz0CK&6Mz5u;tx_25#`-l++6H3uES#l z5tUuu`zfd~OBILZ2kCU(A95Gt^YS7)vdqct01{@)8C6L0+9S`NJ*zZoU&?W(4=b4H zcI|!RaOaYhl>k6&uE+G=xyjzRq1uRxmo8Njf&wzUjUYfu3Bl>`#;Nd?tmV0qJOiz3QtF_SAnwrnLsJx52%@2Gc3P8#cM;Cl$ zaW-zf4)3k(Ps<@`KXA*Q@*Z#NxO@y_ORvxRkzc=lVUxe83El^s7b-imIlX9RE5XkO zKrDWbw4&N>78Mn3$9@4k@;xHNBJGqsd-3}CneUs0+J@%Mdw%RkLp-gbQgy|l+xA00 z-uyBI#*GvdGv6%J=~-Crc38P+uXnn2NYUHwZxYlzdFj&Q%!bE>x;FW@mAJfG7K;H5 zsaX#Npd;D!m67c`_vA|57?}s}{O1#NEPjLjobCYx){Ty?Wis@94_OpFUgGO$(7k&D z=JLxdMuz-jpkq>8J}V;m`S~jlF?fM7AlaA<21eTyd>Xpf4=X_hf&jWBv4h()-a@^^ zUP_oHU#|OI#pUxf!oYnCc&N-|(MPx&H>h$Kd#(=#w?5h737mFDtctzPOssTu+&0y# zusIPbLm1jVh>E1wpF``j^6Mu+e4ydkMKxWDMj>&7)!;T9u*0FS^QinEb;G>eqO zo>a|r7oX}KZSlv*(TPl7Gk(pn){W3!>#^G?6ieu)_y7+fnz>ALZMk03+zlQC@_q^* zolV?o7X;;>Kd++rd}?T*7`yRe*?N_m+d0aFG%=$U4*=*1^5DuyP~%@ZeYN;Opfi7+Z|13G&gVMK^j|VQ4WFw zWEr-eOIWuoNI?*8gP7yN6>wMGi;p>5JE~Bt^Tv`-$`Q_&Los|j+1Wa=qPefS5GyaJ zseSk}Hwxvy_-wIFcR|>L2WqcgORt(=x$;8f*e|R|(eLK7Sj6e(v%*sHZvw&o#caAH z3FV}ww)XVdv$K-#`NYK^_pZ(79+pctxXb>2sGw`Du9N-t;0NmnA${42Lg!BF+)R%J zn=@z6F0D@b5Ilg`7O|iXFPdY{aYsCd+T;ud1x^e?L-=sNV5E8Mi1q$+?OV2G6&Nz^ z*g@%cFpFl&$m#OBE-|vQ!VN3>fL&Rh+v{zqck)n_GPwI+fBjlS3Z)o+3ZXl}?&)7y zn)US(tMQn~PiiJ*qTQ^(s^Xd~nxzHnhSGu^14$ar0IQ#{5-%GYTP^Y0MQ*cbCHI@$ zHf`K^(5~-jWhj@A_T66q%TEqlD&pXddzINc44xON5z)eYAc1_Rml?yq7zB zXR74a=%j;I1Ok^#lcih24x-1Rejrwha=T&*Dce;@aTS$PgYT7?SVmvJci-qhQl{g%>5P zQVuVg8i6Lh1?};QfTdjjRF@GIimX zuePYHp0O_2Ieip-6@8N%Pz8D!A@QRuF+*gqEt;kO)7pN63twZ5$hgy@VPSM^t7b?Fd*_*`Q%B398anbZ=&{-vMr5df#kDa-u-mDb5Bh?O4o4iJC~lUzIg&EI zR(CN39-Y2Z>6Q%P-D03TxD%DQ(J*)Y?O$WTgV-d}zA@G-D8G93N<72ouENN3NcwQ3 zPJn>R8foy%F02SG5SF7kVVS? 
zTiI;#-kb7lHH2`o)bG!A6&*f2*Sqx&v@JAu%64;pDcj~us8Npvv^TWd{+e;>d?>uU zwmd0?)&j9O^_@+O45}GK83mvUw0P(%1y&{d9P-)4w6`-=&-G#RLecsR$PPAh5IXq>49*!~@@#mVEv zBwCMi_yGAU8kk5#Tb-J+)5)=NYx3^UcK*|dGJ}%}202~eL*c=02VWjVki!w zBktAo*FPF(W}wuRm8cFSK34oxUq6IG0k}Y93EoO&2jqgBA>R;Iup5=lJkD!pbiE?> z5`jvwn&2u~#uZzER<_X4486(iafo^*%$yL(*TEYxm=HxV~40;B(AzZrB z9LeDVtU@Rl!~S=xcf43Us-Q4Z05Af(m)rRS955(ZjRg3qsHho_+f0lX)oL}6X?92t8;BYY|+?6_;GlQyM=_(zD1keMf&5^YCQ3B2V!Jg zU&0HVsKCEa3q{I76n=UPZQ$#MLJR!AEBmbYY2!2fP|F*!$|ACCk{{?U9%5nek zV?`aECm#02FLE+2%IL-aFTa=m?l9oYHmrX&E)@4I<4222gwpu1$H=VtCBxpr9@kYh z!(Iu!&V{u9{~4nC-r}Qa9MSbZW^BFGRaN`(pG23uqt=&`T=L>ma){tfsiwxh0siew z2;~fK&OemhJ#>-ntDgQ<85MiKBvI_kSJ97trk!|w$+DQMPI`lGYU_y03vbnbKDk}y zU$3$*)$oe-#XVs)y$fPYGD_7I|G9>dGo1$>BOf)bf5xe8H{|Jds~q;qX(ZxlqxzUnrSn!SU906>Ha}mOF=C1v}4Y z-omMyJ9c+M8g6u5WU+=JWezwA3I2Z}$1B4YTBc;dG5jw1?Zb4s3CTh<3YI1`*3072 znH^U61B;(Jy_mErMyYG@?_`UQ{RS%UJPR9D{)^NJV1%-L-P29g%?B42pMVf{wQ zy1kt)%$<`3Q7~_3NZas8O17`w4+v;EhUy8Zp#~-Ct>Qk_T#at)!fTo!7DmQN2 zsEZWwgO1*vXamO|h~T+uf}+s+ZGZo^ZYGV^ES)#XPbHfIC~Znt zT?h-2phlEjo_%U<`pyHaNb`-}Fmv*F2HvO~EnZ)&g^sWvqAyzWrPNmDr|x1$^#Pp; zWf}32E~hW&6qjJE)DBbx<}dVk5Rm~R6~2Y>&ImA-rsUzfxpQ5JwS}Va!-qTpL7WU= zTo!>5rD-U;8quA%(FGdrOZ_b|JvI3lTb4;WPjj zyAB?_4`_V5L=h9FF(U!8z z8=KSgWjNiw)1$L*ISMJ95G;UDUc!O!J(8z|cLA=0)=7`F27n^7m>D$)aNf=y!e-VYqTw1B~DJjpePbfK*62@N^0glwMItU(R>-ZJ`(8g;Rju`L?8^_ z7hwj%)F&eVGnfTsJ7qbqO01K2wX-0EW`qK=U*dd0X5PvIi;sZU@EKQkW{Z_MQ))z1)M;qDL3pFn9~NjI4v>mXbU=y!ktrx77q2LqOZ z*DPH8TP^(k9W1@urPORiu?&b0W_voJjw;V?x^Z?^h5tUHlTab}KH<>+UoJfV-%tnXZ!t#Gwa>TtuFL zMfA#ML~R~Tzwf~zCN3YQ&QpIb@Am5rKvyEx-wyFS+s46BkD3E%BMDZI;t~Ir7IkD_ zY69qaf{q<3`;WzX;J^XMB(=Zjxw#zttSS;)bX8vpti?C@zs^ zs*=t2Gfogeo**6o){|q^hcl}ah|3@ zQ0*WXksk!wgE&_Ky(mcT&qEctXHYT_Tw%+4i)~sDlUNIN8zS@}o38fj21F@T`=YgB zY_%F0rYK2!Zrtq&f0~}Jj#>!j5jq)y3D0qSSAMDZIHV7J=*&}0dqXI;lRD>097|rJ zUuW39(~0|urEc4&l=da8Ykqdtaj7kAD$VwjP25>03Cpk4AR9QBL|_u+lF$>0Qw6z= z1YnH&z*V|fA*?6&IrfML0~c2#ik==rH|#^#sKaduN_3Bf^mR-bF2k)~TwWG)|9v%L zrf{NwP@W>*qetp_g}gt0jxw1RzSbaOj*MU!gcEBj?yRg#O{g+h06G%!-6x0cLOi?$ zg+Cncd|kisA^J!rWXzY!Q6G>^17Ep9M3Z!Lg5u-jQ$@w%(=6dZKqjy)U487m`lktu zxJ+)j-Z&G&pDpe!q}94p4&*oq2VKUrh?gOjUb{OU1zJH!`0;i4xj&KBLPRhNJq8QA zUqGPTICC32yE0@?B*lI_*0r%uzm%AQ;*rB%`#sO6H)Xka7RY6HX%(7Sjf}k9TN7LXBJpKB;i+YXohU;^NL1Sf6tHH1v&IWqq8A}`StDk+bIIW4wX!Rve zsL#Z7Sc%ImkBJch4^|1AI>Yrpn$oTlR_t8Jp`iLYx^E=nwcuNiw=nJl)j?mhE=n6d z4X2xknGkZX+thnX+;+l4Gp*TfRu|ddT9-bm4;(;9oM#^kTplRD`_7{Eo{)Y4A|Xe? 
zy~#Y3N%L#H`sh8-d=)t;|FcmCK<2;0!9YFeUA<3bJ1WI?*kU_Dp^!f$>K}3sQIKY$ z@ltu3&i1c}-C9X!WiOnf)R4#2!sy;RSKI2*>lR3nuUGE$`j%=OLyug<&k*obVV@-` zqH`0mNAwdY1Xe~mJJ3*a4sN{qgSscdmIi$<}~!v716IHukYo~ak~H9P3SDBlqjHxw}iqKz84r6 z{&Q_zF_`rvRBpm+g7JjLm;s3xIhFeIyNtnWJs0)1ZQF({QJlzf=P~;m0RfWg5|6J# zsrdy>8M{`j2Gxs1NHljtLVlzbPRtY-7#MH@JqcObQ*PWi1>flj@v5jifk+zirV*eP zEL~5iS=!9AjTZ}%qw@yII-FG{WOzvk{t@X_V*o?~cf!O#7vw}fDkznS7H$Xp%`Xs@ zn!kL}4{_rc6@35)Fiq{gNv75!5FFx-vw56PfbpyWONq0Gc+^2KY(9w~JyanY)i_??t zJ;zVf+TQgw*96X^*RMxgo9 zFbv*Ii}*NV))Sce~DRp(2-T-KYZSBH;6L^=@}vqFv;MLL1A401RL#UT2u zoAUw8qvU&QNIYH~%W5R3K&=lEBO_bhG^J_amZ&p_HLu6Ml=AxXZk2Au2VJ?$BD)zt zFyVDkQPToOVG_`J7eF*qRCOX=hv*veN*j$n z#~(v1@D4s2q2+mc$|kAAnPERYiii+H!OS&(6Z`Ch|65#>Sz~NHLPH4Qpz_^@q}PNX z161&#HPFQTdrv7}v}WR;LGL+(T@Ah4PF@+REx_Uy?5WEzpo^wHP!Yifs9A536vTE_ zLM;hCmss@1JgXxQF);922rcv(3#PNfjl;nbJw{{gnZ%q0?2y^HHbcVEQTK2Q9#uso z8<{I&1i^4+fQhi+u-$6H`BBK6Cc0R%I_eFpU+4C|}C>*EY( zud}rUm{ZbmUIDJhvHA5@_+DUu9u4XX+KiEClvbgOAUu)15W1k+QqZzjgR|-FTYDT4 zK_r(5xCo&OTj6#ULCc6c1IA(x%NG~|T5OOelPck*!rlXf`hg7y`SB+1yqHBxa^QRx z;XV;%8Mlcu;@Rtaje84_pfo_0T3RSwuSe#Lw9IOX_R@!3DcP8%Wy#7+ZPEJLGSN40 zQ!ydS2IX!CL^M(0bZ74x3(PARkF>lh0trIQwXyvvpPXDQZ|wCC?eET@)6fieXfln; z#(??}G6)N4adY^d+l8brxN z0*%6`x7wjRfIh-&nr%B1PHCtG!&$v8NNy^}NQcApoBco~Oqy?dj7%3)KK*MvSNuoH z4-aVnH%5CZ=1Qj4enG(@V0<&|Pe`J-035=eJgGq;K&ndq;!9<+u`NG|jX&zGh2cXJo2RgCr4o$-lIBq&w;fO?`!d&uy+%Q#;p&138YDn&}2G zRS0*M$k4+b`)Ysu)=sZA79OW$KM>;-9Cn|hKrkF{7Sh_umfBYxD$Foe7r9t zUliX8ZFa{D^bRTuIewg%cvsz9_1uxI8w^-<=J9lw#gIzKDJ1e z>A)0f(&9*JdZfIM9Fa_@>Hn|EfZd%+!{?>;t=Y`ND`>FhFe0GIR6XW_KYCH*RSV!*zZMSe(f-?YyH<^c>T2)MdPDsN@ytr2?hjL zMFEBC4DjCN*my*!HXoxgD#YI*rT}t#tusf^waebWwr&oUH^Zt!l)x@Hnr9JBugCT! z7~Qqrz^?&cn#!O#U7FA0>*Sv#&XjBReg z_1q?ziJQ)L=}4)OA+n%*%{d53St5c3XYvy!;PgYJ5Jchd1x~L`Tmcptz|J*CeGF&l z21pP>D4&QnAbgt7K6|HeUqQMO*OAC7w29z~!P?qd{60zuQl$Cy-J@dJA2(T0$>8^p zoU317vtT_Dxx&Fk%@r}5rP#QU+3!oip`$xq(N=6aoG@$pk5HO+vNK9~Uj>64GSUB^ zjFef7K9ozo0bKwFq4`wbdx++5O{!?u{WGIpQJ_)eupNoXA8bEcLTx%LT&#ZJ#vV{X zZ46%}B;+GxOO$iu`<`QStRMnRbP$Lz^o)$;M^Wi8glLBLim>~ABHb|e`ci6DA~VOZ z%tHNx7*lg|Mg|78SGJ*0HVz$ngt3u`8Hj&K@UmfMq7f|+$n30nzx19%hqh<1+K_J8NJ+`KefxLOqpf}Fo?!zw zpENDKB~)k?Yc0pN=i}_BttuFtbzNMktnJT1!|p zK*b1)B&Zc5i2{;ViGemfhOa<5B%#=!LIMJZDi2M-oIij+dCQ|(5#s@1CWzllwZ6t! 
z*XVs5jSU+%Y^J3J?XAljikG9g%0X2Rf?GF!YW4X)`gvKt;L6Ghv}JVbZGSBr4K;On zZ*MR2(|e1o|6_+rF?8SCJCcLKjS#>RV!4D5oq6jQWDdUyW3sX3C}dO)TD2eK`9w3n zw*mrOErg)<9P4P2R5l}Y4WX><$gsYL6d?s6Q}u_{|0K!*tir0i+-ka*>qC?abD&dj zUolF^2u`PrUMpokuyOS38xx=*#kd^sVIs8a7@6b_YDJ6`Do2UlhDw68#wVDZS_t+4 zS2J}Vd|OmhRI~P_v9WO|Grf486|t!SG$NN24KG60N7I1OZE|ol??}E}_wzC4qPB&( zbFKd+#B-xhvua{ zan$k8G)f23x0pv`QZ!cFa%i)*FM=86HPgDM#H8s%R~DqHj~s zu;wuZ!ZyUjUL~%I$lILSv%I{FIP1!t^pZdKK!Y7I(C`4v9!;lUX_d}Q?sf&?` z03#%Z5lvijmS0+sk2rKLeML7-TMh&L|02Rc|9j>Gtq5(I)6&fVM&b3;FV zFyMSY(4yE(P5lB&60};<{3;y~(uG$(j_IyI-0;P(b{AyVHebM$ckamA=YKvoRA zRyQ=bxLHBNMV2)$kbnRF0E{6Zy%Q`QQY>>R-QXP39^pUdPhxF3&8=FK-djO|)n>oECQ&v4XcdwHdRh5stfT@Q)!@)FlL19@?3rdIe1riRMo z&e~z4jCJKd1QYrKkqxQh4(uGLZW3m^8(!JaCCb{FQVmYCM= zL`Lrukv~cL1}jAoHWT)b8RR#iT0T=VGlxIFuMuKuSxL)9*fr>cL(sThz3m}*EMiZK zoxN+w^B)OvxBU8gH?k)<#0jraQWsg`v1^GDx7P;6Par5U4HABX(Xr&CzY4fk5R7>aa3HMO*>rHv%LgP!}X^CbbOjUfcDuyH9+Ht$V8Xv1ol7Oc9YxC+qbWsr z!ypcs#}F?wyjR^z*8W(=ULV*p_~ALKR*1|**%i@g3^8n&Kj)3@RtE`s%hs*WZF@_I z8*N(qm>$~~%G|hc!OjE3;?bFn@>fypf<$DaG{W@nu41yJsxoTvx{yMavIx8Euw$m< zExrFNU0T{ZV(Ewo9Zak+lssQ_d6ppvGlUt`g<%d7@(VZ^akq%44{V~^nPK$)Cf_JwB|u%pqdLqt%$rtndW?x6%u!VVLsx{*8=;01 zALQmHDzJYd<~bbaQ_?)cm;L@f#7NS?T5_kBtKQ|bMn4@T-Ij&P1enf$Sg0yf7 zpJ3y_db!`2;NblAGdw)OiRH2gy;MLs-mzoIyZUw#;ih0@`vkguYWDWi&WMy=Q*_lI z`01OjpftwMuIZRouEAmWFDyA~y?gF9f!(B@|AXDuLyKiX15czrGIzqVu^#a)~qN_b`hk7h?ZBoxX&VTe$;@mmC!fQgBwTbUi?-)pIJACoL z=?(TwCpT)FMGh@cmO4gCg-BA9Q#%?oR+(^?4?8wQNyT01nQR>PY*@+|uV!&C=5#MM z(D4lP_4U>9>M2xApl4F$;hx#vT=>A#%j;R&==fJbJHRC_?5O4eU7Wa6flptyT0o7U zx|Z1~5(ruEKVKGQ>U`mn)q+BoM`xCVUh2(tIj3CwASO99FL1t7 zUN%2QhMvA@y!x6WOcseR*S3II)KQ5qdTu2VzrOgP4nw!|2e;_hqEq)du1Bfemd#G zc?E?%N)8QF7RQ9gIx#RW{bLyG5@^uO8xWIL!T|OUHrP`vCqj!;2C=l_q<9eI+LjO-Xi;3cL{W! z*u8u9U~P{DE$3wR>TdB{1Ym!_I$1gU!>!QIx`dZwY<^)OH@a2Oil3Kww;Q}%5j#JH zoR(qZ3qP}R`~n~U*R$cBT>AhkXE(G3IGUaBd|Y!;)&JauRQ@ksKQ!y#~#?72+r>FPg9ro`KyZ^oJ>Y?KaO#zLq zk2<;b(9^5#{jo`1jKc4|P5dpl9rR(tO~0I6Lixi!>g%CQlDgK%j8&@eiQ69Cui7FN zGj5{XVsZEoD@}_iw_jr|7>9D_ZH5ey-rZK~mbQCkTu6(PhI8t0AuzZxAR zC0~CWHm<=J|0gSy;A-G}JJIRxTuR+h8?4BK=1)w|4I+nn6_H({#CO3l6ZeoAcA3Pp z7kd2N$POR+V4`Q`$fr5Om(>x3*r73`Dzx^H8;;Eoj^mMhU8V&+iO^G zfL^tD7RH|B!%khwe6oPvDg@X6y?IU3RZP=kthpNA3c2N2Tryb3YR#> zvTUw73mW<7z%AT*djrxw16(g#byjab^j=?e1r1d2Tr7>p!6U-u$o+jT_3@vtl_ zAddw@Dq|t+*zS3a{BDnJ^kJ^X21jYmK_}dPuckvT?Yjn!12VUW&Id{Ee*g8;L*8jD z01pPviIH%?czXyHh-1>m`K(D{RVn|nHukB+20@^p!YKbiA-z0bh@|~K>&sl|I{~12 zpThVx%zF{XGlGSl2hH$vuB{<*wnm@>?YXvGgtTJsf-RD?&CICgvafF6=qOdZ@qNSd ze{H!3wAZP~I~_RscG|^vKGf=XCdkk4hXeiT%1y-t`vcW%)D+MacOdh43Xhgb)djC< zh&>XBv;xn*eH5gF@HsL6P6Z+Mn6WYUlS9{xb3=JFiYZa6bPo#0v*PI{w^2L|TR)}3 zM)1dze|GWnQ$ag#wrGMxi;bQMW@G=O8Bq`sJlFt`iWPP`$rmcdb6_%J+_)m74P!yX zxE`U_Ks4M=>qce1P~X^C1#6`VaYaxF)h?gv`@5MXY}g@wh+O`#G4p9aHDf!iLoME) zjI_A8cJ29_b_ z9DMKThB`}g5DykWAcz@HRbrE{DTiiahdDaP_yf~b-I+$)W!~XJ_dPuAsTei>Mz=vH z&u%xo3XCKz>QX#Yc`A7WA#VHoy2N9OtUBIqLY{+|+tiQwd$|tF)=^cnu~1Xk-^d#I zp@E`9tP0_;~V%W;%jSarts8*Z_arW(7j1J#2Rgr zq?co*ows72VYHLJmJ|=rL(Fmz4$ci5?ioqG@U6bGW)4)3YVK8p-FA$iij`p}P9&1gpD%HH4u_{?rDsIu-*4+sOKk;8_QxFb94#-ss`F-YH3!y628aFq zNqSR7ZawPDHuW1!`;d8(^q3bwnED13RcaEE%^~(Pp1M*6flve%DP(KRERX}+GQ?FZ z!P@PEwTnBqE7t7?%XB0jxbP8fAUo}I=}-;+#+P(3I~09PG?q6TVeHvvSE*&jd= zp=HY0gI@dmj>=>5Zfu^LXA%ipmx$&eX-gs*c+F|}2adJF|K&oD0N1{v*@M3O&Hg4MB=D;a z!c31{BiTw z1u<{m=;jVUk0YKrkf`ATGhGB3AfyzxLrtItP{R-u!HJL@paIow)>mjK`FGpqKhe~v zg8gm(t9dV>pTHg5M1o7f%D7Lt4Dl&{Jldk&dJ;OowJ$HpNZzY+nt1388LTow_V0EH zk<6OJ@YxNBSN%xCyvMpD?n#8(2+IxhELF4(Z2~v(9B$&#in<;IZ@2{oY4FIbDyWi# zdP(?wc)PsutYwU!JZrT`tRPvGJm8ag7tZl^Ni!uLhR36P)K6Cw2Zw|Pll@KQ?j@ghP8yqH9 
[GIT binary patch data omitted: benchmark chart PNGs]

literal 0
HcmV?d00001

diff --git a/vendor/github.com/bytedance/sonic/bench.py b/vendor/github.com/bytedance/sonic/bench.py
new file mode 100644
index 0000000..1d4c357
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/bench.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python3
+
+# Copyright 2022 ByteDance Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tempfile
+import os
+import subprocess
+import argparse
+
+gbench_prefix = "SONIC_NO_ASYNC_GC=1 go test -benchmem -run=none "
+
+def run(cmd):
+    print(cmd)
+    if os.system(cmd):
+        print ("Failed to run cmd: %s"%(cmd))
+        exit(1)
+
+def run_s(cmd):
+    print (cmd)
+    # os.popen does not raise subprocess.CalledProcessError, so command errors
+    # are not detectable here; callers check for empty output instead.
+    res = os.popen(cmd)
+    return res.read()
+
+def run_r(cmd):
+    print (cmd)
+    try:
+        cmds = cmd.split(' ')
+        data = subprocess.check_output(cmds, stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as e:
+        if e.returncode:
+            print (e.output)
+            exit(1)
+    return data.decode("utf-8")
+
+def compare(args):
+    # detect the current branch.
+    # result = run_r("git branch")
+    current_branch = run_s("git status | head -n1 | sed 's/On branch //'")
+    # for br in result.split('\n'):
+    #     if br.startswith("* "):
+    #         current_branch = br.lstrip('* ')
+    #         break
+
+    if not current_branch:
+        print ("Failed to detect current branch")
+        return None
+
+    # get the current diff
+    (fd, diff) = tempfile.mkstemp()
+    run("git diff > %s"%diff)
+
+    # early return if the current branch is main.
+    print ("Current branch: %s"%(current_branch))
+    if current_branch == "main":
+        print ("Cannot compare on the main branch. Please create a new branch")
+        return None
+
+    # benchmark the current branch
+    (fd, target) = tempfile.mkstemp(".target.txt")
+    run("%s %s ./... 2>&1 | tee %s" %(gbench_prefix, args, target))
+
+    # try to switch to the latest main branch
+    run("git checkout -- .")
+    if current_branch != "main":
+        run("git checkout main")
+    run("git pull --allow-unrelated-histories origin main")
+
+    # benchmark the main branch
+    (fd, main) = tempfile.mkstemp(".main.txt")
+    run("%s %s ./... 2>&1 | tee %s" %(gbench_prefix, args, main))
+
+    # diff the results
+    # benchstat = "go get golang.org/x/perf/cmd/benchstat && go install golang.org/x/perf/cmd/benchstat"
+    run("benchstat -sort=delta %s %s"%(main, target))
+    run("git checkout -- .")
+
+    # restore the original branch
+    if current_branch != "main":
+        run("git checkout %s"%(current_branch))
+        run("patch -p1 < %s" % (diff))
+    return target
+
+def main():
+    argparser = argparse.ArgumentParser(description='Tool to test the performance. Example: ./bench.py -b Decoder_Generic_Sonic -c')
+    argparser.add_argument('-b', '--bench', dest='filter', required=False,
+        help='Specify the filter for the Go benchmarks')
+    argparser.add_argument('-c', '--compare', dest='compare', action='store_true', required=False,
+        help='Compare the results against the main branch')
+    argparser.add_argument('-t', '--times', dest='times', required=False,
+        help='Value passed to go test -benchtime')
+    argparser.add_argument('-r', '--repeat_times', dest='count', required=False,
+        help='Value passed to go test -count')
+    args = argparser.parse_args()
+
+    if args.filter:
+        gbench_args = "-bench=%s"%(args.filter)
+    else:
+        gbench_args = "-bench=."
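+    # Example invocations (illustrative only; run from a checkout of the sonic repo):
+    #   ./bench.py -b Decoder_Generic_Sonic -c    # compare one benchmark filter against main
+    #   ./bench.py -t 100000x -r 5                # fixed -benchtime with 5 repetitions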
+ + if args.times: + gbench_args += " -benchtime=%s"%(args.times) + + if args.count: + gbench_args += " -count=%s"%(args.count) + else: + gbench_args += " -count=10" + + if args.compare: + target = compare(gbench_args) + else: + target = None + + if not target: + (fd, target) = tempfile.mkstemp(".target.txt") + run("%s %s ./... 2>&1 | tee %s" %(gbench_prefix, gbench_args, target)) + +if __name__ == "__main__": + main() diff --git a/vendor/github.com/bytedance/sonic/bench.sh b/vendor/github.com/bytedance/sonic/bench.sh new file mode 100644 index 0000000..701986b --- /dev/null +++ b/vendor/github.com/bytedance/sonic/bench.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +pwd=$(pwd) +export SONIC_NO_ASYNC_GC=1 + +cd $pwd/encoder +go test -benchmem -run=^$ -benchtime=100000x -bench "^(BenchmarkEncoder_.*)$" + +cd $pwd/decoder +go test -benchmem -run=^$ -benchtime=100000x -bench "^(BenchmarkDecoder_.*)$" + +cd $pwd/ast +go test -benchmem -run=^$ -benchtime=1000000x -bench "^(BenchmarkGet.*|BenchmarkSet.*)$" + +go test -benchmem -run=^$ -benchtime=10000x -bench "^(BenchmarkParser_.*|BenchmarkEncode.*)$" + +go test -benchmem -run=^$ -benchtime=10000000x -bench "^(BenchmarkNodeGetByPath|BenchmarkStructGetByPath|BenchmarkNodeIndex|BenchmarkStructIndex|BenchmarkSliceIndex|BenchmarkMapIndex|BenchmarkNodeGet|BenchmarkSliceGet|BenchmarkMapGet|BenchmarkNodeSet|BenchmarkMapSet|BenchmarkNodeSetByIndex|BenchmarkSliceSetByIndex|BenchmarkStructSetByIndex|BenchmarkNodeUnset|BenchmarkMapUnset|BenchmarkNodUnsetByIndex|BenchmarkSliceUnsetByIndex|BenchmarkNodeAdd|BenchmarkSliceAdd|BenchmarkMapAdd)$" + +cd $pwd/external_jsonlib_test/benchmark_test +go test -benchmem -run=^$ -benchtime=100000x -bench "^(BenchmarkEncoder_.*|BenchmarkDecoder_.*)$" + +go test -benchmem -run=^$ -benchtime=1000000x -bench "^(BenchmarkGet.*|BenchmarkSet.*)$" + +go test -benchmem -run=^$ -benchtime=10000x -bench "^(BenchmarkParser_.*)$" + +unset SONIC_NO_ASYNC_GC +cd $pwd \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/check_branch_name.sh b/vendor/github.com/bytedance/sonic/check_branch_name.sh new file mode 100644 index 0000000..d1905da --- /dev/null +++ b/vendor/github.com/bytedance/sonic/check_branch_name.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +current=$(git status | head -n1 | sed 's/On branch //') +name=${1:-$current} +if [[ ! $name =~ ^(((opt(imize)?|feat(ure)?|doc|(bug|hot)?fix|test|refact(or)?|ci)/.+)|(main|develop)|(release/.+)|(release-v[0-9]+\.[0-9]+)|(release/v[0-9]+\.[0-9]+\.[0-9]+(-[a-z0-9.]+(\+[a-z0-9.]+)?)?)|revert-[a-z0-9]+)$ ]]; then + echo "branch name '$name' is invalid" + exit 1 +else + echo "branch name '$name' is valid" +fi diff --git a/vendor/github.com/bytedance/sonic/compat.go b/vendor/github.com/bytedance/sonic/compat.go new file mode 100644 index 0000000..015aa62 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/compat.go @@ -0,0 +1,131 @@ +// +build !amd64 go1.21 + +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package sonic
+
+import (
+    `bytes`
+    `encoding/json`
+    `io`
+    `reflect`
+
+    `github.com/bytedance/sonic/option`
+)
+
+type frozenConfig struct {
+    Config
+}
+
+// Froze converts the Config into an API instance.
+func (cfg Config) Froze() API {
+    api := &frozenConfig{Config: cfg}
+    return api
+}
+
+func (cfg frozenConfig) marshalOptions(val interface{}, prefix, indent string) ([]byte, error) {
+    w := bytes.NewBuffer([]byte{})
+    enc := json.NewEncoder(w)
+    enc.SetEscapeHTML(cfg.EscapeHTML)
+    enc.SetIndent(prefix, indent)
+    err := enc.Encode(val)
+    out := w.Bytes()
+
+    // json.Encoder always appends '\n' after encoding,
+    // which is not the same as json.Marshal().
+    if len(out) > 0 && out[len(out)-1] == '\n' {
+        out = out[:len(out)-1]
+    }
+    return out, err
+}
+
+// Marshal is implemented by sonic
+func (cfg frozenConfig) Marshal(val interface{}) ([]byte, error) {
+    if !cfg.EscapeHTML {
+        return cfg.marshalOptions(val, "", "")
+    }
+    return json.Marshal(val)
+}
+
+// MarshalToString is implemented by sonic
+func (cfg frozenConfig) MarshalToString(val interface{}) (string, error) {
+    out, err := cfg.Marshal(val)
+    return string(out), err
+}
+
+// MarshalIndent is implemented by sonic
+func (cfg frozenConfig) MarshalIndent(val interface{}, prefix, indent string) ([]byte, error) {
+    if !cfg.EscapeHTML {
+        return cfg.marshalOptions(val, prefix, indent)
+    }
+    return json.MarshalIndent(val, prefix, indent)
+}
+
+// UnmarshalFromString is implemented by sonic
+func (cfg frozenConfig) UnmarshalFromString(buf string, val interface{}) error {
+    r := bytes.NewBufferString(buf)
+    dec := json.NewDecoder(r)
+    if cfg.UseNumber {
+        dec.UseNumber()
+    }
+    if cfg.DisallowUnknownFields {
+        dec.DisallowUnknownFields()
+    }
+    return dec.Decode(val)
+}
+
+// Unmarshal is implemented by sonic
+func (cfg frozenConfig) Unmarshal(buf []byte, val interface{}) error {
+    return cfg.UnmarshalFromString(string(buf), val)
+}
+
+// NewEncoder is implemented by sonic
+func (cfg frozenConfig) NewEncoder(writer io.Writer) Encoder {
+    enc := json.NewEncoder(writer)
+    if !cfg.EscapeHTML {
+        enc.SetEscapeHTML(cfg.EscapeHTML)
+    }
+    return enc
+}
+
+// NewDecoder is implemented by sonic
+func (cfg frozenConfig) NewDecoder(reader io.Reader) Decoder {
+    dec := json.NewDecoder(reader)
+    if cfg.UseNumber {
+        dec.UseNumber()
+    }
+    if cfg.DisallowUnknownFields {
+        dec.DisallowUnknownFields()
+    }
+    return dec
+}
+
+// Valid is implemented by sonic
+func (cfg frozenConfig) Valid(data []byte) bool {
+    return json.Valid(data)
+}
+
+// Pretouch compiles vt ahead of time to avoid JIT compilation on the fly, in
+// order to reduce the first-hit latency on the amd64 architecture.
+// Opts are the compile options; for example, "option.WithCompileRecursiveDepth"
+// sets the depth of recursive compilation for nested struct types.
+// This is the no-op implementation for !amd64 (and go1.21+) builds; it keeps
+// the API usable for developers on other architectures, such as Apple M1.
+func Pretouch(vt reflect.Type, opts ...option.CompileOption) error {
+    return nil
+}
+
diff --git a/vendor/github.com/bytedance/sonic/decoder/asm.s b/vendor/github.com/bytedance/sonic/decoder/asm.s
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go116.go b/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go116.go
new file mode 100644
index 0000000..9ff1ad2
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go116.go
@@ -0,0 +1,1943 @@
+// +build go1.15,!go1.17
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package decoder + +import ( + `encoding/json` + `fmt` + `math` + `reflect` + `strconv` + `unsafe` + + `github.com/bytedance/sonic/internal/caching` + `github.com/bytedance/sonic/internal/jit` + `github.com/bytedance/sonic/internal/native` + `github.com/bytedance/sonic/internal/native/types` + `github.com/bytedance/sonic/internal/rt` + `github.com/twitchyliquid64/golang-asm/obj` + `github.com/twitchyliquid64/golang-asm/obj/x86` +) + +/** Register Allocations + * + * State Registers: + * + * %rbx : stack base + * %r12 : input pointer + * %r13 : input length + * %r14 : input cursor + * %r15 : value pointer + * + * Error Registers: + * + * %r10 : error type register + * %r11 : error pointer register + */ + +/** Function Prototype & Stack Map + * + * func (s string, ic int, vp unsafe.Pointer, sb *_Stack, fv uint64, sv string) (rc int, err error) + * + * s.buf : (FP) + * s.len : 8(FP) + * ic : 16(FP) + * vp : 24(FP) + * sb : 32(FP) + * fv : 40(FP) + * sv : 56(FP) + * err.vt : 72(FP) + * err.vp : 80(FP) + */ + +const ( + _FP_args = 96 // 96 bytes to pass arguments and return values for this function + _FP_fargs = 80 // 80 bytes for passing arguments to other Go functions + _FP_saves = 40 // 40 bytes for saving the registers before CALL instructions + _FP_locals = 144 // 144 bytes for local variables +) + +const ( + _FP_offs = _FP_fargs + _FP_saves + _FP_locals + _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer + _FP_base = _FP_size + 8 // 8 bytes for the return address +) + +const ( + _IM_null = 0x6c6c756e // 'null' + _IM_true = 0x65757274 // 'true' + _IM_alse = 0x65736c61 // 'alse' ('false' without the 'f') +) + +const ( + _BM_space = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n') +) + +const ( + _MODE_JSON = 1 << 3 // base64 mode +) + +const ( + _LB_error = "_error" + _LB_im_error = "_im_error" + _LB_eof_error = "_eof_error" + _LB_type_error = "_type_error" + _LB_field_error = "_field_error" + _LB_range_error = "_range_error" + _LB_stack_error = "_stack_error" + _LB_base64_error = "_base64_error" + _LB_unquote_error = "_unquote_error" + _LB_parsing_error = "_parsing_error" + _LB_parsing_error_v = "_parsing_error_v" + _LB_mismatch_error = "_mismatch_error" +) + +const ( + _LB_char_0_error = "_char_0_error" + _LB_char_1_error = "_char_1_error" + _LB_char_2_error = "_char_2_error" + _LB_char_3_error = "_char_3_error" + _LB_char_4_error = "_char_4_error" + _LB_char_m2_error = "_char_m2_error" + _LB_char_m3_error = "_char_m3_error" +) + +const ( + _LB_skip_one = "_skip_one" + _LB_skip_key_value = "_skip_key_value" +) + +var ( + _AX = jit.Reg("AX") + _CX = jit.Reg("CX") + _DX = jit.Reg("DX") + _DI = jit.Reg("DI") + _SI = jit.Reg("SI") + _BP = jit.Reg("BP") + _SP = jit.Reg("SP") + _R8 = jit.Reg("R8") + _R9 = jit.Reg("R9") + _X0 = jit.Reg("X0") + _X1 = jit.Reg("X1") +) + +var ( + _ST = jit.Reg("BX") + _IP = jit.Reg("R12") + _IL = jit.Reg("R13") + _IC = jit.Reg("R14") + _VP = jit.Reg("R15") +) + +var ( + _R10 = 
jit.Reg("R10") // used for gcWriteBarrier + _DF = jit.Reg("R10") // reuse R10 in generic decoder for flags + _ET = jit.Reg("R10") + _EP = jit.Reg("R11") +) + +var ( + _ARG_s = _ARG_sp + _ARG_sp = jit.Ptr(_SP, _FP_base) + _ARG_sl = jit.Ptr(_SP, _FP_base + 8) + _ARG_ic = jit.Ptr(_SP, _FP_base + 16) + _ARG_vp = jit.Ptr(_SP, _FP_base + 24) + _ARG_sb = jit.Ptr(_SP, _FP_base + 32) + _ARG_fv = jit.Ptr(_SP, _FP_base + 40) +) + +var ( + _VAR_sv = _VAR_sv_p + _VAR_sv_p = jit.Ptr(_SP, _FP_base + 48) + _VAR_sv_n = jit.Ptr(_SP, _FP_base + 56) + _VAR_vk = jit.Ptr(_SP, _FP_base + 64) +) + +var ( + _RET_rc = jit.Ptr(_SP, _FP_base + 72) + _RET_et = jit.Ptr(_SP, _FP_base + 80) + _RET_ep = jit.Ptr(_SP, _FP_base + 88) +) + +var ( + _VAR_st = _VAR_st_Vt + _VAR_sr = jit.Ptr(_SP, _FP_fargs + _FP_saves) +) + + +var ( + _VAR_st_Vt = jit.Ptr(_SP, _FP_fargs + _FP_saves + 0) + _VAR_st_Dv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8) + _VAR_st_Iv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16) + _VAR_st_Ep = jit.Ptr(_SP, _FP_fargs + _FP_saves + 24) + _VAR_st_Db = jit.Ptr(_SP, _FP_fargs + _FP_saves + 32) + _VAR_st_Dc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 40) +) + +var ( + _VAR_ss_AX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 48) + _VAR_ss_CX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 56) + _VAR_ss_SI = jit.Ptr(_SP, _FP_fargs + _FP_saves + 64) + _VAR_ss_R8 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 72) + _VAR_ss_R9 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 80) +) + +var ( + _VAR_bs_p = jit.Ptr(_SP, _FP_fargs + _FP_saves + 88) + _VAR_bs_n = jit.Ptr(_SP, _FP_fargs + _FP_saves + 96) + _VAR_bs_LR = jit.Ptr(_SP, _FP_fargs + _FP_saves + 104) +) + +var _VAR_fl = jit.Ptr(_SP, _FP_fargs + _FP_saves + 112) + +var ( + _VAR_et = jit.Ptr(_SP, _FP_fargs + _FP_saves + 120) // save dismatched type + _VAR_ic = jit.Ptr(_SP, _FP_fargs + _FP_saves + 128) // save dismatched position + _VAR_pc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 136) // save skip return pc +) + +type _Assembler struct { + jit.BaseAssembler + p _Program + name string +} + +func newAssembler(p _Program) *_Assembler { + return new(_Assembler).Init(p) +} + +/** Assembler Interface **/ + +func (self *_Assembler) Load() _Decoder { + return ptodec(self.BaseAssembler.Load("decode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs)) +} + +func (self *_Assembler) Init(p _Program) *_Assembler { + self.p = p + self.BaseAssembler.Init(self.compile) + return self +} + +func (self *_Assembler) compile() { + self.prologue() + self.instrs() + self.epilogue() + self.copy_string() + self.escape_string() + self.escape_string_twice() + self.skip_one() + self.skip_key_value() + self.mismatch_error() + self.type_error() + self.field_error() + self.range_error() + self.stack_error() + self.base64_error() + self.parsing_error() +} + +/** Assembler Stages **/ + +var _OpFuncTab = [256]func(*_Assembler, *_Instr) { + _OP_any : (*_Assembler)._asm_OP_any, + _OP_dyn : (*_Assembler)._asm_OP_dyn, + _OP_str : (*_Assembler)._asm_OP_str, + _OP_bin : (*_Assembler)._asm_OP_bin, + _OP_bool : (*_Assembler)._asm_OP_bool, + _OP_num : (*_Assembler)._asm_OP_num, + _OP_i8 : (*_Assembler)._asm_OP_i8, + _OP_i16 : (*_Assembler)._asm_OP_i16, + _OP_i32 : (*_Assembler)._asm_OP_i32, + _OP_i64 : (*_Assembler)._asm_OP_i64, + _OP_u8 : (*_Assembler)._asm_OP_u8, + _OP_u16 : (*_Assembler)._asm_OP_u16, + _OP_u32 : (*_Assembler)._asm_OP_u32, + _OP_u64 : (*_Assembler)._asm_OP_u64, + _OP_f32 : (*_Assembler)._asm_OP_f32, + _OP_f64 : (*_Assembler)._asm_OP_f64, + _OP_unquote : (*_Assembler)._asm_OP_unquote, + _OP_nil_1 : (*_Assembler)._asm_OP_nil_1, + 
_OP_nil_2 : (*_Assembler)._asm_OP_nil_2, + _OP_nil_3 : (*_Assembler)._asm_OP_nil_3, + _OP_deref : (*_Assembler)._asm_OP_deref, + _OP_index : (*_Assembler)._asm_OP_index, + _OP_is_null : (*_Assembler)._asm_OP_is_null, + _OP_is_null_quote : (*_Assembler)._asm_OP_is_null_quote, + _OP_map_init : (*_Assembler)._asm_OP_map_init, + _OP_map_key_i8 : (*_Assembler)._asm_OP_map_key_i8, + _OP_map_key_i16 : (*_Assembler)._asm_OP_map_key_i16, + _OP_map_key_i32 : (*_Assembler)._asm_OP_map_key_i32, + _OP_map_key_i64 : (*_Assembler)._asm_OP_map_key_i64, + _OP_map_key_u8 : (*_Assembler)._asm_OP_map_key_u8, + _OP_map_key_u16 : (*_Assembler)._asm_OP_map_key_u16, + _OP_map_key_u32 : (*_Assembler)._asm_OP_map_key_u32, + _OP_map_key_u64 : (*_Assembler)._asm_OP_map_key_u64, + _OP_map_key_f32 : (*_Assembler)._asm_OP_map_key_f32, + _OP_map_key_f64 : (*_Assembler)._asm_OP_map_key_f64, + _OP_map_key_str : (*_Assembler)._asm_OP_map_key_str, + _OP_map_key_utext : (*_Assembler)._asm_OP_map_key_utext, + _OP_map_key_utext_p : (*_Assembler)._asm_OP_map_key_utext_p, + _OP_array_skip : (*_Assembler)._asm_OP_array_skip, + _OP_array_clear : (*_Assembler)._asm_OP_array_clear, + _OP_array_clear_p : (*_Assembler)._asm_OP_array_clear_p, + _OP_slice_init : (*_Assembler)._asm_OP_slice_init, + _OP_slice_append : (*_Assembler)._asm_OP_slice_append, + _OP_object_skip : (*_Assembler)._asm_OP_object_skip, + _OP_object_next : (*_Assembler)._asm_OP_object_next, + _OP_struct_field : (*_Assembler)._asm_OP_struct_field, + _OP_unmarshal : (*_Assembler)._asm_OP_unmarshal, + _OP_unmarshal_p : (*_Assembler)._asm_OP_unmarshal_p, + _OP_unmarshal_text : (*_Assembler)._asm_OP_unmarshal_text, + _OP_unmarshal_text_p : (*_Assembler)._asm_OP_unmarshal_text_p, + _OP_lspace : (*_Assembler)._asm_OP_lspace, + _OP_match_char : (*_Assembler)._asm_OP_match_char, + _OP_check_char : (*_Assembler)._asm_OP_check_char, + _OP_load : (*_Assembler)._asm_OP_load, + _OP_save : (*_Assembler)._asm_OP_save, + _OP_drop : (*_Assembler)._asm_OP_drop, + _OP_drop_2 : (*_Assembler)._asm_OP_drop_2, + _OP_recurse : (*_Assembler)._asm_OP_recurse, + _OP_goto : (*_Assembler)._asm_OP_goto, + _OP_switch : (*_Assembler)._asm_OP_switch, + _OP_check_char_0 : (*_Assembler)._asm_OP_check_char_0, + _OP_dismatch_err : (*_Assembler)._asm_OP_dismatch_err, + _OP_go_skip : (*_Assembler)._asm_OP_go_skip, + _OP_add : (*_Assembler)._asm_OP_add, +} + +func (self *_Assembler) instr(v *_Instr) { + if fn := _OpFuncTab[v.op()]; fn != nil { + fn(self, v) + } else { + panic(fmt.Sprintf("invalid opcode: %d", v.op())) + } +} + +func (self *_Assembler) instrs() { + for i, v := range self.p { + self.Mark(i) + self.instr(&v) + self.debug_instr(i, &v) + } +} + +func (self *_Assembler) epilogue() { + self.Mark(len(self.p)) + self.Emit("XORL", _EP, _EP) // XORL EP, EP + self.Emit("MOVQ", _VAR_et, _ET) // MOVQ VAR_et, ET + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_mismatch_error) // JNZ _LB_mismatch_error + self.Link(_LB_error) // _error: + self.Emit("MOVQ", _IC, _RET_rc) // MOVQ IC, rc<>+40(FP) + self.Emit("MOVQ", _ET, _RET_et) // MOVQ ET, et<>+48(FP) + self.Emit("MOVQ", _EP, _RET_ep) // MOVQ EP, ep<>+56(FP) + self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP + self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP + self.Emit("RET") // RET +} + +func (self *_Assembler) prologue() { + self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, 
_FP_offs), _BP)                // LEAQ _FP_offs(SP), BP
+    self.Emit("MOVQ", _ARG_sp, _IP)    // MOVQ s.p<>+0(FP), IP
+    self.Emit("MOVQ", _ARG_sl, _IL)    // MOVQ s.l<>+8(FP), IL
+    self.Emit("MOVQ", _ARG_ic, _IC)    // MOVQ ic<>+16(FP), IC
+    self.Emit("MOVQ", _ARG_vp, _VP)    // MOVQ vp<>+24(FP), VP
+    self.Emit("MOVQ", _ARG_sb, _ST)    // MOVQ sb<>+32(FP), ST
+    // initialize digital buffer first
+    self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_st_Dc)    // MOVQ $_MaxDigitNums, ss.Dcap
+    self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX)        // LEAQ _DbufOffset(ST), AX
+    self.Emit("MOVQ", _AX, _VAR_st_Db)                       // MOVQ AX, ss.Dbuf
+    self.Emit("XORL", _AX, _AX)                              // XORL AX, AX
+    self.Emit("MOVQ", _AX, _VAR_et)                          // MOVQ AX, et (clear the mismatched-type slot)
+}
+
+/** Function Calling Helpers **/
+
+var _REG_go = []obj.Addr {
+    _ST,
+    _VP,
+    _IP,
+    _IL,
+    _IC,
+}
+
+func (self *_Assembler) save(r ...obj.Addr) {
+    for i, v := range r {
+        if i > _FP_saves / 8 - 1 {
+            panic("too many registers to save")
+        } else {
+            self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8))
+        }
+    }
+}
+
+func (self *_Assembler) load(r ...obj.Addr) {
+    for i, v := range r {
+        if i > _FP_saves / 8 - 1 {
+            panic("too many registers to load")
+        } else {
+            self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v)
+        }
+    }
+}
+
+func (self *_Assembler) call(fn obj.Addr) {
+    self.Emit("MOVQ", fn, _AX)    // MOVQ ${fn}, AX
+    self.Rjmp("CALL", _AX)        // CALL AX
+}
+
+func (self *_Assembler) call_go(fn obj.Addr) {
+    self.save(_REG_go...)    // SAVE $REG_go
+    self.call(fn)            // CALL ${fn}
+    self.load(_REG_go...)    // LOAD $REG_go
+}
+
+func (self *_Assembler) call_sf(fn obj.Addr) {
+    self.Emit("LEAQ", _ARG_s, _DI)                      // LEAQ s<>+0(FP), DI
+    self.Emit("MOVQ", _IC, _ARG_ic)                     // MOVQ IC, ic<>+16(FP)
+    self.Emit("LEAQ", _ARG_ic, _SI)                     // LEAQ ic<>+16(FP), SI
+    self.Emit("LEAQ", jit.Ptr(_ST, _FsmOffset), _DX)    // LEAQ _FsmOffset(ST), DX
+    self.Emit("MOVQ", _ARG_fv, _CX)
+    self.call(fn)                                       // CALL ${fn}
+    self.Emit("MOVQ", _ARG_ic, _IC)                     // MOVQ ic<>+16(FP), IC
+}
+
+func (self *_Assembler) call_vf(fn obj.Addr) {
+    self.Emit("LEAQ", _ARG_s, _DI)     // LEAQ s<>+0(FP), DI
+    self.Emit("MOVQ", _IC, _ARG_ic)    // MOVQ IC, ic<>+16(FP)
+    self.Emit("LEAQ", _ARG_ic, _SI)    // LEAQ ic<>+16(FP), SI
+    self.Emit("LEAQ", _VAR_st, _DX)    // LEAQ st, DX
+    self.call(fn)                      // CALL ${fn}
+    self.Emit("MOVQ", _ARG_ic, _IC)    // MOVQ ic<>+16(FP), IC
+}
+
+/** Assembler Error Handlers **/
+
+var (
+    _F_convT64        = jit.Func(convT64)
+    _F_error_wrap     = jit.Func(error_wrap)
+    _F_error_type     = jit.Func(error_type)
+    _F_error_field    = jit.Func(error_field)
+    _F_error_value    = jit.Func(error_value)
+    _F_error_mismatch = jit.Func(error_mismatch)
+)
+
+var (
+    _I_int8    , _T_int8    = rtype(reflect.TypeOf(int8(0)))
+    _I_int16   , _T_int16   = rtype(reflect.TypeOf(int16(0)))
+    _I_int32   , _T_int32   = rtype(reflect.TypeOf(int32(0)))
+    _I_uint8   , _T_uint8   = rtype(reflect.TypeOf(uint8(0)))
+    _I_uint16  , _T_uint16  = rtype(reflect.TypeOf(uint16(0)))
+    _I_uint32  , _T_uint32  = rtype(reflect.TypeOf(uint32(0)))
+    _I_float32 , _T_float32 = rtype(reflect.TypeOf(float32(0)))
+)
+
+var (
+    _T_error                    = rt.UnpackType(errorType)
+    _I_base64_CorruptInputError = jit.Itab(_T_error, base64CorruptInputError)
+)
+
+var (
+    _V_stackOverflow              = jit.Imm(int64(uintptr(unsafe.Pointer(&stackOverflow))))
+    _I_json_UnsupportedValueError = jit.Itab(_T_error, reflect.TypeOf(new(json.UnsupportedValueError)))
+)
+
+func (self *_Assembler) type_error() {
+    self.Link(_LB_type_error)                  // _type_error:
+    self.Emit("MOVQ", _ET, jit.Ptr(_SP, 0))    // MOVQ ET, (SP)
+    self.call_go(_F_error_type)                // CALL_GO error_type
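+    // Note: error_type is reached through call_go, which uses the stack-based
+    // calling convention: the argument was stored at (SP) above, and the
+    // returned error interface (type word, data word) is read back from
+    // 8(SP) and 16(SP) below.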
self.Emit("MOVQ", jit.Ptr(_SP, 8), _ET) // MOVQ 8(SP), ET + self.Emit("MOVQ", jit.Ptr(_SP, 16), _EP) // MOVQ 16(SP), EP + self.Sjmp("JMP" , _LB_error) // JMP _error +} + + +func (self *_Assembler) mismatch_error() { + self.Link(_LB_mismatch_error) // _type_error: + self.Emit("MOVQ", _ARG_sp, _AX) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", _ARG_sl, _CX) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.Emit("MOVQ", _VAR_ic, _AX) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.Emit("MOVQ", _VAR_et, _CX) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP) + self.call_go(_F_error_mismatch) // CALL_GO error_type + self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) _asm_OP_dismatch_err(p *_Instr) { + self.Emit("MOVQ", _IC, _VAR_ic) + self.Emit("MOVQ", jit.Type(p.vt()), _ET) + self.Emit("MOVQ", _ET, _VAR_et) +} + +func (self *_Assembler) _asm_OP_go_skip(p *_Instr) { + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Xref(p.vi(), 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP" , _LB_skip_one) // JMP _skip_one +} + +func (self *_Assembler) skip_one() { + self.Link(_LB_skip_one) // _skip: + self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + self.Emit("MOVQ" , _VAR_pc, _R9) // MOVQ pc, R9 + self.Rjmp("JMP" , _R9) // JMP (R9) +} + + +func (self *_Assembler) skip_key_value() { + self.Link(_LB_skip_key_value) // _skip: + // skip the key + self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + // match char ':' + self.lspace("_global_1") + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(':')) + self.Sjmp("JNE" , _LB_parsing_error_v) // JNE _parse_error_v + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.lspace("_global_2") + // skip the value + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + // jump back to specified address + self.Emit("MOVQ" , _VAR_pc, _R9) // MOVQ pc, R9 + self.Rjmp("JMP" , _R9) // JMP (R9) +} + +func (self *_Assembler) field_error() { + self.Link(_LB_field_error) // _field_error: + self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0)) // MOVOU X0, (SP) + self.call_go(_F_error_field) // CALL_GO error_field + self.Emit("MOVQ" , jit.Ptr(_SP, 16), _ET) // MOVQ 16(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 24), _EP) // MOVQ 24(SP), EP + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) range_error() { + self.Link(_LB_range_error) // _range_error: + self.slice_from(_VAR_st_Ep, 0) // SLICE st.Ep, $0 + self.Emit("MOVQ", _DI, jit.Ptr(_SP, 0)) // MOVQ DI, (SP) + self.Emit("MOVQ", _SI, jit.Ptr(_SP, 8)) // MOVQ SI, 8(SP) + self.Emit("MOVQ", _ET, jit.Ptr(_SP, 16)) // MOVQ ET, 16(SP) + self.Emit("MOVQ", _EP, jit.Ptr(_SP, 24)) // MOVQ EP, 24(SP) + self.call_go(_F_error_value) // CALL_GO error_value + self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func 
(self *_Assembler) stack_error() { + self.Link(_LB_stack_error) // _stack_error: + self.Emit("MOVQ", _V_stackOverflow, _EP) // MOVQ ${_V_stackOverflow}, EP + self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ ${_I_json_UnsupportedValueError}, ET + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) base64_error() { + self.Link(_LB_base64_error) + self.Emit("NEGQ", _AX) // NEGQ AX + self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.call_go(_F_convT64) // CALL_GO convT64 + self.Emit("MOVQ", jit.Ptr(_SP, 8), _EP) // MOVQ 8(SP), EP + self.Emit("MOVQ", _I_base64_CorruptInputError, _ET) // MOVQ ${itab(base64.CorruptInputError)}, ET + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) parsing_error() { + self.Link(_LB_eof_error) // _eof_error: + self.Emit("MOVQ" , _IL, _IC) // MOVQ IL, IC + self.Emit("MOVL" , jit.Imm(int64(types.ERR_EOF)), _EP) // MOVL ${types.ERR_EOF}, EP + self.Sjmp("JMP" , _LB_parsing_error) // JMP _parsing_error + self.Link(_LB_unquote_error) // _unquote_error: + self.Emit("SUBQ" , _VAR_sr, _SI) // SUBQ sr, SI + self.Emit("SUBQ" , _SI, _IC) // SUBQ IL, IC + self.Link(_LB_parsing_error_v) // _parsing_error_v: + self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP + self.Emit("NEGQ" , _EP) // NEGQ EP + self.Sjmp("JMP" , _LB_parsing_error) // JMP _parsing_error + self.Link(_LB_char_m3_error) // _char_m3_error: + self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC + self.Link(_LB_char_m2_error) // _char_m2_error: + self.Emit("SUBQ" , jit.Imm(2), _IC) // SUBQ $2, IC + self.Sjmp("JMP" , _LB_char_0_error) // JMP _char_0_error + self.Link(_LB_im_error) // _im_error: + self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPB CX, (IP)(IC) + self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error + self.Emit("SHRL" , jit.Imm(8), _CX) // SHRL $8, CX + self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 1)) // CMPB CX, 1(IP)(IC) + self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error + self.Emit("SHRL" , jit.Imm(8), _CX) // SHRL $8, CX + self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 2)) // CMPB CX, 2(IP)(IC) + self.Sjmp("JNE" , _LB_char_2_error) // JNE _char_2_error + self.Sjmp("JMP" , _LB_char_3_error) // JNE _char_3_error + self.Link(_LB_char_4_error) // _char_4_error: + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_3_error) // _char_3_error: + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_2_error) // _char_2_error: + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_1_error) // _char_1_error: + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_0_error) // _char_0_error: + self.Emit("MOVL" , jit.Imm(int64(types.ERR_INVALID_CHAR)), _EP) // MOVL ${types.ERR_INVALID_CHAR}, EP + self.Link(_LB_parsing_error) // _parsing_error: + self.Emit("MOVOU", _ARG_s, _X0) // MOVOU s, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0)) // MOVOU X0, (SP) + self.Emit("MOVQ" , _IC, jit.Ptr(_SP, 16)) // MOVQ IC, 16(SP) + self.Emit("MOVQ" , _EP, jit.Ptr(_SP, 24)) // MOVQ EP, 24(SP) + self.call_go(_F_error_wrap) // CALL_GO error_wrap + self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +/** Memory Management Routines **/ + +var ( + _T_byte = jit.Type(byteType) + _F_mallocgc = jit.Func(mallocgc) +) + +func (self *_Assembler) malloc(nb obj.Addr, ret obj.Addr) { + self.Emit("XORL", _AX, _AX) // XORL 
AX, AX + self.Emit("MOVQ", _T_byte, _CX) // MOVQ ${type(byte)}, CX + self.Emit("MOVQ", nb, jit.Ptr(_SP, 0)) // MOVQ ${nb}, (SP) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.call_go(_F_mallocgc) // CALL_GO mallocgc + self.Emit("MOVQ", jit.Ptr(_SP, 24), ret) // MOVQ 24(SP), ${ret} +} + +func (self *_Assembler) valloc(vt reflect.Type, ret obj.Addr) { + self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", jit.Type(vt), _AX) // MOVQ ${vt}, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.Emit("MOVB", jit.Imm(1), jit.Ptr(_SP, 16)) // MOVB $1, 16(SP) + self.call_go(_F_mallocgc) // CALL_GO mallocgc + self.Emit("MOVQ", jit.Ptr(_SP, 24), ret) // MOVQ 24(SP), ${ret} +} + +func (self *_Assembler) vfollow(vt reflect.Type) { + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n} + self.valloc(vt, _AX) // VALLOC ${vt}, AX + self.WritePtrAX(1, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Link("_end_{n}") // _end_{n}: + self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP +} + +/** Value Parsing Routines **/ + +var ( + _F_vstring = jit.Imm(int64(native.S_vstring)) + _F_vnumber = jit.Imm(int64(native.S_vnumber)) + _F_vsigned = jit.Imm(int64(native.S_vsigned)) + _F_vunsigned = jit.Imm(int64(native.S_vunsigned)) +) + +func (self *_Assembler) check_err(vt reflect.Type, pin string, pin2 int) { + self.Emit("MOVQ" , _VAR_st_Vt, _AX) // MOVQ st.Vt, AX + self.Emit("TESTQ", _AX, _AX) // CMPQ AX, ${native.V_STRING} + // try to skip the value + if vt != nil { + self.Sjmp("JNS" , "_check_err_{n}") // JNE _parsing_error_v + self.Emit("MOVQ", jit.Type(vt), _ET) + self.Emit("MOVQ", _ET, _VAR_et) + if pin2 != -1 { + self.Emit("SUBQ", jit.Imm(1), _BP) + self.Emit("MOVQ", _BP, _VAR_ic) + self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9 + self.Xref(pin2, 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP" , _LB_skip_key_value) + } else { + self.Emit("MOVQ", _BP, _VAR_ic) + self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref(pin, 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP" , _LB_skip_one) + } + self.Link("_check_err_{n}") + } else { + self.Sjmp("JS" , _LB_parsing_error_v) // JNE _parsing_error_v + } +} + +func (self *_Assembler) check_eof(d int64) { + if d == 1 { + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error + } else { + self.Emit("LEAQ", jit.Ptr(_IC, d), _AX) // LEAQ ${d}(IC), AX + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA" , _LB_eof_error) // JA _eof_error + } +} + +func (self *_Assembler) parse_string() { // parse_string has a validate flag params in the last + self.Emit("MOVQ", _ARG_fv, _CX) + self.call_vf(_F_vstring) + self.check_err(nil, "", -1) +} + +func (self *_Assembler) parse_number(vt reflect.Type, pin string, pin2 int) { + self.Emit("MOVQ", _IC, _BP) + self.call_vf(_F_vnumber) // call vnumber + self.check_err(vt, pin, pin2) +} + +func (self *_Assembler) parse_signed(vt reflect.Type, pin string, pin2 int) { + self.Emit("MOVQ", _IC, _BP) + self.call_vf(_F_vsigned) + self.check_err(vt, pin, pin2) +} + +func (self *_Assembler) parse_unsigned(vt reflect.Type, pin string, pin2 int) { + self.Emit("MOVQ", _IC, _BP) + self.call_vf(_F_vunsigned) + self.check_err(vt, pin, pin2) +} + +// Pointer: DI, Size: SI, Return: R9 +func (self *_Assembler) 
copy_string() { + self.Link("_copy_string") + self.Emit("MOVQ", _DI, _VAR_bs_p) + self.Emit("MOVQ", _SI, _VAR_bs_n) + self.Emit("MOVQ", _R9, _VAR_bs_LR) + self.malloc(_SI, _AX) + self.Emit("MOVQ", _AX, _VAR_sv_p) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) + self.Emit("MOVQ", _VAR_bs_p, _DI) + self.Emit("MOVQ", _DI, jit.Ptr(_SP, 8)) + self.Emit("MOVQ", _VAR_bs_n, _SI) + self.Emit("MOVQ", _SI, jit.Ptr(_SP, 16)) + self.call_go(_F_memmove) + self.Emit("MOVQ", _VAR_sv_p, _DI) + self.Emit("MOVQ", _VAR_bs_n, _SI) + self.Emit("MOVQ", _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) +} + +// Pointer: DI, Size: SI, Return: R9 +func (self *_Assembler) escape_string() { + self.Link("_escape_string") + self.Emit("MOVQ" , _DI, _VAR_bs_p) + self.Emit("MOVQ" , _SI, _VAR_bs_n) + self.Emit("MOVQ" , _R9, _VAR_bs_LR) + self.malloc(_SI, _DX) // MALLOC SI, DX + self.Emit("MOVQ" , _DX, _VAR_sv_p) + self.Emit("MOVQ" , _VAR_bs_p, _DI) + self.Emit("MOVQ" , _VAR_bs_n, _SI) + self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX + self.Emit("XORL" , _R8, _R8) // XORL R8, R8 + self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, fv + self.Emit("SETCC", _R8) // SETCC R8 + self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 + self.call(_F_unquote) // CALL unquote + self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI + self.Emit("ADDQ" , jit.Imm(1), _SI) // ADDQ $1, SI + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error + self.Emit("MOVQ" , _AX, _SI) + self.Emit("MOVQ" , _VAR_sv_p, _DI) + self.Emit("MOVQ" , _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) +} + +func (self *_Assembler) escape_string_twice() { + self.Link("_escape_string_twice") + self.Emit("MOVQ" , _DI, _VAR_bs_p) + self.Emit("MOVQ" , _SI, _VAR_bs_n) + self.Emit("MOVQ" , _R9, _VAR_bs_LR) + self.malloc(_SI, _DX) // MALLOC SI, DX + self.Emit("MOVQ" , _DX, _VAR_sv_p) + self.Emit("MOVQ" , _VAR_bs_p, _DI) + self.Emit("MOVQ" , _VAR_bs_n, _SI) + self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX + self.Emit("MOVL" , jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 + self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, AX + self.Emit("XORL" , _AX, _AX) // XORL AX, AX + self.Emit("SETCC", _AX) // SETCC AX + self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _AX) // SHLQ ${types.B_UNICODE_REPLACE}, AX + self.Emit("ORQ" , _AX, _R8) // ORQ AX, R8 + self.call(_F_unquote) // CALL unquote + self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI + self.Emit("ADDQ" , jit.Imm(3), _SI) // ADDQ $3, SI + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error + self.Emit("MOVQ" , _AX, _SI) + self.Emit("MOVQ" , _VAR_sv_p, _DI) + self.Emit("MOVQ" , _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) +} + +/** Range Checking Routines **/ + +var ( + _V_max_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_max_f32)))) + _V_min_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_min_f32)))) +) + +var ( + _Vp_max_f32 = new(float64) + _Vp_min_f32 = new(float64) +) + +func init() { + *_Vp_max_f32 = math.MaxFloat32 + *_Vp_min_f32 = -math.MaxFloat32 +} + +func (self *_Assembler) range_single() { + self.Emit("MOVSD" , _VAR_st_Dv, _X0) // MOVSD st.Dv, X0 + self.Emit("MOVQ" , _V_max_f32, _AX) // MOVQ _max_f32, AX + self.Emit("MOVQ" , jit.Gitab(_I_float32), _ET) // MOVQ ${itab(float32)}, ET + self.Emit("MOVQ" , jit.Gtype(_T_float32), _EP) // MOVQ ${type(float32)}, EP + self.Emit("UCOMISD" , jit.Ptr(_AX, 0), _X0) // 
UCOMISD (AX), X0 + self.Sjmp("JA" , _LB_range_error) // JA _range_error + self.Emit("MOVQ" , _V_min_f32, _AX) // MOVQ _min_f32, AX + self.Emit("MOVSD" , jit.Ptr(_AX, 0), _X1) // MOVSD (AX), X1 + self.Emit("UCOMISD" , _X0, _X1) // UCOMISD X0, X1 + self.Sjmp("JA" , _LB_range_error) // JA _range_error + self.Emit("CVTSD2SS", _X0, _X0) // CVTSD2SS X0, X0 +} + +func (self *_Assembler) range_signed(i *rt.GoItab, t *rt.GoType, a int64, b int64) { + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.Emit("MOVQ", jit.Gitab(i), _ET) // MOVQ ${i}, ET + self.Emit("MOVQ", jit.Gtype(t), _EP) // MOVQ ${t}, EP + self.Emit("CMPQ", _AX, jit.Imm(a)) // CMPQ AX, ${a} + self.Sjmp("JL" , _LB_range_error) // JL _range_error + self.Emit("CMPQ", _AX, jit.Imm(b)) // CMPQ AX, ${B} + self.Sjmp("JG" , _LB_range_error) // JG _range_error +} + +func (self *_Assembler) range_unsigned(i *rt.GoItab, t *rt.GoType, v uint64) { + self.Emit("MOVQ" , _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.Emit("MOVQ" , jit.Gitab(i), _ET) // MOVQ ${i}, ET + self.Emit("MOVQ" , jit.Gtype(t), _EP) // MOVQ ${t}, EP + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_range_error) // JS _range_error + self.Emit("CMPQ" , _AX, jit.Imm(int64(v))) // CMPQ AX, ${a} + self.Sjmp("JA" , _LB_range_error) // JA _range_error +} + +/** String Manipulating Routines **/ + +var ( + _F_unquote = jit.Imm(int64(native.S_unquote)) +) + +func (self *_Assembler) slice_from(p obj.Addr, d int64) { + self.Emit("MOVQ", p, _SI) // MOVQ ${p}, SI + self.slice_from_r(_SI, d) // SLICE_R SI, ${d} +} + +func (self *_Assembler) slice_from_r(p obj.Addr, d int64) { + self.Emit("LEAQ", jit.Sib(_IP, p, 1, 0), _DI) // LEAQ (IP)(${p}), DI + self.Emit("NEGQ", p) // NEGQ ${p} + self.Emit("LEAQ", jit.Sib(_IC, p, 1, d), _SI) // LEAQ d(IC)(${p}), SI +} + +func (self *_Assembler) unquote_once(p obj.Addr, n obj.Addr, stack bool, copy bool) { + self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1 + self.Emit("CMPQ" , _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1 + self.Sjmp("JE" , "_noescape_{n}") // JE _noescape_{n} + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_once_write_{n}", 4) + self.Sjmp("JMP" , "_escape_string") + self.Link("_noescape_{n}") // _noescape_{n}: + if copy { + self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv) + self.Sjmp("JNC", "_unquote_once_write_{n}") + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_once_write_{n}", 4) + self.Sjmp("JMP", "_copy_string") + } + self.Link("_unquote_once_write_{n}") + self.Emit("MOVQ" , _SI, n) // MOVQ SI, ${n} + if stack { + self.Emit("MOVQ", _DI, p) + } else { + self.WriteRecNotAX(10, _DI, p, false, false) + } +} + +func (self *_Assembler) unquote_twice(p obj.Addr, n obj.Addr, stack bool) { + self.Emit("CMPQ" , _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1 + self.Sjmp("JE" , _LB_eof_error) // JE _eof_error + self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -3), jit.Imm('\\')) // CMPB -3(IP)(IC), $'\\' + self.Sjmp("JNE" , _LB_char_m3_error) // JNE _char_m3_error + self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -2), jit.Imm('"')) // CMPB -2(IP)(IC), $'"' + self.Sjmp("JNE" , _LB_char_m2_error) // JNE _char_m2_error + self.slice_from(_VAR_st_Iv, -3) // SLICE st.Iv, $-3 + self.Emit("MOVQ" , _SI, _AX) // MOVQ SI, AX + self.Emit("ADDQ" , _VAR_st_Iv, _AX) // ADDQ st.Iv, AX + self.Emit("CMPQ" , _VAR_st_Ep, _AX) // CMPQ st.Ep, AX + self.Sjmp("JE" , "_noescape_{n}") // JE _noescape_{n} + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_twice_write_{n}", 4) + self.Sjmp("JMP" , 
"_escape_string_twice") + self.Link("_noescape_{n}") // _noescape_{n}: + self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv) + self.Sjmp("JNC", "_unquote_twice_write_{n}") + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_twice_write_{n}", 4) + self.Sjmp("JMP", "_copy_string") + self.Link("_unquote_twice_write_{n}") + self.Emit("MOVQ" , _SI, n) // MOVQ SI, ${n} + if stack { + self.Emit("MOVQ", _DI, p) + } else { + self.WriteRecNotAX(12, _DI, p, false, false) + } +} + +/** Memory Clearing Routines **/ + +var ( + _F_memclrHasPointers = jit.Func(memclrHasPointers) + _F_memclrNoHeapPointers = jit.Func(memclrNoHeapPointers) +) + +func (self *_Assembler) mem_clear_fn(ptrfree bool) { + if !ptrfree { + self.call_go(_F_memclrHasPointers) + } else { + self.call_go(_F_memclrNoHeapPointers) + } +} + +func (self *_Assembler) mem_clear_rem(size int64, ptrfree bool) { + self.Emit("MOVQ", jit.Imm(size), _CX) // MOVQ ${size}, CX + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _AX) // MOVQ (ST)(AX), AX + self.Emit("SUBQ", _VP, _AX) // SUBQ VP, AX + self.Emit("ADDQ", _AX, _CX) // ADDQ AX, CX + self.Emit("MOVQ", _VP, jit.Ptr(_SP, 0)) // MOVQ VP, (SP) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.mem_clear_fn(ptrfree) // CALL_GO memclr{Has,NoHeap}Pointers +} + +/** Map Assigning Routines **/ + +var ( + _F_mapassign = jit.Func(mapassign) + _F_mapassign_fast32 = jit.Func(mapassign_fast32) + _F_mapassign_faststr = jit.Func(mapassign_faststr) + _F_mapassign_fast64ptr = jit.Func(mapassign_fast64ptr) +) + +var ( + _F_decodeJsonUnmarshaler obj.Addr + _F_decodeTextUnmarshaler obj.Addr +) + +func init() { + _F_decodeJsonUnmarshaler = jit.Func(decodeJsonUnmarshaler) + _F_decodeTextUnmarshaler = jit.Func(decodeTextUnmarshaler) +} + +func (self *_Assembler) mapaccess_ptr(t reflect.Type) { + if rt.MapType(rt.UnpackType(t)).IndirectElem() { + self.vfollow(t.Elem()) + } +} + +func (self *_Assembler) mapassign_std(t reflect.Type, v obj.Addr) { + self.Emit("LEAQ", v, _AX) // LEAQ ${v}, AX + self.mapassign_call(t, _F_mapassign) // MAPASSIGN ${t}, mapassign +} + +func (self *_Assembler) mapassign_str_fast(t reflect.Type, p obj.Addr, n obj.Addr) { + self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP) + self.Emit("MOVQ", p, jit.Ptr(_SP, 16)) // MOVQ ${p}, 16(SP) + self.Emit("MOVQ", n, jit.Ptr(_SP, 24)) // MOVQ ${n}, 24(SP) + self.call_go(_F_mapassign_faststr) // CALL_GO ${fn} + self.Emit("MOVQ", jit.Ptr(_SP, 32), _VP) // MOVQ 32(SP), VP + self.mapaccess_ptr(t) +} + +func (self *_Assembler) mapassign_call(t reflect.Type, fn obj.Addr) { + self.Emit("MOVQ", jit.Type(t), _SI) // MOVQ ${t}, SI + self.Emit("MOVQ", _SI, jit.Ptr(_SP, 0)) // MOVQ SI, (SP) + self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.call_go(fn) // CALL_GO ${fn} + self.Emit("MOVQ", jit.Ptr(_SP, 24), _VP) // MOVQ 24(SP), VP +} + +func (self *_Assembler) mapassign_fastx(t reflect.Type, fn obj.Addr) { + self.mapassign_call(t, fn) + self.mapaccess_ptr(t) +} + +func (self *_Assembler) mapassign_utext(t reflect.Type, addressable bool) { + pv := false + vk := t.Key() + tk := t.Key() + + /* deref pointer if needed */ + if vk.Kind() == reflect.Ptr { + pv = true + vk = vk.Elem() + } + + /* addressable value with pointer receiver */ + if addressable { + pv = false + tk = reflect.PtrTo(tk) + 
} + + /* allocate the key, and call the unmarshaler */ + self.valloc(vk, _DI) // VALLOC ${vk}, DI + // must spill vk pointer since next call_go may invoke GC + self.Emit("MOVQ" , _DI, _VAR_vk) + self.Emit("MOVQ" , jit.Type(tk), _AX) // MOVQ ${tk}, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ" , _DI, jit.Ptr(_SP, 8)) // MOVQ DI, 8(SP) + self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 16)) // MOVOU X0, 16(SP) + self.call_go(_F_decodeTextUnmarshaler) // CALL_GO decodeTextUnmarshaler + self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.Emit("MOVQ" , _VAR_vk, _AX) + + /* select the correct assignment function */ + if !pv { + self.mapassign_call(t, _F_mapassign) + } else { + self.mapassign_fastx(t, _F_mapassign_fast64ptr) + } +} + +/** External Unmarshaler Routines **/ + +var ( + _F_skip_one = jit.Imm(int64(native.S_skip_one)) + _F_skip_number = jit.Imm(int64(native.S_skip_number)) +) + +func (self *_Assembler) unmarshal_json(t reflect.Type, deref bool) { + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + self.slice_from_r(_AX, 0) // SLICE_R AX, $0 + self.Emit("MOVQ" , _DI, _VAR_sv_p) // MOVQ DI, sv.p + self.Emit("MOVQ" , _SI, _VAR_sv_n) // MOVQ SI, sv.n + self.unmarshal_func(t, _F_decodeJsonUnmarshaler, deref) // UNMARSHAL json, ${t}, ${deref} +} + +func (self *_Assembler) unmarshal_text(t reflect.Type, deref bool) { + self.parse_string() // PARSE STRING + self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + self.unmarshal_func(t, _F_decodeTextUnmarshaler, deref) // UNMARSHAL text, ${t}, ${deref} +} + +func (self *_Assembler) unmarshal_func(t reflect.Type, fn obj.Addr, deref bool) { + pt := t + vk := t.Kind() + + /* allocate the field if needed */ + if deref && vk == reflect.Ptr { + self.Emit("MOVQ" , _VP, _AX) // MOVQ VP, AX + self.Emit("MOVQ" , jit.Ptr(_AX, 0), _AX) // MOVQ (AX), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ" , "_deref_{n}") // JNZ _deref_{n} + self.valloc(t.Elem(), _AX) // VALLOC ${t.Elem()}, AX + self.WritePtrAX(3, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Link("_deref_{n}") // _deref_{n}: + } + + /* set value type */ + self.Emit("MOVQ", jit.Type(pt), _CX) // MOVQ ${pt}, CX + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 0)) // MOVQ CX, (SP) + + /* set value pointer */ + if deref && vk == reflect.Ptr { + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + } else { + self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP) + } + + /* set the source string and call the unmarshaler */ + self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 16)) // MOVOU X0, 16(SP) + self.call_go(fn) // CALL_GO ${fn} + self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error +} + +/** Dynamic Decoding Routine **/ + +var ( + _F_decodeTypedPointer obj.Addr +) + +func init() { + _F_decodeTypedPointer = jit.Func(decodeTypedPointer) +} + +func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) { + self.Emit("MOVQ" , _ARG_fv, _CX) // MOVQ fv, CX + self.Emit("MOVOU", _ARG_sp, _X0) // MOVOU sp, X0 + 
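// NOTE: the argument block built below mirrors a callee of roughly this
+    // shape (inferred from the stack offsets used here; see primitives.go):
+    //
+    //     func decodeTypedPointer(s string, ic int, vt *rt.GoType,
+    //         vp unsafe.Pointer, sb *_Stack, fv uint64) (int, error)
+    //
+    // s fills 0..16(SP), then ic/vt/vp/sb/fv follow; the results come back
+    // at 56(SP) (new cursor) and 64/72(SP) (error), as read out after CALL.
+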
self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0)) // MOVOU X0, (SP) + self.Emit("MOVQ" , _IC, jit.Ptr(_SP, 16)) // MOVQ IC, 16(SP) + self.Emit("MOVQ" , vt, jit.Ptr(_SP, 24)) // MOVQ ${vt}, 24(SP) + self.Emit("MOVQ" , vp, jit.Ptr(_SP, 32)) // MOVQ ${vp}, 32(SP) + self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 40)) // MOVQ ST, 40(SP) + self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 48)) // MOVQ CX, 48(SP) + self.call_go(_F_decodeTypedPointer) // CALL_GO decodeTypedPointer + self.Emit("MOVQ" , jit.Ptr(_SP, 64), _ET) // MOVQ 64(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 72), _EP) // MOVQ 72(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.Emit("MOVQ" , jit.Ptr(_SP, 56), _IC) // MOVQ 56(SP), IC +} + +/** OpCode Assembler Functions **/ + +var ( + _F_memequal = jit.Func(memequal) + _F_memmove = jit.Func(memmove) + _F_growslice = jit.Func(growslice) + _F_makeslice = jit.Func(makeslice) + _F_makemap_small = jit.Func(makemap_small) + _F_mapassign_fast64 = jit.Func(mapassign_fast64) +) + +var ( + _F_lspace = jit.Imm(int64(native.S_lspace)) + _F_strhash = jit.Imm(int64(caching.S_strhash)) +) + +var ( + _F_b64decode = jit.Imm(int64(_subr__b64decode)) + _F_decodeValue = jit.Imm(int64(_subr_decode_value)) +) + +var ( + _F_skip_array = jit.Imm(int64(native.S_skip_array)) + _F_skip_object = jit.Imm(int64(native.S_skip_object)) +) + +var ( + _F_FieldMap_GetCaseInsensitive obj.Addr +) + +const ( + _MODE_AVX2 = 1 << 2 +) + +const ( + _Fe_ID = int64(unsafe.Offsetof(caching.FieldEntry{}.ID)) + _Fe_Name = int64(unsafe.Offsetof(caching.FieldEntry{}.Name)) + _Fe_Hash = int64(unsafe.Offsetof(caching.FieldEntry{}.Hash)) +) + +const ( + _Vk_Ptr = int64(reflect.Ptr) + _Gt_KindFlags = int64(unsafe.Offsetof(rt.GoType{}.KindFlags)) +) + +func init() { + _F_FieldMap_GetCaseInsensitive = jit.Func((*caching.FieldMap).GetCaseInsensitive) +} + +func (self *_Assembler) _asm_OP_any(_ *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX + self.Emit("TESTQ" , _CX, _CX) // TESTQ CX, CX + self.Sjmp("JZ" , "_decode_{n}") // JZ _decode_{n} + self.Emit("CMPQ" , _CX, _VP) // CMPQ CX, VP + self.Sjmp("JE" , "_decode_{n}") // JE _decode_{n} + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX + self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX + self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr} + self.Sjmp("JNE" , "_decode_{n}") // JNE _decode_{n} + self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI + self.decode_dynamic(_AX, _DI) // DECODE AX, DI + self.Sjmp("JMP" , "_decode_end_{n}") // JMP _decode_end_{n} + self.Link("_decode_{n}") // _decode_{n}: + self.Emit("MOVQ" , _ARG_fv, _DF) // MOVQ fv, DF + self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 0)) // MOVQ _ST, (SP) + self.call(_F_decodeValue) // CALL decodeValue + self.Emit("TESTQ" , _EP, _EP) // TESTQ EP, EP + self.Sjmp("JNZ" , _LB_parsing_error) // JNZ _parsing_error + self.Link("_decode_end_{n}") // _decode_end_{n}: +} + +func (self *_Assembler) _asm_OP_dyn(p *_Instr) { + self.Emit("MOVQ" , jit.Type(p.vt()), _ET) // MOVQ ${p.vt()}, ET + self.Emit("CMPQ" , jit.Ptr(_VP, 8), jit.Imm(0)) // CMPQ 8(VP), $0 + self.Sjmp("JE" , _LB_type_error) // JE _type_error + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("MOVQ" , jit.Ptr(_AX, 8), _AX) // MOVQ 8(AX), AX + self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX + self.Emit("ANDL" , 
jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX + self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr} + self.Sjmp("JNE" , _LB_type_error) // JNE _type_error + self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI + self.decode_dynamic(_AX, _DI) // DECODE AX, DI + self.Link("_decode_end_{n}") // _decode_end_{n}: +} + +func (self *_Assembler) _asm_OP_str(_ *_Instr) { + self.parse_string() // PARSE STRING + self.unquote_once(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false, true) // UNQUOTE once, (VP), 8(VP) +} + +func (self *_Assembler) _asm_OP_bin(_ *_Instr) { + self.parse_string() // PARSE STRING + self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1 + self.Emit("MOVQ" , _DI, jit.Ptr(_VP, 0)) // MOVQ DI, (VP) + self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP) + self.Emit("SHRQ" , jit.Imm(2), _SI) // SHRQ $2, SI + self.Emit("LEAQ" , jit.Sib(_SI, _SI, 2, 0), _SI) // LEAQ (SI)(SI*2), SI + self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP) + self.malloc(_SI, _SI) // MALLOC SI, SI + + // TODO: due to base64x's bug, only use AVX mode now + self.Emit("MOVL", jit.Imm(_MODE_JSON), _CX) // MOVL $_MODE_JSON, CX + + /* call the decoder */ + self.Emit("XORL" , _DX, _DX) // XORL DX, DX + self.Emit("MOVQ" , _VP, _DI) // MOVQ VP, DI + + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _R9) // MOVQ SI, (VP) + self.WriteRecNotAX(4, _SI, jit.Ptr(_VP, 0), true, false) // XCHGQ SI, (VP) + self.Emit("MOVQ" , _R9, _SI) + + self.Emit("XCHGQ", _DX, jit.Ptr(_VP, 8)) // XCHGQ DX, 8(VP) + self.call(_F_b64decode) // CALL b64decode + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_base64_error) // JS _base64_error + self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) +} + +func (self *_Assembler) _asm_OP_bool(_ *_Instr) { + self.Emit("LEAQ", jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA" , _LB_eof_error) // JA _eof_error + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('f')) // CMPB (IP)(IC), $'f' + self.Sjmp("JE" , "_false_{n}") // JE _false_{n} + self.Emit("MOVL", jit.Imm(_IM_true), _CX) // MOVL $"true", CX + self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC) + self.Sjmp("JE" , "_bool_true_{n}") + + // try to skip the value + self.Emit("MOVQ", _IC, _VAR_ic) + self.Emit("MOVQ", _T_bool, _ET) + self.Emit("MOVQ", _ET, _VAR_et) + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_end_{n}", 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP" , _LB_skip_one) + + self.Link("_bool_true_{n}") + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + self.Emit("MOVB", jit.Imm(1), jit.Ptr(_VP, 0)) // MOVB $1, (VP) + self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n} + self.Link("_false_{n}") // _false_{n}: + self.Emit("ADDQ", jit.Imm(1), _AX) // ADDQ $1, AX + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA" , _LB_eof_error) // JA _eof_error + self.Emit("MOVL", jit.Imm(_IM_alse), _CX) // MOVL $"alse", CX + self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC) + self.Sjmp("JNE" , _LB_im_error) // JNE _im_error + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP) + self.Link("_end_{n}") // _end_{n}: +} + +func (self *_Assembler) _asm_OP_num(_ *_Instr) { + self.Emit("MOVQ", jit.Imm(0), _VAR_fl) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"')) + self.Emit("MOVQ", _IC, _BP) + self.Sjmp("JNE", "_skip_number_{n}") + 
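// falling through here means the value starts with '"', i.e. a quoted
+    // number such as {"x":"123"}: remember that in fl and step past the
+    // opening quote, so the closing one can be verified at _num_end_{n}.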
self.Emit("MOVQ", jit.Imm(1), _VAR_fl) + self.Emit("ADDQ", jit.Imm(1), _IC) + self.Link("_skip_number_{n}") + + /* call skip_number */ + self.call_sf(_F_skip_number) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNS" , "_num_next_{n}") + + /* call skip one */ + self.Emit("MOVQ", _BP, _VAR_ic) + self.Emit("MOVQ", _T_number, _ET) + self.Emit("MOVQ", _ET, _VAR_et) + self.Byte(0x4c, 0x8d, 0x0d) + self.Sref("_num_end_{n}", 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP" , _LB_skip_one) + + /* assgin string */ + self.Link("_num_next_{n}") + self.slice_from_r(_AX, 0) + self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv) + self.Sjmp("JNC", "_num_write_{n}") + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_num_write_{n}", 4) + self.Sjmp("JMP", "_copy_string") + self.Link("_num_write_{n}") + self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP) + self.WriteRecNotAX(13, _DI, jit.Ptr(_VP, 0), false, false) + + /* check if quoted */ + self.Emit("CMPQ", _VAR_fl, jit.Imm(1)) + self.Sjmp("JNE", "_num_end_{n}") + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"')) + self.Sjmp("JNE", _LB_char_0_error) + self.Emit("ADDQ", jit.Imm(1), _IC) + self.Link("_num_end_{n}") +} + +func (self *_Assembler) _asm_OP_i8(ins *_Instr) { + var pin = "_i8_end_{n}" + self.parse_signed(int8Type, pin, -1) // PARSE int8 + self.range_signed(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8 + self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_i16(ins *_Instr) { + var pin = "_i16_end_{n}" + self.parse_signed(int16Type, pin, -1) // PARSE int16 + self.range_signed(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16 + self.Emit("MOVW", _AX, jit.Ptr(_VP, 0)) // MOVW AX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_i32(ins *_Instr) { + var pin = "_i32_end_{n}" + self.parse_signed(int32Type, pin, -1) // PARSE int32 + self.range_signed(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32 + self.Emit("MOVL", _AX, jit.Ptr(_VP, 0)) // MOVL AX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_i64(ins *_Instr) { + var pin = "_i64_end_{n}" + self.parse_signed(int64Type, pin, -1) // PARSE int64 + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_u8(ins *_Instr) { + var pin = "_u8_end_{n}" + self.parse_unsigned(uint8Type, pin, -1) // PARSE uint8 + self.range_unsigned(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8 + self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_u16(ins *_Instr) { + var pin = "_u16_end_{n}" + self.parse_unsigned(uint16Type, pin, -1) // PARSE uint16 + self.range_unsigned(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16 + self.Emit("MOVW", _AX, jit.Ptr(_VP, 0)) // MOVW AX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_u32(ins *_Instr) { + var pin = "_u32_end_{n}" + self.parse_unsigned(uint32Type, pin, -1) // PARSE uint32 + self.range_unsigned(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32 + self.Emit("MOVL", _AX, jit.Ptr(_VP, 0)) // MOVL AX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_u64(ins *_Instr) { + var pin = "_u64_end_{n}" + self.parse_unsigned(uint64Type, pin, -1) // PARSE uint64 + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) + self.Link(pin) +} + +func 
(self *_Assembler) _asm_OP_f32(ins *_Instr) { + var pin = "_f32_end_{n}" + self.parse_number(float32Type, pin, -1) // PARSE NUMBER + self.range_single() // RANGE float32 + self.Emit("MOVSS", _X0, jit.Ptr(_VP, 0)) // MOVSS X0, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_f64(ins *_Instr) { + var pin = "_f64_end_{n}" + self.parse_number(float64Type, pin, -1) // PARSE NUMBER + self.Emit("MOVSD", _VAR_st_Dv, _X0) // MOVSD st.Dv, X0 + self.Emit("MOVSD", _X0, jit.Ptr(_VP, 0)) // MOVSD X0, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_unquote(ins *_Instr) { + self.check_eof(2) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('\\')) // CMPB (IP)(IC), $'\\' + self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 1), jit.Imm('"')) // CMPB 1(IP)(IC), $'"' + self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error + self.Emit("ADDQ", jit.Imm(2), _IC) // ADDQ $2, IC + self.parse_string() // PARSE STRING + self.unquote_twice(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false) // UNQUOTE twice, (VP), 8(VP) +} + +func (self *_Assembler) _asm_OP_nil_1(_ *_Instr) { + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) +} + +func (self *_Assembler) _asm_OP_nil_2(_ *_Instr) { + self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) +} + +func (self *_Assembler) _asm_OP_nil_3(_ *_Instr) { + self.Emit("XORL" , _AX, _AX) // XORL AX, AX + self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) + self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 16)) // MOVOU X0, 16(VP) +} + +func (self *_Assembler) _asm_OP_deref(p *_Instr) { + self.vfollow(p.vt()) +} + +func (self *_Assembler) _asm_OP_index(p *_Instr) { + self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ ${p.vi()}, AX + self.Emit("ADDQ", _AX, _VP) // ADDQ _AX, _VP +} + +func (self *_Assembler) _asm_OP_is_null(p *_Instr) { + self.Emit("LEAQ" , jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX + self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA" , "_not_null_{n}") // JA _not_null_{n} + self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null" + self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC + self.Xjmp("JE" , p.vi()) // JE {p.vi()} + self.Link("_not_null_{n}") // _not_null_{n}: +} + +func (self *_Assembler) _asm_OP_is_null_quote(p *_Instr) { + self.Emit("LEAQ" , jit.Ptr(_IC, 5), _AX) // LEAQ 4(IC), AX + self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA" , "_not_null_quote_{n}") // JA _not_null_quote_{n} + self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null" + self.Sjmp("JNE" , "_not_null_quote_{n}") // JNE _not_null_quote_{n} + self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 4), jit.Imm('"')) // CMPB 4(IP)(IC), $'"' + self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC + self.Xjmp("JE" , p.vi()) // JE {p.vi()} + self.Link("_not_null_quote_{n}") // _not_null_quote_{n}: +} + +func (self *_Assembler) _asm_OP_map_init(_ *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n} + self.call_go(_F_makemap_small) // CALL_GO makemap_small + self.Emit("MOVQ" , jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX + self.WritePtrAX(6, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Link("_end_{n}") // _end_{n}: + self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP +} + +func (self *_Assembler) _asm_OP_map_key_i8(p *_Instr) { + 
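+    // Every _asm_OP_map_key_* opcode follows one pattern: parse the key
+    // token, range-check it, then assign a map slot. The empty pin plus
+    // pin2 = p.vi() routes parse mismatches through skip_key_value, which
+    // skips the whole key/value pair and resumes at instruction p.vi().
+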
self.parse_signed(int8Type, "", p.vi()) // PARSE int8 + self.range_signed(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8 + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int8, mapassign, st.Iv +} + +func (self *_Assembler) _asm_OP_map_key_i16(p *_Instr) { + self.parse_signed(int16Type, "", p.vi()) // PARSE int16 + self.range_signed(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16 + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int16, mapassign, st.Iv +} + +func (self *_Assembler) _asm_OP_map_key_i32(p *_Instr) { + self.parse_signed(int32Type, "", p.vi()) // PARSE int32 + self.range_signed(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32 + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int32, mapassign, st.Iv + } else { + self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN int32, mapassign_fast32 + } +} + +func (self *_Assembler) _asm_OP_map_key_i64(p *_Instr) { + self.parse_signed(int64Type, "", p.vi()) // PARSE int64 + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int64, mapassign, st.Iv + } else { + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN int64, mapassign_fast64 + } +} + +func (self *_Assembler) _asm_OP_map_key_u8(p *_Instr) { + self.parse_unsigned(uint8Type, "", p.vi()) // PARSE uint8 + self.range_unsigned(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8 + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint8, vt.Iv +} + +func (self *_Assembler) _asm_OP_map_key_u16(p *_Instr) { + self.parse_unsigned(uint16Type, "", p.vi()) // PARSE uint16 + self.range_unsigned(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16 + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint16, vt.Iv +} + +func (self *_Assembler) _asm_OP_map_key_u32(p *_Instr) { + self.parse_unsigned(uint32Type, "", p.vi()) // PARSE uint32 + self.range_unsigned(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32 + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint32, vt.Iv + } else { + self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN uint32, mapassign_fast32 + } +} + +func (self *_Assembler) _asm_OP_map_key_u64(p *_Instr) { + self.parse_unsigned(uint64Type, "", p.vi()) // PARSE uint64 + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint64, vt.Iv + } else { + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN uint64, mapassign_fast64 + } +} + +func (self *_Assembler) _asm_OP_map_key_f32(p *_Instr) { + self.parse_number(float32Type, "", p.vi()) // PARSE NUMBER + self.range_single() // RANGE float32 + self.Emit("MOVSS", _X0, _VAR_st_Dv) // MOVSS X0, st.Dv + self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv +} + +func (self *_Assembler) _asm_OP_map_key_f64(p *_Instr) { + self.parse_number(float64Type, "", p.vi()) // PARSE NUMBER + self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv +} + +func (self *_Assembler) _asm_OP_map_key_str(p *_Instr) { + self.parse_string() // PARSE STRING + self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + if vt := p.vt(); !mapfast(vt) { + self.valloc(vt.Key(), _DI) + self.Emit("MOVOU", _VAR_sv, _X0) + self.Emit("MOVOU", _X0, jit.Ptr(_DI, 0)) + self.mapassign_std(vt, jit.Ptr(_DI, 0)) + } else { + self.Emit("MOVQ", _VAR_sv_p, _DI) // MOVQ sv.p, DI + 
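// fast path: a plain string key avoids valloc + mapassign and goes
+        // through the runtime's specialized mapassign_faststr instead
+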
self.Emit("MOVQ", _VAR_sv_n, _SI) // MOVQ sv.n, SI + self.mapassign_str_fast(vt, _DI, _SI) // MAPASSIGN string, DI, SI + } +} + +func (self *_Assembler) _asm_OP_map_key_utext(p *_Instr) { + self.parse_string() // PARSE STRING + self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + self.mapassign_utext(p.vt(), false) // MAPASSIGN utext, ${p.vt()}, false +} + +func (self *_Assembler) _asm_OP_map_key_utext_p(p *_Instr) { + self.parse_string() // PARSE STRING + self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, false) // UNQUOTE once, sv.p, sv.n + self.mapassign_utext(p.vt(), true) // MAPASSIGN utext, ${p.vt()}, true +} + +func (self *_Assembler) _asm_OP_array_skip(_ *_Instr) { + self.call_sf(_F_skip_array) // CALL_SF skip_array + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v +} + +func (self *_Assembler) _asm_OP_array_clear(p *_Instr) { + self.mem_clear_rem(p.i64(), true) +} + +func (self *_Assembler) _asm_OP_array_clear_p(p *_Instr) { + self.mem_clear_rem(p.i64(), false) +} + +func (self *_Assembler) _asm_OP_slice_init(p *_Instr) { + self.Emit("XORL" , _AX, _AX) // XORL AX, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) + self.Emit("MOVQ" , jit.Ptr(_VP, 16), _AX) // MOVQ 16(VP), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ" , "_done_{n}") // JNZ _done_{n} + self.Emit("MOVQ" , jit.Imm(_MinSlice), _CX) // MOVQ ${_MinSlice}, CX + self.Emit("MOVQ" , _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP) + self.Emit("MOVQ" , jit.Type(p.vt()), _DX) // MOVQ ${p.vt()}, DX + self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 16)) // MOVQ CX, 16(SP) + self.call_go(_F_makeslice) // CALL_GO makeslice + self.Emit("MOVQ" , jit.Ptr(_SP, 24), _AX) // MOVQ 24(SP), AX + self.WritePtrAX(7, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Link("_done_{n}") // _done_{n}: + self.Emit("XORL" , _AX, _AX) // XORL AX, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) +} + +func (self *_Assembler) _asm_OP_slice_append(p *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_VP, 8), _AX) // MOVQ 8(VP), AX + self.Emit("CMPQ" , _AX, jit.Ptr(_VP, 16)) // CMPQ AX, 16(VP) + self.Sjmp("JB" , "_index_{n}") // JB _index_{n} + self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVOU", jit.Ptr(_VP, 0), _X0) // MOVOU (VP), X0 + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) + self.Emit("MOVQ" , jit.Ptr(_VP, 16), _AX) // MOVQ 16(VP), AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 24)) // MOVQ AX, 24(SP) + self.Emit("SHLQ" , jit.Imm(1), _AX) // SHLQ $1, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP) + self.call_go(_F_growslice) // CALL_GO growslice + self.Emit("MOVQ" , jit.Ptr(_SP, 40), _DI) // MOVQ 40(SP), DI + self.Emit("MOVQ" , jit.Ptr(_SP, 48), _AX) // MOVQ 48(SP), AX + self.Emit("MOVQ" , jit.Ptr(_SP, 56), _SI) // MOVQ 56(SP), SI + self.WriteRecNotAX(8, _DI, jit.Ptr(_VP, 0), true, true)// MOVQ DI, (VP) + self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) + self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP) + self.Link("_index_{n}") // _index_{n}: + self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP) + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _VP) // MOVQ (VP), VP + self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX + self.From("MULQ" , _CX) // MULQ CX + 
self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP +} + +func (self *_Assembler) _asm_OP_object_skip(_ *_Instr) { + self.call_sf(_F_skip_object) // CALL_SF skip_object + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v +} + +func (self *_Assembler) _asm_OP_object_next(_ *_Instr) { + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v +} + +func (self *_Assembler) _asm_OP_struct_field(p *_Instr) { + assert_eq(caching.FieldEntrySize, 32, "invalid field entry size") + self.Emit("MOVQ" , jit.Imm(-1), _AX) // MOVQ $-1, AX + self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, sr + self.parse_string() // PARSE STRING + self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, false) // UNQUOTE once, sv.p, sv.n + self.Emit("LEAQ" , _VAR_sv, _AX) // LEAQ sv, AX + self.Emit("XORL" , _CX, _CX) // XORL CX, CX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.call_go(_F_strhash) // CALL_GO strhash + self.Emit("MOVQ" , jit.Ptr(_SP, 16), _AX) // MOVQ 16(SP), AX + self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9 + self.Emit("MOVQ" , jit.Imm(freezeFields(p.vf())), _CX) // MOVQ ${p.vf()}, CX + self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_b), _SI) // MOVQ FieldMap.b(CX), SI + self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_N), _CX) // MOVQ FieldMap.N(CX), CX + self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX + self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n} + self.Link("_loop_{n}") // _loop_{n}: + self.Emit("XORL" , _DX, _DX) // XORL DX, DX + self.From("DIVQ" , _CX) // DIVQ CX + self.Emit("LEAQ" , jit.Ptr(_DX, 1), _AX) // LEAQ 1(DX), AX + self.Emit("SHLQ" , jit.Imm(5), _DX) // SHLQ $5, DX + self.Emit("LEAQ" , jit.Sib(_SI, _DX, 1, 0), _DI) // LEAQ (SI)(DX), DI + self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Hash), _R8) // MOVQ FieldEntry.Hash(DI), R8 + self.Emit("TESTQ", _R8, _R8) // TESTQ R8, R8 + self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n} + self.Emit("CMPQ" , _R8, _R9) // CMPQ R8, R9 + self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n} + self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name + 8), _DX) // MOVQ FieldEntry.Name+8(DI), DX + self.Emit("CMPQ" , _DX, _VAR_sv_n) // CMPQ DX, sv.n + self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n} + self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_ID), _R8) // MOVQ FieldEntry.ID(DI), R8 + self.Emit("MOVQ" , _AX, _VAR_ss_AX) // MOVQ AX, ss.AX + self.Emit("MOVQ" , _CX, _VAR_ss_CX) // MOVQ CX, ss.CX + self.Emit("MOVQ" , _SI, _VAR_ss_SI) // MOVQ SI, ss.SI + self.Emit("MOVQ" , _R8, _VAR_ss_R8) // MOVQ R8, ss.R8 + self.Emit("MOVQ" , _R9, _VAR_ss_R9) // MOVQ R9, ss.R9 + self.Emit("MOVQ" , _VAR_sv_p, _AX) // MOVQ _VAR_sv_p, AX + self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name), _CX) // MOVQ FieldEntry.Name(DI), CX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 16)) // MOVQ DX, 16(SP) + self.call_go(_F_memequal) // CALL_GO memequal + self.Emit("MOVQ" , _VAR_ss_AX, _AX) // MOVQ ss.AX, AX + self.Emit("MOVQ" , _VAR_ss_CX, _CX) // MOVQ ss.CX, CX + self.Emit("MOVQ" , _VAR_ss_SI, _SI) // MOVQ ss.SI, SI + self.Emit("MOVQ" , _VAR_ss_R9, _R9) // MOVQ ss.R9, R9 + self.Emit("MOVB" , jit.Ptr(_SP, 24), _DX) // MOVB 24(SP), DX + self.Emit("TESTB", _DX, _DX) // TESTB DX, DX + self.Sjmp("JZ" , "_loop_{n}") // JZ _loop_{n} + self.Emit("MOVQ" , _VAR_ss_R8, _R8) // MOVQ ss.R8, R8 + 
self.Emit("MOVQ" , _R8, _VAR_sr) // MOVQ R8, sr + self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n} + self.Link("_try_lowercase_{n}") // _try_lowercase_{n}: + self.Emit("MOVQ" , jit.Imm(referenceFields(p.vf())), _AX) // MOVQ ${p.vf()}, AX + self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0 + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) + self.call_go(_F_FieldMap_GetCaseInsensitive) // CALL_GO FieldMap::GetCaseInsensitive + self.Emit("MOVQ" , jit.Ptr(_SP, 24), _AX) // MOVQ 24(SP), AX + self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, _VAR_sr + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNS" , "_end_{n}") // JNS _end_{n} + self.Emit("BTQ" , jit.Imm(_F_disable_unknown), _ARG_fv) // BTQ ${_F_disable_unknown}, fv + self.Sjmp("JC" , _LB_field_error) // JC _field_error + self.Link("_end_{n}") // _end_{n}: +} + +func (self *_Assembler) _asm_OP_unmarshal(p *_Instr) { + self.unmarshal_json(p.vt(), true) +} + +func (self *_Assembler) _asm_OP_unmarshal_p(p *_Instr) { + self.unmarshal_json(p.vt(), false) +} + +func (self *_Assembler) _asm_OP_unmarshal_text(p *_Instr) { + self.unmarshal_text(p.vt(), true) +} + +func (self *_Assembler) _asm_OP_unmarshal_text_p(p *_Instr) { + self.unmarshal_text(p.vt(), false) +} + +func (self *_Assembler) _asm_OP_lspace(_ *_Instr) { + self.lspace("_{n}") +} + +func (self *_Assembler) lspace(subfix string) { + var label = "_lspace" + subfix + + self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error + self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA" , label) // JA _nospace_{n} + self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC" , label) // JNC _nospace_{n} + + /* test up to 4 characters */ + for i := 0; i < 3; i++ { + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA" , label) // JA _nospace_{n} + self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC" , label) // JNC _nospace_{n} + } + + /* handle over to the native function */ + self.Emit("MOVQ" , _IP, _DI) // MOVQ IP, DI + self.Emit("MOVQ" , _IL, _SI) // MOVQ IL, SI + self.Emit("MOVQ" , _IC, _DX) // MOVQ IC, DX + self.call(_F_lspace) // CALL lspace + self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parsing_error_v + self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL + self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error + self.Emit("MOVQ" , _AX, _IC) // MOVQ AX, IC + self.Link(label) // _nospace_{n}: +} + +func (self *_Assembler) _asm_OP_match_char(p *_Instr) { + self.check_eof(1) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()} + self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC +} + +func (self *_Assembler) _asm_OP_check_char(p *_Instr) { + self.check_eof(1) + self.Emit("LEAQ" , jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX + self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()} + self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC + self.Xjmp("JE" , p.vi()) // JE {p.vi()} +} + +func (self *_Assembler) 
_asm_OP_check_char_0(p *_Instr) {
+    self.check_eof(1)
+    self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb())))  // CMPB (IP)(IC), ${p.vb()}
+    self.Xjmp("JE"  , p.vi())                                           // JE   {p.vi()}
+}
+
+func (self *_Assembler) _asm_OP_add(p *_Instr) {
+    self.Emit("ADDQ", jit.Imm(int64(p.vi())), _IC)  // ADDQ ${p.vi()}, IC
+}
+
+func (self *_Assembler) _asm_OP_load(_ *_Instr) {
+    self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX)             // MOVQ (ST), AX
+    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _VP)     // MOVQ (ST)(AX), VP
+}
+
+func (self *_Assembler) _asm_OP_save(_ *_Instr) {
+    self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX)                             // MOVQ (ST), CX
+    self.Emit("CMPQ", _CX, jit.Imm(_MaxStackBytes))                     // CMPQ CX, ${_MaxStackBytes}
+    self.Sjmp("JAE" , _LB_stack_error)                                  // JAE  _stack_error
+    self.WriteRecNotAX(0 , _VP, jit.Sib(_ST, _CX, 1, 8), false, false)  // MOVQ VP, 8(ST)(CX)
+    self.Emit("ADDQ", jit.Imm(8), _CX)                                  // ADDQ $8, CX
+    self.Emit("MOVQ", _CX, jit.Ptr(_ST, 0))                             // MOVQ CX, (ST)
+}
+
+func (self *_Assembler) _asm_OP_drop(_ *_Instr) {
+    self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX)             // MOVQ (ST), AX
+    self.Emit("SUBQ", jit.Imm(8), _AX)                  // SUBQ $8, AX
+    self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _VP)     // MOVQ 8(ST)(AX), VP
+    self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0))             // MOVQ AX, (ST)
+    self.Emit("XORL", _ET, _ET)                         // XORL ET, ET
+    self.Emit("MOVQ", _ET, jit.Sib(_ST, _AX, 1, 8))     // MOVQ ET, 8(ST)(AX)
+}
+
+func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) {
+    self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX)            // MOVQ  (ST), AX
+    self.Emit("SUBQ" , jit.Imm(16), _AX)                // SUBQ  $16, AX
+    self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _VP)    // MOVQ  8(ST)(AX), VP
+    self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0))            // MOVQ  AX, (ST)
+    self.Emit("PXOR" , _X0, _X0)                        // PXOR  X0, X0
+    self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8))    // MOVOU X0, 8(ST)(AX)
+}
+
+func (self *_Assembler) _asm_OP_recurse(p *_Instr) {
+    self.Emit("MOVQ", jit.Type(p.vt()), _AX)    // MOVQ ${p.vt()}, AX
+    self.decode_dynamic(_AX, _VP)               // DECODE AX, VP
+}
+
+func (self *_Assembler) _asm_OP_goto(p *_Instr) {
+    self.Xjmp("JMP", p.vi())
+}
+
+func (self *_Assembler) _asm_OP_switch(p *_Instr) {
+    self.Emit("MOVQ", _VAR_sr, _AX)             // MOVQ sr, AX
+    self.Emit("CMPQ", _AX, jit.Imm(p.i64()))    // CMPQ AX, ${len(p.vs())}
+    self.Sjmp("JAE" , "_default_{n}")           // JAE  _default_{n}
+
+    /* jump table selector */
+    self.Byte(0x48, 0x8d, 0x3d)                 // LEAQ ?(PC), DI
+    self.Sref("_switch_table_{n}", 4)           // ....
&_switch_table_{n} + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX + self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP" , _AX) // JMP AX + self.Link("_switch_table_{n}") // _switch_table_{n}: + + /* generate the jump table */ + for i, v := range p.vs() { + self.Xref(v, int64(-i) * 4) + } + + /* default case */ + self.Link("_default_{n}") + self.NOP() +} + +func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) { + self.Emit("MOVQ", jit.Imm(int64(p2.op())), jit.Ptr(_SP, 16))// MOVQ $(p2.op()), 16(SP) + self.Emit("MOVQ", jit.Imm(int64(p1.op())), jit.Ptr(_SP, 8)) // MOVQ $(p1.op()), 8(SP) + self.Emit("MOVQ", jit.Imm(int64(i)), jit.Ptr(_SP, 0)) // MOVQ $(i), (SP) + self.call_go(_F_println) +} + +var _runtime_writeBarrier uintptr = rt.GcwbAddr() + +//go:linkname gcWriteBarrierAX runtime.gcWriteBarrier +func gcWriteBarrierAX() + +var ( + _V_writeBarrier = jit.Imm(int64(_runtime_writeBarrier)) + + _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX) +) + +func (self *_Assembler) WritePtrAX(i int, rec obj.Addr, saveDI bool) { + self.Emit("MOVQ", _V_writeBarrier, _R10) + self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX + self.Rjmp("CALL", _R10) + if saveDI { + self.load(_DI) + } + self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", _AX, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} + +func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool, saveAX bool) { + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _R10) + self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + if saveAX { + self.Emit("XCHGQ", ptr, _AX) + } else { + self.Emit("MOVQ", ptr, _AX) + } + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX + self.Rjmp("CALL", _R10) + if saveDI { + self.load(_DI) + } + if saveAX { + self.Emit("XCHGQ", ptr, _AX) + } + self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go117.go b/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go117.go new file mode 100644 index 0000000..8a70cff --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go117.go @@ -0,0 +1,1922 @@ +//go:build go1.17 && !go1.21 +// +build go1.17,!go1.21 + +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package decoder + +import ( + `encoding/json` + `fmt` + `math` + `reflect` + `strconv` + `unsafe` + + `github.com/bytedance/sonic/internal/caching` + `github.com/bytedance/sonic/internal/jit` + `github.com/bytedance/sonic/internal/native` + `github.com/bytedance/sonic/internal/native/types` + `github.com/bytedance/sonic/internal/rt` + `github.com/twitchyliquid64/golang-asm/obj` + `github.com/twitchyliquid64/golang-asm/obj/x86` +) + +/** Register Allocations + * + * State Registers: + * + * %r13 : stack base + * %r10 : input pointer + * %r12 : input length + * %r11 : input cursor + * %r15 : value pointer + * + * Error Registers: + * + * %rax : error type register + * %rbx : error pointer register + */ + +/** Function Prototype & Stack Map + * + * func (s string, ic int, vp unsafe.Pointer, sb *_Stack, fv uint64, sv string) (rc int, err error) + * + * s.buf : (FP) + * s.len : 8(FP) + * ic : 16(FP) + * vp : 24(FP) + * sb : 32(FP) + * fv : 40(FP) + * sv : 56(FP) + * err.vt : 72(FP) + * err.vp : 80(FP) + */ + +const ( + _FP_args = 72 // 72 bytes to pass and spill register arguments + _FP_fargs = 80 // 80 bytes for passing arguments to other Go functions + _FP_saves = 48 // 48 bytes for saving the registers before CALL instructions + _FP_locals = 144 // 144 bytes for local variables +) + +const ( + _FP_offs = _FP_fargs + _FP_saves + _FP_locals + _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer + _FP_base = _FP_size + 8 // 8 bytes for the return address +) + +const ( + _IM_null = 0x6c6c756e // 'null' + _IM_true = 0x65757274 // 'true' + _IM_alse = 0x65736c61 // 'alse' ('false' without the 'f') +) + +const ( + _BM_space = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n') +) + +const ( + _MODE_JSON = 1 << 3 // base64 mode +) + +const ( + _LB_error = "_error" + _LB_im_error = "_im_error" + _LB_eof_error = "_eof_error" + _LB_type_error = "_type_error" + _LB_field_error = "_field_error" + _LB_range_error = "_range_error" + _LB_stack_error = "_stack_error" + _LB_base64_error = "_base64_error" + _LB_unquote_error = "_unquote_error" + _LB_parsing_error = "_parsing_error" + _LB_parsing_error_v = "_parsing_error_v" + _LB_mismatch_error = "_mismatch_error" +) + +const ( + _LB_char_0_error = "_char_0_error" + _LB_char_1_error = "_char_1_error" + _LB_char_2_error = "_char_2_error" + _LB_char_3_error = "_char_3_error" + _LB_char_4_error = "_char_4_error" + _LB_char_m2_error = "_char_m2_error" + _LB_char_m3_error = "_char_m3_error" +) + +const ( + _LB_skip_one = "_skip_one" + _LB_skip_key_value = "_skip_key_value" +) + +var ( + _AX = jit.Reg("AX") + _BX = jit.Reg("BX") + _CX = jit.Reg("CX") + _DX = jit.Reg("DX") + _DI = jit.Reg("DI") + _SI = jit.Reg("SI") + _BP = jit.Reg("BP") + _SP = jit.Reg("SP") + _R8 = jit.Reg("R8") + _R9 = jit.Reg("R9") + _X0 = jit.Reg("X0") + _X1 = jit.Reg("X1") +) + +var ( + _IP = jit.Reg("R10") // saved on BP when callc + _IC = jit.Reg("R11") // saved on BX when call_c + _IL = jit.Reg("R12") + _ST = jit.Reg("R13") + _VP = jit.Reg("R15") +) + +var ( + _DF = jit.Reg("AX") // reuse AX in generic decoder for flags + _ET = jit.Reg("AX") + _EP = jit.Reg("BX") +) + + + +var ( + _ARG_s = _ARG_sp + _ARG_sp = jit.Ptr(_SP, _FP_base + 0) + _ARG_sl = jit.Ptr(_SP, _FP_base + 8) + _ARG_ic = jit.Ptr(_SP, _FP_base + 16) + _ARG_vp = jit.Ptr(_SP, _FP_base + 24) + _ARG_sb = jit.Ptr(_SP, _FP_base + 32) + _ARG_fv = jit.Ptr(_SP, _FP_base + 40) +) + +var ( + _ARG_sv = _ARG_sv_p + _ARG_sv_p = jit.Ptr(_SP, _FP_base + 48) + _ARG_sv_n = jit.Ptr(_SP, _FP_base + 56) + _ARG_vk = jit.Ptr(_SP,
_FP_base + 64) +) + +var ( + _VAR_st = _VAR_st_Vt + _VAR_sr = jit.Ptr(_SP, _FP_fargs + _FP_saves) +) + +var ( + _VAR_st_Vt = jit.Ptr(_SP, _FP_fargs + _FP_saves + 0) + _VAR_st_Dv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8) + _VAR_st_Iv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16) + _VAR_st_Ep = jit.Ptr(_SP, _FP_fargs + _FP_saves + 24) + _VAR_st_Db = jit.Ptr(_SP, _FP_fargs + _FP_saves + 32) + _VAR_st_Dc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 40) +) + +var ( + _VAR_ss_AX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 48) + _VAR_ss_CX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 56) + _VAR_ss_SI = jit.Ptr(_SP, _FP_fargs + _FP_saves + 64) + _VAR_ss_R8 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 72) + _VAR_ss_R9 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 80) +) + +var ( + _VAR_bs_p = jit.Ptr(_SP, _FP_fargs + _FP_saves + 88) + _VAR_bs_n = jit.Ptr(_SP, _FP_fargs + _FP_saves + 96) + _VAR_bs_LR = jit.Ptr(_SP, _FP_fargs + _FP_saves + 104) +) + +var _VAR_fl = jit.Ptr(_SP, _FP_fargs + _FP_saves + 112) + +var ( + _VAR_et = jit.Ptr(_SP, _FP_fargs + _FP_saves + 120) // save mismatched type + _VAR_pc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 128) // save skip return pc + _VAR_ic = jit.Ptr(_SP, _FP_fargs + _FP_saves + 136) // save mismatched position +) + +type _Assembler struct { + jit.BaseAssembler + p _Program + name string +} + +func newAssembler(p _Program) *_Assembler { + return new(_Assembler).Init(p) +} + +/** Assembler Interface **/ + +func (self *_Assembler) Load() _Decoder { + return ptodec(self.BaseAssembler.Load("decode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs)) +} + +func (self *_Assembler) Init(p _Program) *_Assembler { + self.p = p + self.BaseAssembler.Init(self.compile) + return self +} + +func (self *_Assembler) compile() { + self.prologue() + self.instrs() + self.epilogue() + self.copy_string() + self.escape_string() + self.escape_string_twice() + self.skip_one() + self.skip_key_value() + self.type_error() + self.mismatch_error() + self.field_error() + self.range_error() + self.stack_error() + self.base64_error() + self.parsing_error() +} + +/** Assembler Stages **/ + +var _OpFuncTab = [256]func(*_Assembler, *_Instr) { + _OP_any : (*_Assembler)._asm_OP_any, + _OP_dyn : (*_Assembler)._asm_OP_dyn, + _OP_str : (*_Assembler)._asm_OP_str, + _OP_bin : (*_Assembler)._asm_OP_bin, + _OP_bool : (*_Assembler)._asm_OP_bool, + _OP_num : (*_Assembler)._asm_OP_num, + _OP_i8 : (*_Assembler)._asm_OP_i8, + _OP_i16 : (*_Assembler)._asm_OP_i16, + _OP_i32 : (*_Assembler)._asm_OP_i32, + _OP_i64 : (*_Assembler)._asm_OP_i64, + _OP_u8 : (*_Assembler)._asm_OP_u8, + _OP_u16 : (*_Assembler)._asm_OP_u16, + _OP_u32 : (*_Assembler)._asm_OP_u32, + _OP_u64 : (*_Assembler)._asm_OP_u64, + _OP_f32 : (*_Assembler)._asm_OP_f32, + _OP_f64 : (*_Assembler)._asm_OP_f64, + _OP_unquote : (*_Assembler)._asm_OP_unquote, + _OP_nil_1 : (*_Assembler)._asm_OP_nil_1, + _OP_nil_2 : (*_Assembler)._asm_OP_nil_2, + _OP_nil_3 : (*_Assembler)._asm_OP_nil_3, + _OP_deref : (*_Assembler)._asm_OP_deref, + _OP_index : (*_Assembler)._asm_OP_index, + _OP_is_null : (*_Assembler)._asm_OP_is_null, + _OP_is_null_quote : (*_Assembler)._asm_OP_is_null_quote, + _OP_map_init : (*_Assembler)._asm_OP_map_init, + _OP_map_key_i8 : (*_Assembler)._asm_OP_map_key_i8, + _OP_map_key_i16 : (*_Assembler)._asm_OP_map_key_i16, + _OP_map_key_i32 : (*_Assembler)._asm_OP_map_key_i32, + _OP_map_key_i64 : (*_Assembler)._asm_OP_map_key_i64, + _OP_map_key_u8 : (*_Assembler)._asm_OP_map_key_u8, + _OP_map_key_u16 : (*_Assembler)._asm_OP_map_key_u16, + _OP_map_key_u32 : 
(*_Assembler)._asm_OP_map_key_u32, + _OP_map_key_u64 : (*_Assembler)._asm_OP_map_key_u64, + _OP_map_key_f32 : (*_Assembler)._asm_OP_map_key_f32, + _OP_map_key_f64 : (*_Assembler)._asm_OP_map_key_f64, + _OP_map_key_str : (*_Assembler)._asm_OP_map_key_str, + _OP_map_key_utext : (*_Assembler)._asm_OP_map_key_utext, + _OP_map_key_utext_p : (*_Assembler)._asm_OP_map_key_utext_p, + _OP_array_skip : (*_Assembler)._asm_OP_array_skip, + _OP_array_clear : (*_Assembler)._asm_OP_array_clear, + _OP_array_clear_p : (*_Assembler)._asm_OP_array_clear_p, + _OP_slice_init : (*_Assembler)._asm_OP_slice_init, + _OP_slice_append : (*_Assembler)._asm_OP_slice_append, + _OP_object_skip : (*_Assembler)._asm_OP_object_skip, + _OP_object_next : (*_Assembler)._asm_OP_object_next, + _OP_struct_field : (*_Assembler)._asm_OP_struct_field, + _OP_unmarshal : (*_Assembler)._asm_OP_unmarshal, + _OP_unmarshal_p : (*_Assembler)._asm_OP_unmarshal_p, + _OP_unmarshal_text : (*_Assembler)._asm_OP_unmarshal_text, + _OP_unmarshal_text_p : (*_Assembler)._asm_OP_unmarshal_text_p, + _OP_lspace : (*_Assembler)._asm_OP_lspace, + _OP_match_char : (*_Assembler)._asm_OP_match_char, + _OP_check_char : (*_Assembler)._asm_OP_check_char, + _OP_load : (*_Assembler)._asm_OP_load, + _OP_save : (*_Assembler)._asm_OP_save, + _OP_drop : (*_Assembler)._asm_OP_drop, + _OP_drop_2 : (*_Assembler)._asm_OP_drop_2, + _OP_recurse : (*_Assembler)._asm_OP_recurse, + _OP_goto : (*_Assembler)._asm_OP_goto, + _OP_switch : (*_Assembler)._asm_OP_switch, + _OP_check_char_0 : (*_Assembler)._asm_OP_check_char_0, + _OP_dismatch_err : (*_Assembler)._asm_OP_dismatch_err, + _OP_go_skip : (*_Assembler)._asm_OP_go_skip, + _OP_add : (*_Assembler)._asm_OP_add, + _OP_debug : (*_Assembler)._asm_OP_debug, +} + +func (self *_Assembler) _asm_OP_debug(_ *_Instr) { + self.Byte(0xcc) +} + +func (self *_Assembler) instr(v *_Instr) { + if fn := _OpFuncTab[v.op()]; fn != nil { + fn(self, v) + } else { + panic(fmt.Sprintf("invalid opcode: %d", v.op())) + } +} + +func (self *_Assembler) instrs() { + for i, v := range self.p { + self.Mark(i) + self.instr(&v) + self.debug_instr(i, &v) + } +} + +func (self *_Assembler) epilogue() { + self.Mark(len(self.p)) + self.Emit("XORL", _EP, _EP) // XORL EP, EP + self.Emit("MOVQ", _VAR_et, _ET) // MOVQ VAR_et, ET + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ", _LB_mismatch_error) // JNZ _LB_mismatch_error + self.Link(_LB_error) // _error: + self.Emit("MOVQ", _EP, _CX) // MOVQ BX, CX + self.Emit("MOVQ", _ET, _BX) // MOVQ AX, BX + self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX + self.Emit("MOVQ", jit.Imm(0), _ARG_sp) // MOVQ $0, sv.p<>+48(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_vp) // MOVQ $0, sv.p<>+48(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_sv_p) // MOVQ $0, sv.p<>+48(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_vk) // MOVQ $0, vk<>+64(FP) + self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP + self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP + self.Emit("RET") // RET +} + +func (self *_Assembler) prologue() { + self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP + self.Emit("MOVQ", _AX, _ARG_sp) // MOVQ AX, s.p<>+0(FP) + self.Emit("MOVQ", _AX, _IP) // MOVQ AX, IP + self.Emit("MOVQ", _BX, _ARG_sl) // MOVQ BX, s.l<>+8(FP) + self.Emit("MOVQ", _BX, _IL) // MOVQ BX, IL + self.Emit("MOVQ", _CX, _ARG_ic) // MOVQ CX, ic<>+16(FP) + self.Emit("MOVQ", _CX, 
_IC) // MOVQ CX, IC + self.Emit("MOVQ", _DI, _ARG_vp) // MOVQ DI, vp<>+24(FP) + self.Emit("MOVQ", _DI, _VP) // MOVQ DI, VP + self.Emit("MOVQ", _SI, _ARG_sb) // MOVQ SI, sb<>+32(FP) + self.Emit("MOVQ", _SI, _ST) // MOVQ SI, ST + self.Emit("MOVQ", _R8, _ARG_fv) // MOVQ R8, fv<>+40(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_sv_p) // MOVQ $0, sv.p<>+48(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_sv_n) // MOVQ $0, sv.n<>+56(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_vk) // MOVQ $0, vk<>+64(FP) + self.Emit("MOVQ", jit.Imm(0), _VAR_et) // MOVQ $0, et<>+120(FP) + // initialize digital buffer first + self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_st_Dc) // MOVQ $_MaxDigitNums, ss.Dcap + self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX + self.Emit("MOVQ", _AX, _VAR_st_Db) // MOVQ AX, ss.Dbuf +} + +/** Function Calling Helpers **/ + +var ( + _REG_go = []obj.Addr { _ST, _VP, _IP, _IL, _IC } + _REG_rt = []obj.Addr { _ST, _VP, _IP, _IL, _IC, _IL } +) + +func (self *_Assembler) save(r ...obj.Addr) { + for i, v := range r { + if i > _FP_saves / 8 - 1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8)) + } + } +} + +func (self *_Assembler) load(r ...obj.Addr) { + for i, v := range r { + if i > _FP_saves / 8 - 1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v) + } + } +} + +func (self *_Assembler) call(fn obj.Addr) { + self.Emit("MOVQ", fn, _R9) // MOVQ ${fn}, R11 + self.Rjmp("CALL", _R9) // CALL R11 +} + +func (self *_Assembler) call_go(fn obj.Addr) { + self.save(_REG_go...) // SAVE $REG_go + self.call(fn) + self.load(_REG_go...) // LOAD $REG_go +} + +func (self *_Assembler) callc(fn obj.Addr) { + self.Emit("XCHGQ", _IP, _BP) + self.call(fn) + self.Emit("XCHGQ", _IP, _BP) +} + +func (self *_Assembler) call_c(fn obj.Addr) { + self.Emit("XCHGQ", _IC, _BX) + self.callc(fn) + self.Emit("XCHGQ", _IC, _BX) +} + +func (self *_Assembler) call_sf(fn obj.Addr) { + self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI + self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP) + self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI + self.Emit("LEAQ", jit.Ptr(_ST, _FsmOffset), _DX) // LEAQ _FsmOffset(ST), DX + self.Emit("MOVQ", _ARG_fv, _CX) + self.callc(fn) + self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC +} + +func (self *_Assembler) call_vf(fn obj.Addr) { + self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI + self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP) + self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI + self.Emit("LEAQ", _VAR_st, _DX) // LEAQ st, DX + self.callc(fn) + self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC +} + +/** Assembler Error Handlers **/ + +var ( + _F_convT64 = jit.Func(convT64) + _F_error_wrap = jit.Func(error_wrap) + _F_error_type = jit.Func(error_type) + _F_error_field = jit.Func(error_field) + _F_error_value = jit.Func(error_value) + _F_error_mismatch = jit.Func(error_mismatch) +) + +var ( + _I_int8 , _T_int8 = rtype(reflect.TypeOf(int8(0))) + _I_int16 , _T_int16 = rtype(reflect.TypeOf(int16(0))) + _I_int32 , _T_int32 = rtype(reflect.TypeOf(int32(0))) + _I_uint8 , _T_uint8 = rtype(reflect.TypeOf(uint8(0))) + _I_uint16 , _T_uint16 = rtype(reflect.TypeOf(uint16(0))) + _I_uint32 , _T_uint32 = rtype(reflect.TypeOf(uint32(0))) + _I_float32 , _T_float32 = rtype(reflect.TypeOf(float32(0))) +) + +var ( + _T_error = rt.UnpackType(errorType) + _I_base64_CorruptInputError = jit.Itab(_T_error, base64CorruptInputError) +) + 
+var ( + _V_stackOverflow = jit.Imm(int64(uintptr(unsafe.Pointer(&stackOverflow)))) + _I_json_UnsupportedValueError = jit.Itab(_T_error, reflect.TypeOf(new(json.UnsupportedValueError))) +) + +func (self *_Assembler) type_error() { + self.Link(_LB_type_error) // _type_error: + self.call_go(_F_error_type) // CALL_GO error_type + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) mismatch_error() { + self.Link(_LB_mismatch_error) // _mismatch_error: + self.Emit("MOVQ", _ARG_sp, _AX) + self.Emit("MOVQ", _ARG_sl, _BX) + self.Emit("MOVQ", _VAR_ic, _CX) + self.Emit("MOVQ", _VAR_et, _DI) + self.call_go(_F_error_mismatch) // CALL_GO error_mismatch + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) field_error() { + self.Link(_LB_field_error) // _field_error: + self.Emit("MOVQ", _ARG_sv_p, _AX) // MOVQ sv.p, AX + self.Emit("MOVQ", _ARG_sv_n, _BX) // MOVQ sv.n, BX + self.call_go(_F_error_field) // CALL_GO error_field + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) range_error() { + self.Link(_LB_range_error) // _range_error: + self.Emit("MOVQ", _ET, _CX) // MOVQ ET, CX + self.slice_from(_VAR_st_Ep, 0) // SLICE st.Ep, $0 + self.Emit("MOVQ", _DI, _AX) // MOVQ DI, AX + self.Emit("MOVQ", _EP, _DI) // MOVQ EP, DI + self.Emit("MOVQ", _SI, _BX) // MOVQ SI, BX + self.call_go(_F_error_value) // CALL_GO error_value + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) stack_error() { + self.Link(_LB_stack_error) // _stack_error: + self.Emit("MOVQ", _V_stackOverflow, _EP) // MOVQ ${_V_stackOverflow}, EP + self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ ${_I_json_UnsupportedValueError}, ET + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) base64_error() { + self.Link(_LB_base64_error) + self.Emit("NEGQ", _AX) // NEGQ AX + self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX + self.call_go(_F_convT64) // CALL_GO convT64 + self.Emit("MOVQ", _AX, _EP) // MOVQ AX, EP + self.Emit("MOVQ", _I_base64_CorruptInputError, _ET) // MOVQ ${itab(base64.CorruptInputError)}, ET + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) parsing_error() { + self.Link(_LB_eof_error) // _eof_error: + self.Emit("MOVQ" , _IL, _IC) // MOVQ IL, IC + self.Emit("MOVL" , jit.Imm(int64(types.ERR_EOF)), _EP) // MOVL ${types.ERR_EOF}, EP + self.Sjmp("JMP" , _LB_parsing_error) // JMP _parsing_error + self.Link(_LB_unquote_error) // _unquote_error: + self.Emit("SUBQ" , _VAR_sr, _SI) // SUBQ sr, SI + self.Emit("SUBQ" , _SI, _IC) // SUBQ SI, IC + self.Link(_LB_parsing_error_v) // _parsing_error_v: + self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP + self.Emit("NEGQ" , _EP) // NEGQ EP + self.Sjmp("JMP" , _LB_parsing_error) // JMP _parsing_error + self.Link(_LB_char_m3_error) // _char_m3_error: + self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC + self.Link(_LB_char_m2_error) // _char_m2_error: + self.Emit("SUBQ" , jit.Imm(2), _IC) // SUBQ $2, IC + self.Sjmp("JMP" , _LB_char_0_error) // JMP _char_0_error + self.Link(_LB_im_error) // _im_error: + self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPB CX, (IP)(IC) + self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error + self.Emit("SHRL" , jit.Imm(8), _CX) // SHRL $8, CX + self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 1)) // CMPB CX, 1(IP)(IC) + self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error + self.Emit("SHRL" , jit.Imm(8), _CX) // SHRL $8, CX + self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 2)) // CMPB CX, 2(IP)(IC) + self.Sjmp("JNE" , _LB_char_2_error) //
JNE _char_2_error + self.Sjmp("JMP" , _LB_char_3_error) // JMP _char_3_error + self.Link(_LB_char_4_error) // _char_4_error: + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_3_error) // _char_3_error: + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_2_error) // _char_2_error: + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_1_error) // _char_1_error: + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Link(_LB_char_0_error) // _char_0_error: + self.Emit("MOVL" , jit.Imm(int64(types.ERR_INVALID_CHAR)), _EP) // MOVL ${types.ERR_INVALID_CHAR}, EP + self.Link(_LB_parsing_error) // _parsing_error: + self.Emit("MOVQ" , _EP, _DI) // MOVQ EP, DI + self.Emit("MOVQ", _ARG_sp, _AX) // MOVQ sp, AX + self.Emit("MOVQ", _ARG_sl, _BX) // MOVQ sl, BX + self.Emit("MOVQ" , _IC, _CX) // MOVQ IC, CX + self.call_go(_F_error_wrap) // CALL_GO error_wrap + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) _asm_OP_dismatch_err(p *_Instr) { + self.Emit("MOVQ", _IC, _VAR_ic) + self.Emit("MOVQ", jit.Type(p.vt()), _ET) + self.Emit("MOVQ", _ET, _VAR_et) +} + +func (self *_Assembler) _asm_OP_go_skip(p *_Instr) { + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Xref(p.vi(), 4) + // self.Byte(0xcc) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP" , _LB_skip_one) // JMP _skip_one +} + +func (self *_Assembler) skip_one() { + self.Link(_LB_skip_one) // _skip_one: + self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + self.Emit("MOVQ" , _VAR_pc, _R9) // MOVQ pc, R9 + // self.Byte(0xcc) + self.Rjmp("JMP" , _R9) // JMP (R9) +} + +func (self *_Assembler) skip_key_value() { + self.Link(_LB_skip_key_value) // _skip_key_value: + // skip the key + self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + // match char ':' + self.lspace("_global_1") + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(':')) + self.Sjmp("JNE" , _LB_parsing_error_v) // JNE _parse_error_v + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.lspace("_global_2") + // skip the value + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + // jump back to specified address + self.Emit("MOVQ" , _VAR_pc, _R9) // MOVQ pc, R9 + self.Rjmp("JMP" , _R9) // JMP (R9) +} + + +/** Memory Management Routines **/ + +var ( + _T_byte = jit.Type(byteType) + _F_mallocgc = jit.Func(mallocgc) +) + +func (self *_Assembler) malloc_AX(nb obj.Addr, ret obj.Addr) { + self.Emit("MOVQ", nb, _AX) // MOVQ ${nb}, AX + self.Emit("MOVQ", _T_byte, _BX) // MOVQ ${type(byte)}, BX + self.Emit("XORL", _CX, _CX) // XORL CX, CX + self.call_go(_F_mallocgc) // CALL_GO mallocgc + self.Emit("MOVQ", _AX, ret) // MOVQ AX, ${ret} +} + +func (self *_Assembler) valloc(vt reflect.Type, ret obj.Addr) { + self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX + self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ ${vt}, BX + self.Emit("MOVB", jit.Imm(1), _CX) // MOVB $1, CX + self.call_go(_F_mallocgc) // CALL_GO mallocgc + self.Emit("MOVQ", _AX, ret) // MOVQ AX, ${ret} +} + +func (self *_Assembler) valloc_AX(vt reflect.Type) { + self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ
${vt.Size()}, AX + self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ ${vt}, BX + self.Emit("MOVB", jit.Imm(1), _CX) // MOVB $1, CX + self.call_go(_F_mallocgc) // CALL_GO mallocgc +} + +func (self *_Assembler) vfollow(vt reflect.Type) { + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n} + self.valloc_AX(vt) // VALLOC ${vt}, AX + self.WritePtrAX(1, jit.Ptr(_VP, 0), true) // MOVQ AX, (VP) + self.Link("_end_{n}") // _end_{n}: + self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP +} + +/** Value Parsing Routines **/ + +var ( + _F_vstring = jit.Imm(int64(native.S_vstring)) + _F_vnumber = jit.Imm(int64(native.S_vnumber)) + _F_vsigned = jit.Imm(int64(native.S_vsigned)) + _F_vunsigned = jit.Imm(int64(native.S_vunsigned)) +) + +func (self *_Assembler) check_err(vt reflect.Type, pin string, pin2 int) { + self.Emit("MOVQ" , _VAR_st_Vt, _AX) // MOVQ st.Vt, AX + self.Emit("TESTQ", _AX, _AX) // CMPQ AX, ${native.V_STRING} + // try to skip the value + if vt != nil { + self.Sjmp("JNS" , "_check_err_{n}") // JNE _parsing_error_v + self.Emit("MOVQ", jit.Type(vt), _ET) + self.Emit("MOVQ", _ET, _VAR_et) + if pin2 != -1 { + self.Emit("SUBQ", jit.Imm(1), _BX) + self.Emit("MOVQ", _BX, _VAR_ic) + self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9 + self.Xref(pin2, 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP" , _LB_skip_key_value) + } else { + self.Emit("MOVQ", _BX, _VAR_ic) + self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref(pin, 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP" , _LB_skip_one) + } + self.Link("_check_err_{n}") + } else { + self.Sjmp("JS" , _LB_parsing_error_v) // JNE _parsing_error_v + } +} + +func (self *_Assembler) check_eof(d int64) { + if d == 1 { + self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error + } else { + self.Emit("LEAQ", jit.Ptr(_IC, d), _AX) // LEAQ ${d}(IC), AX + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA" , _LB_eof_error) // JA _eof_error + } +} + + +func (self *_Assembler) parse_string() { + self.Emit("MOVQ", _ARG_fv, _CX) + self.call_vf(_F_vstring) + self.check_err(nil, "", -1) +} + +func (self *_Assembler) parse_number(vt reflect.Type, pin string, pin2 int) { + self.Emit("MOVQ", _IC, _BX) // save ic when call native func + self.call_vf(_F_vnumber) + self.check_err(vt, pin, pin2) +} + +func (self *_Assembler) parse_signed(vt reflect.Type, pin string, pin2 int) { + self.Emit("MOVQ", _IC, _BX) // save ic when call native func + self.call_vf(_F_vsigned) + self.check_err(vt, pin, pin2) +} + +func (self *_Assembler) parse_unsigned(vt reflect.Type, pin string, pin2 int) { + self.Emit("MOVQ", _IC, _BX) // save ic when call native func + self.call_vf(_F_vunsigned) + self.check_err(vt, pin, pin2) +} + +// Pointer: DI, Size: SI, Return: R9 +func (self *_Assembler) copy_string() { + self.Link("_copy_string") + self.Emit("MOVQ", _DI, _VAR_bs_p) + self.Emit("MOVQ", _SI, _VAR_bs_n) + self.Emit("MOVQ", _R9, _VAR_bs_LR) + self.malloc_AX(_SI, _ARG_sv_p) + self.Emit("MOVQ", _VAR_bs_p, _BX) + self.Emit("MOVQ", _VAR_bs_n, _CX) + self.call_go(_F_memmove) + self.Emit("MOVQ", _ARG_sv_p, _DI) + self.Emit("MOVQ", _VAR_bs_n, _SI) + self.Emit("MOVQ", _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) +} + +// Pointer: DI, Size: SI, Return: R9 +func (self *_Assembler) escape_string() { + self.Link("_escape_string") + self.Emit("MOVQ" , _DI, _VAR_bs_p) + self.Emit("MOVQ" , _SI, _VAR_bs_n) + self.Emit("MOVQ" , _R9, _VAR_bs_LR) + self.malloc_AX(_SI, 
_DX) // MALLOC SI, DX + self.Emit("MOVQ" , _DX, _ARG_sv_p) + self.Emit("MOVQ" , _VAR_bs_p, _DI) + self.Emit("MOVQ" , _VAR_bs_n, _SI) + self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX + self.Emit("XORL" , _R8, _R8) // XORL R8, R8 + self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, fv + self.Emit("SETCC", _R8) // SETCC R8 + self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 + self.call_c(_F_unquote) // CALL unquote + self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI + self.Emit("ADDQ" , jit.Imm(1), _SI) // ADDQ $1, SI + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error + self.Emit("MOVQ" , _AX, _SI) + self.Emit("MOVQ" , _ARG_sv_p, _DI) + self.Emit("MOVQ" , _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) +} + +func (self *_Assembler) escape_string_twice() { + self.Link("_escape_string_twice") + self.Emit("MOVQ" , _DI, _VAR_bs_p) + self.Emit("MOVQ" , _SI, _VAR_bs_n) + self.Emit("MOVQ" , _R9, _VAR_bs_LR) + self.malloc_AX(_SI, _DX) // MALLOC SI, DX + self.Emit("MOVQ" , _DX, _ARG_sv_p) + self.Emit("MOVQ" , _VAR_bs_p, _DI) + self.Emit("MOVQ" , _VAR_bs_n, _SI) + self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX + self.Emit("MOVL" , jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 + self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, AX + self.Emit("XORL" , _AX, _AX) // XORL AX, AX + self.Emit("SETCC", _AX) // SETCC AX + self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _AX) // SHLQ ${types.B_UNICODE_REPLACE}, AX + self.Emit("ORQ" , _AX, _R8) // ORQ AX, R8 + self.call_c(_F_unquote) // CALL unquote + self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI + self.Emit("ADDQ" , jit.Imm(3), _SI) // ADDQ $3, SI + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error + self.Emit("MOVQ" , _AX, _SI) + self.Emit("MOVQ" , _ARG_sv_p, _DI) + self.Emit("MOVQ" , _VAR_bs_LR, _R9) + self.Rjmp("JMP", _R9) +} + +/** Range Checking Routines **/ + +var ( + _V_max_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_max_f32)))) + _V_min_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_min_f32)))) +) + +var ( + _Vp_max_f32 = new(float64) + _Vp_min_f32 = new(float64) +) + +func init() { + *_Vp_max_f32 = math.MaxFloat32 + *_Vp_min_f32 = -math.MaxFloat32 +} + +func (self *_Assembler) range_single_X0() { + self.Emit("MOVSD" , _VAR_st_Dv, _X0) // MOVSD st.Dv, X0 + self.Emit("MOVQ" , _V_max_f32, _CX) // MOVQ _max_f32, CX + self.Emit("MOVQ" , jit.Gitab(_I_float32), _ET) // MOVQ ${itab(float32)}, ET + self.Emit("MOVQ" , jit.Gtype(_T_float32), _EP) // MOVQ ${type(float32)}, EP + self.Emit("UCOMISD" , jit.Ptr(_CX, 0), _X0) // UCOMISD (CX), X0 + self.Sjmp("JA" , _LB_range_error) // JA _range_error + self.Emit("MOVQ" , _V_min_f32, _CX) // MOVQ _min_f32, CX + self.Emit("MOVSD" , jit.Ptr(_CX, 0), _X1) // MOVSD (CX), X1 + self.Emit("UCOMISD" , _X0, _X1) // UCOMISD X0, X1 + self.Sjmp("JA" , _LB_range_error) // JA _range_error + self.Emit("CVTSD2SS", _X0, _X0) // CVTSD2SS X0, X0 +} + +func (self *_Assembler) range_signed_CX(i *rt.GoItab, t *rt.GoType, a int64, b int64) { + self.Emit("MOVQ", _VAR_st_Iv, _CX) // MOVQ st.Iv, CX + self.Emit("MOVQ", jit.Gitab(i), _ET) // MOVQ ${i}, ET + self.Emit("MOVQ", jit.Gtype(t), _EP) // MOVQ ${t}, EP + self.Emit("CMPQ", _CX, jit.Imm(a)) // CMPQ CX, ${a} + self.Sjmp("JL" , _LB_range_error) // JL _range_error + self.Emit("CMPQ", _CX, jit.Imm(b)) // CMPQ CX, ${B} + self.Sjmp("JG" , 
_LB_range_error) // JG _range_error +} + +func (self *_Assembler) range_unsigned_CX(i *rt.GoItab, t *rt.GoType, v uint64) { + self.Emit("MOVQ" , _VAR_st_Iv, _CX) // MOVQ st.Iv, CX + self.Emit("MOVQ" , jit.Gitab(i), _ET) // MOVQ ${i}, ET + self.Emit("MOVQ" , jit.Gtype(t), _EP) // MOVQ ${t}, EP + self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX + self.Sjmp("JS" , _LB_range_error) // JS _range_error + self.Emit("CMPQ" , _CX, jit.Imm(int64(v))) // CMPQ CX, ${a} + self.Sjmp("JA" , _LB_range_error) // JA _range_error +} + +/** String Manipulating Routines **/ + +var ( + _F_unquote = jit.Imm(int64(native.S_unquote)) +) + +func (self *_Assembler) slice_from(p obj.Addr, d int64) { + self.Emit("MOVQ", p, _SI) // MOVQ ${p}, SI + self.slice_from_r(_SI, d) // SLICE_R SI, ${d} +} + +func (self *_Assembler) slice_from_r(p obj.Addr, d int64) { + self.Emit("LEAQ", jit.Sib(_IP, p, 1, 0), _DI) // LEAQ (IP)(${p}), DI + self.Emit("NEGQ", p) // NEGQ ${p} + self.Emit("LEAQ", jit.Sib(_IC, p, 1, d), _SI) // LEAQ d(IC)(${p}), SI +} + +func (self *_Assembler) unquote_once(p obj.Addr, n obj.Addr, stack bool, copy bool) { + self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1 + self.Emit("CMPQ", _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1 + self.Sjmp("JE" , "_noescape_{n}") // JE _escape_{n} + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_once_write_{n}", 4) + self.Sjmp("JMP" , "_escape_string") + self.Link("_noescape_{n}") + if copy { + self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv) + self.Sjmp("JNC", "_unquote_once_write_{n}") + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_once_write_{n}", 4) + self.Sjmp("JMP", "_copy_string") + } + self.Link("_unquote_once_write_{n}") + self.Emit("MOVQ", _SI, n) // MOVQ SI, ${n} + if stack { + self.Emit("MOVQ", _DI, p) + } else { + self.WriteRecNotAX(10, _DI, p, false, false) + } +} + +func (self *_Assembler) unquote_twice(p obj.Addr, n obj.Addr, stack bool) { + self.Emit("CMPQ" , _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1 + self.Sjmp("JE" , _LB_eof_error) // JE _eof_error + self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -3), jit.Imm('\\')) // CMPB -3(IP)(IC), $'\\' + self.Sjmp("JNE" , _LB_char_m3_error) // JNE _char_m3_error + self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -2), jit.Imm('"')) // CMPB -2(IP)(IC), $'"' + self.Sjmp("JNE" , _LB_char_m2_error) // JNE _char_m2_error + self.slice_from(_VAR_st_Iv, -3) // SLICE st.Iv, $-3 + self.Emit("MOVQ" , _SI, _AX) // MOVQ SI, AX + self.Emit("ADDQ" , _VAR_st_Iv, _AX) // ADDQ st.Iv, AX + self.Emit("CMPQ" , _VAR_st_Ep, _AX) // CMPQ st.Ep, AX + self.Sjmp("JE" , "_noescape_{n}") // JE _noescape_{n} + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_twice_write_{n}", 4) + self.Sjmp("JMP" , "_escape_string_twice") + self.Link("_noescape_{n}") // _noescape_{n}: + self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv) + self.Sjmp("JNC", "_unquote_twice_write_{n}") + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_unquote_twice_write_{n}", 4) + self.Sjmp("JMP", "_copy_string") + self.Link("_unquote_twice_write_{n}") + self.Emit("MOVQ" , _SI, n) // MOVQ SI, ${n} + if stack { + self.Emit("MOVQ", _DI, p) + } else { + self.WriteRecNotAX(12, _DI, p, false, false) + } + self.Link("_unquote_twice_end_{n}") +} + +/** Memory Clearing Routines **/ + +var ( + _F_memclrHasPointers = jit.Func(memclrHasPointers) + _F_memclrNoHeapPointers = jit.Func(memclrNoHeapPointers) +) + +func (self *_Assembler) mem_clear_fn(ptrfree bool) { + if !ptrfree { + self.call_go(_F_memclrHasPointers) + } else { + 
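// the element type holds no heap pointers here, so the cheaper barrier-free runtime clear is safe +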
self.call_go(_F_memclrNoHeapPointers) + } +} + +func (self *_Assembler) mem_clear_rem(size int64, ptrfree bool) { + self.Emit("MOVQ", jit.Imm(size), _BX) // MOVQ ${size}, BX + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _AX) // MOVQ (ST)(AX), AX + self.Emit("SUBQ", _VP, _AX) // SUBQ VP, AX + self.Emit("ADDQ", _AX, _BX) // ADDQ AX, BX + self.Emit("MOVQ", _VP, _AX) // MOVQ VP, (SP) + self.mem_clear_fn(ptrfree) // CALL_GO memclr{Has,NoHeap}Pointers +} + +/** Map Assigning Routines **/ + +var ( + _F_mapassign = jit.Func(mapassign) + _F_mapassign_fast32 = jit.Func(mapassign_fast32) + _F_mapassign_faststr = jit.Func(mapassign_faststr) + _F_mapassign_fast64ptr = jit.Func(mapassign_fast64ptr) +) + +var ( + _F_decodeJsonUnmarshaler obj.Addr + _F_decodeTextUnmarshaler obj.Addr +) + +func init() { + _F_decodeJsonUnmarshaler = jit.Func(decodeJsonUnmarshaler) + _F_decodeTextUnmarshaler = jit.Func(decodeTextUnmarshaler) +} + +func (self *_Assembler) mapaccess_ptr(t reflect.Type) { + if rt.MapType(rt.UnpackType(t)).IndirectElem() { + self.vfollow(t.Elem()) + } +} + +func (self *_Assembler) mapassign_std(t reflect.Type, v obj.Addr) { + self.Emit("LEAQ", v, _AX) // LEAQ ${v}, AX + self.mapassign_call_from_AX(t, _F_mapassign) // MAPASSIGN ${t}, mapassign +} + +func (self *_Assembler) mapassign_str_fast(t reflect.Type, p obj.Addr, n obj.Addr) { + self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX + self.Emit("MOVQ", _VP, _BX) // MOVQ VP, BX + self.Emit("MOVQ", p, _CX) // MOVQ ${p}, CX + self.Emit("MOVQ", n, _DI) // MOVQ ${n}, DI + self.call_go(_F_mapassign_faststr) // CALL_GO ${fn} + self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP + self.mapaccess_ptr(t) +} + +func (self *_Assembler) mapassign_call_from_AX(t reflect.Type, fn obj.Addr) { + self.Emit("MOVQ", _AX, _CX) + self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX + self.Emit("MOVQ", _VP, _BX) // MOVQ VP, _BX + self.call_go(fn) // CALL_GO ${fn} + self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP +} + +func (self *_Assembler) mapassign_fastx(t reflect.Type, fn obj.Addr) { + self.mapassign_call_from_AX(t, fn) + self.mapaccess_ptr(t) +} + +func (self *_Assembler) mapassign_utext(t reflect.Type, addressable bool) { + pv := false + vk := t.Key() + tk := t.Key() + + /* deref pointer if needed */ + if vk.Kind() == reflect.Ptr { + pv = true + vk = vk.Elem() + } + + /* addressable value with pointer receiver */ + if addressable { + pv = false + tk = reflect.PtrTo(tk) + } + + /* allocate the key, and call the unmarshaler */ + self.valloc(vk, _BX) // VALLOC ${vk}, BX + // must spill vk pointer since next call_go may invoke GC + self.Emit("MOVQ" , _BX, _ARG_vk) + self.Emit("MOVQ" , jit.Type(tk), _AX) // MOVQ ${tk}, AX + self.Emit("MOVQ" , _ARG_sv_p, _CX) // MOVQ sv.p, CX + self.Emit("MOVQ" , _ARG_sv_n, _DI) // MOVQ sv.n, DI + self.call_go(_F_decodeTextUnmarshaler) // CALL_GO decodeTextUnmarshaler + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.Emit("MOVQ" , _ARG_vk, _AX) // MOVQ VAR.vk, AX + self.Emit("MOVQ", jit.Imm(0), _ARG_vk) + + /* select the correct assignment function */ + if !pv { + self.mapassign_call_from_AX(t, _F_mapassign) + } else { + self.mapassign_fastx(t, _F_mapassign_fast64ptr) + } +} + +/** External Unmarshaler Routines **/ + +var ( + _F_skip_one = jit.Imm(int64(native.S_skip_one)) + _F_skip_array = jit.Imm(int64(native.S_skip_array)) + _F_skip_object = jit.Imm(int64(native.S_skip_object)) + _F_skip_number = jit.Imm(int64(native.S_skip_number)) +) + 
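+// The unmarshal_json/unmarshal_text routines below feed user-defined
+// unmarshalers: skip_one first measures the extent of the next JSON value,
+// then the raw bytes are sliced out of the input and handed to the target's
+// UnmarshalJSON / UnmarshalText method. A minimal sketch of that contract in
+// plain Go, using only the standard encoding/json package (RawBox is a
+// hypothetical type used purely for illustration, not part of this package):
+//
+//	package main
+//
+//	import (
+//	    "encoding/json"
+//	    "fmt"
+//	)
+//
+//	type RawBox struct{ raw []byte }
+//
+//	// UnmarshalJSON receives exactly the bytes of one complete JSON value.
+//	func (b *RawBox) UnmarshalJSON(data []byte) error {
+//	    b.raw = append(b.raw[:0], data...)
+//	    return nil
+//	}
+//
+//	func main() {
+//	    var v struct{ F RawBox }
+//	    if err := json.Unmarshal([]byte(`{"F": {"x": 1}}`), &v); err != nil {
+//	        panic(err)
+//	    }
+//	    fmt.Printf("%s\n", v.F.raw) // prints: {"x": 1}
+//	}
+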
+func (self *_Assembler) unmarshal_json(t reflect.Type, deref bool) { + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v + self.slice_from_r(_AX, 0) // SLICE_R AX, $0 + self.Emit("MOVQ" , _DI, _ARG_sv_p) // MOVQ DI, sv.p + self.Emit("MOVQ" , _SI, _ARG_sv_n) // MOVQ SI, sv.n + self.unmarshal_func(t, _F_decodeJsonUnmarshaler, deref) // UNMARSHAL json, ${t}, ${deref} +} + +func (self *_Assembler) unmarshal_text(t reflect.Type, deref bool) { + self.parse_string() // PARSE STRING + self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + self.unmarshal_func(t, _F_decodeTextUnmarshaler, deref) // UNMARSHAL text, ${t}, ${deref} +} + +func (self *_Assembler) unmarshal_func(t reflect.Type, fn obj.Addr, deref bool) { + pt := t + vk := t.Kind() + + /* allocate the field if needed */ + if deref && vk == reflect.Ptr { + self.Emit("MOVQ" , _VP, _BX) // MOVQ VP, BX + self.Emit("MOVQ" , jit.Ptr(_BX, 0), _BX) // MOVQ (BX), BX + self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX + self.Sjmp("JNZ" , "_deref_{n}") // JNZ _deref_{n} + self.valloc(t.Elem(), _BX) // VALLOC ${t.Elem()}, BX + self.WriteRecNotAX(3, _BX, jit.Ptr(_VP, 0), false, false) // MOVQ BX, (VP) + self.Link("_deref_{n}") // _deref_{n}: + } else { + /* set value pointer */ + self.Emit("MOVQ", _VP, _BX) // MOVQ (VP), BX + } + + /* set value type */ + self.Emit("MOVQ", jit.Type(pt), _AX) // MOVQ ${pt}, AX + + /* set the source string and call the unmarshaler */ + self.Emit("MOVQ" , _ARG_sv_p, _CX) // MOVQ sv.p, CX + self.Emit("MOVQ" , _ARG_sv_n, _DI) // MOVQ sv.n, DI + self.call_go(fn) // CALL_GO ${fn} + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error +} + +/** Dynamic Decoding Routine **/ + +var ( + _F_decodeTypedPointer obj.Addr +) + +func init() { + _F_decodeTypedPointer = jit.Func(decodeTypedPointer) +} + +func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) { + self.Emit("MOVQ" , vp, _SI) // MOVQ ${vp}, SI + self.Emit("MOVQ" , vt, _DI) // MOVQ ${vt}, DI + self.Emit("MOVQ", _ARG_sp, _AX) // MOVQ sp, AX + self.Emit("MOVQ", _ARG_sl, _BX) // MOVQ sp, BX + self.Emit("MOVQ" , _IC, _CX) // MOVQ IC, CX + self.Emit("MOVQ" , _ST, _R8) // MOVQ ST, R8 + self.Emit("MOVQ" , _ARG_fv, _R9) // MOVQ fv, R9 + self.save(_REG_rt...) + self.Emit("MOVQ", _F_decodeTypedPointer, _IL) // MOVQ ${fn}, R11 + self.Rjmp("CALL", _IL) // CALL R11 + self.load(_REG_rt...) 
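+ // decodeTypedPointer returns (rc int, err error); under the Go 1.17 register ABI the new cursor comes back in AX and the error itab/data pair in BX/CX, unpacked below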
+ self.Emit("MOVQ" , _AX, _IC) // MOVQ AX, IC + self.Emit("MOVQ" , _BX, _ET) // MOVQ BX, ET + self.Emit("MOVQ" , _CX, _EP) // MOVQ CX, EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error +} + +/** OpCode Assembler Functions **/ + +var ( + _F_memequal = jit.Func(memequal) + _F_memmove = jit.Func(memmove) + _F_growslice = jit.Func(growslice) + _F_makeslice = jit.Func(makeslice) + _F_makemap_small = jit.Func(makemap_small) + _F_mapassign_fast64 = jit.Func(mapassign_fast64) +) + +var ( + _F_lspace = jit.Imm(int64(native.S_lspace)) + _F_strhash = jit.Imm(int64(caching.S_strhash)) +) + +var ( + _F_b64decode = jit.Imm(int64(_subr__b64decode)) + _F_decodeValue = jit.Imm(int64(_subr_decode_value)) +) + +var ( + _F_FieldMap_GetCaseInsensitive obj.Addr +) + +const ( + _MODE_AVX2 = 1 << 2 +) + +const ( + _Fe_ID = int64(unsafe.Offsetof(caching.FieldEntry{}.ID)) + _Fe_Name = int64(unsafe.Offsetof(caching.FieldEntry{}.Name)) + _Fe_Hash = int64(unsafe.Offsetof(caching.FieldEntry{}.Hash)) +) + +const ( + _Vk_Ptr = int64(reflect.Ptr) + _Gt_KindFlags = int64(unsafe.Offsetof(rt.GoType{}.KindFlags)) +) + +func init() { + _F_FieldMap_GetCaseInsensitive = jit.Func((*caching.FieldMap).GetCaseInsensitive) +} + +func (self *_Assembler) _asm_OP_any(_ *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX + self.Emit("TESTQ" , _CX, _CX) // TESTQ CX, CX + self.Sjmp("JZ" , "_decode_{n}") // JZ _decode_{n} + self.Emit("CMPQ" , _CX, _VP) // CMPQ CX, VP + self.Sjmp("JE" , "_decode_{n}") // JE _decode_{n} + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX + self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX + self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr} + self.Sjmp("JNE" , "_decode_{n}") // JNE _decode_{n} + self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI + self.decode_dynamic(_AX, _DI) // DECODE AX, DI + self.Sjmp("JMP" , "_decode_end_{n}") // JMP _decode_end_{n} + self.Link("_decode_{n}") // _decode_{n}: + self.Emit("MOVQ" , _ARG_fv, _DF) // MOVQ fv, DF + self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 0)) // MOVQ _ST, (SP) + self.call(_F_decodeValue) // CALL decodeValue + self.Emit("MOVQ" , jit.Imm(0), jit.Ptr(_SP, 0)) // MOVQ _ST, (SP) + self.Emit("TESTQ" , _EP, _EP) // TESTQ EP, EP + self.Sjmp("JNZ" , _LB_parsing_error) // JNZ _parsing_error + self.Link("_decode_end_{n}") // _decode_end_{n}: +} + +func (self *_Assembler) _asm_OP_dyn(p *_Instr) { + self.Emit("MOVQ" , jit.Type(p.vt()), _ET) // MOVQ ${p.vt()}, ET + self.Emit("CMPQ" , jit.Ptr(_VP, 8), jit.Imm(0)) // CMPQ 8(VP), $0 + self.Sjmp("JE" , _LB_type_error) // JE _type_error + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _CX) // MOVQ (VP), CX + self.Emit("MOVQ" , jit.Ptr(_CX, 8), _CX) // MOVQ 8(CX), CX + self.Emit("MOVBLZX", jit.Ptr(_CX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(CX), DX + self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX + self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr} + self.Sjmp("JNE" , _LB_type_error) // JNE _type_error + self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI + self.decode_dynamic(_CX, _DI) // DECODE CX, DI + self.Link("_decode_end_{n}") // _decode_end_{n}: +} + +func (self *_Assembler) _asm_OP_str(_ *_Instr) { + self.parse_string() // PARSE STRING + self.unquote_once(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false, true) // UNQUOTE once, (VP), 8(VP) +} + +func (self *_Assembler) 
_asm_OP_bin(_ *_Instr) { + self.parse_string() // PARSE STRING + self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1 + self.Emit("MOVQ" , _DI, jit.Ptr(_VP, 0)) // MOVQ DI, (VP) + self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP) + self.Emit("SHRQ" , jit.Imm(2), _SI) // SHRQ $2, SI + self.Emit("LEAQ" , jit.Sib(_SI, _SI, 2, 0), _SI) // LEAQ (SI)(SI*2), SI + self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP) + self.malloc_AX(_SI, _SI) // MALLOC SI, SI + + // TODO: due to base64x's bug, only use AVX mode now + self.Emit("MOVL", jit.Imm(_MODE_JSON), _CX) // MOVL $_MODE_JSON, CX + + /* call the decoder */ + self.Emit("XORL" , _DX, _DX) // XORL DX, DX + self.Emit("MOVQ" , _VP, _DI) // MOVQ VP, DI + + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _R8) // MOVQ SI, (VP) + self.WriteRecNotAX(4, _SI, jit.Ptr(_VP, 0), true, false) // XCHGQ SI, (VP) + self.Emit("MOVQ" , _R8, _SI) + + self.Emit("XCHGQ", _DX, jit.Ptr(_VP, 8)) // XCHGQ DX, 8(VP) + self.call_c(_F_b64decode) // CALL b64decode + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_base64_error) // JS _base64_error + self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) +} + +func (self *_Assembler) _asm_OP_bool(_ *_Instr) { + self.Emit("LEAQ", jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA" , _LB_eof_error) // JA _eof_error + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('f')) // CMPB (IP)(IC), $'f' + self.Sjmp("JE" , "_false_{n}") // JE _false_{n} + self.Emit("MOVL", jit.Imm(_IM_true), _CX) // MOVL $"true", CX + self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC) + self.Sjmp("JE" , "_bool_true_{n}") + // try to skip the value + self.Emit("MOVQ", _IC, _VAR_ic) + self.Emit("MOVQ", _T_bool, _ET) + self.Emit("MOVQ", _ET, _VAR_et) + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_end_{n}", 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP" , _LB_skip_one) + + self.Link("_bool_true_{n}") + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + self.Emit("MOVB", jit.Imm(1), jit.Ptr(_VP, 0)) // MOVB $1, (VP) + self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n} + self.Link("_false_{n}") // _false_{n}: + self.Emit("ADDQ", jit.Imm(1), _AX) // ADDQ $1, AX + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC + self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA" , _LB_eof_error) // JA _eof_error + self.Emit("MOVL", jit.Imm(_IM_alse), _CX) // MOVL $"alse", CX + self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC) + self.Sjmp("JNE" , _LB_im_error) // JNE _im_error + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP) + self.Link("_end_{n}") // _end_{n}: +} + +func (self *_Assembler) _asm_OP_num(_ *_Instr) { + self.Emit("MOVQ", jit.Imm(0), _VAR_fl) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"')) + self.Emit("MOVQ", _IC, _BX) + self.Sjmp("JNE", "_skip_number_{n}") + self.Emit("MOVQ", jit.Imm(1), _VAR_fl) + self.Emit("ADDQ", jit.Imm(1), _IC) + self.Link("_skip_number_{n}") + + /* call skip_number */ + self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI + self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP) + self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI + self.callc(_F_skip_number) // CALL _F_skip_number + self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNS" , "_num_next_{n}") + + /* call skip one */ + self.Emit("MOVQ", _BX, _VAR_ic) + 
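// number parsing failed: stash the expected type and saved input position, then divert to the shared skip_one stub, which skips the malformed value and resumes at the pc recorded below +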
self.Emit("MOVQ", _T_number, _ET) + self.Emit("MOVQ", _ET, _VAR_et) + self.Byte(0x4c, 0x8d, 0x0d) + self.Sref("_num_end_{n}", 4) + self.Emit("MOVQ", _R9, _VAR_pc) + self.Sjmp("JMP" , _LB_skip_one) + + /* assgin string */ + self.Link("_num_next_{n}") + self.slice_from_r(_AX, 0) + self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv) + self.Sjmp("JNC", "_num_write_{n}") + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9 + self.Sref("_num_write_{n}", 4) + self.Sjmp("JMP", "_copy_string") + self.Link("_num_write_{n}") + self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP) + self.WriteRecNotAX(13, _DI, jit.Ptr(_VP, 0), false, false) + self.Emit("CMPQ", _VAR_fl, jit.Imm(1)) + self.Sjmp("JNE", "_num_end_{n}") + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"')) + self.Sjmp("JNE", _LB_char_0_error) + self.Emit("ADDQ", jit.Imm(1), _IC) + self.Link("_num_end_{n}") +} + +func (self *_Assembler) _asm_OP_i8(_ *_Instr) { + var pin = "_i8_end_{n}" + self.parse_signed(int8Type, pin, -1) // PARSE int8 + self.range_signed_CX(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8 + self.Emit("MOVB", _CX, jit.Ptr(_VP, 0)) // MOVB CX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_i16(_ *_Instr) { + var pin = "_i16_end_{n}" + self.parse_signed(int16Type, pin, -1) // PARSE int16 + self.range_signed_CX(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16 + self.Emit("MOVW", _CX, jit.Ptr(_VP, 0)) // MOVW CX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_i32(_ *_Instr) { + var pin = "_i32_end_{n}" + self.parse_signed(int32Type, pin, -1) // PARSE int32 + self.range_signed_CX(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32 + self.Emit("MOVL", _CX, jit.Ptr(_VP, 0)) // MOVL CX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_i64(_ *_Instr) { + var pin = "_i64_end_{n}" + self.parse_signed(int64Type, pin, -1) // PARSE int64 + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_u8(_ *_Instr) { + var pin = "_u8_end_{n}" + self.parse_unsigned(uint8Type, pin, -1) // PARSE uint8 + self.range_unsigned_CX(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8 + self.Emit("MOVB", _CX, jit.Ptr(_VP, 0)) // MOVB CX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_u16(_ *_Instr) { + var pin = "_u16_end_{n}" + self.parse_unsigned(uint16Type, pin, -1) // PARSE uint16 + self.range_unsigned_CX(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16 + self.Emit("MOVW", _CX, jit.Ptr(_VP, 0)) // MOVW CX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_u32(_ *_Instr) { + var pin = "_u32_end_{n}" + self.parse_unsigned(uint32Type, pin, -1) // PARSE uint32 + self.range_unsigned_CX(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32 + self.Emit("MOVL", _CX, jit.Ptr(_VP, 0)) // MOVL CX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_u64(_ *_Instr) { + var pin = "_u64_end_{n}" + self.parse_unsigned(uint64Type, pin, -1) // PARSE uint64 + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_f32(_ *_Instr) { + var pin = "_f32_end_{n}" + self.parse_number(float32Type, pin, -1) // PARSE NUMBER + self.range_single_X0() // RANGE float32 + self.Emit("MOVSS", _X0, jit.Ptr(_VP, 0)) // MOVSS X0, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_f64(_ *_Instr) { + var pin = "_f64_end_{n}" + 
self.parse_number(float64Type, pin, -1) // PARSE NUMBER + self.Emit("MOVSD", _VAR_st_Dv, _X0) // MOVSD st.Dv, X0 + self.Emit("MOVSD", _X0, jit.Ptr(_VP, 0)) // MOVSD X0, (VP) + self.Link(pin) +} + +func (self *_Assembler) _asm_OP_unquote(_ *_Instr) { + self.check_eof(2) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('\\')) // CMPB (IP)(IC), $'\\' + self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 1), jit.Imm('"')) // CMPB 1(IP)(IC), $'"' + self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error + self.Emit("ADDQ", jit.Imm(2), _IC) // ADDQ $2, IC + self.parse_string() // PARSE STRING + self.unquote_twice(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false) // UNQUOTE twice, (VP), 8(VP) +} + +func (self *_Assembler) _asm_OP_nil_1(_ *_Instr) { + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP) +} + +func (self *_Assembler) _asm_OP_nil_2(_ *_Instr) { + self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) +} + +func (self *_Assembler) _asm_OP_nil_3(_ *_Instr) { + self.Emit("XORL" , _AX, _AX) // XORL AX, AX + self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) + self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 16)) // MOVQ AX, 16(VP) +} + +func (self *_Assembler) _asm_OP_deref(p *_Instr) { + self.vfollow(p.vt()) +} + +func (self *_Assembler) _asm_OP_index(p *_Instr) { + self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ ${p.vi()}, AX + self.Emit("ADDQ", _AX, _VP) // ADDQ AX, VP +} + +func (self *_Assembler) _asm_OP_is_null(p *_Instr) { + self.Emit("LEAQ" , jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX + self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA" , "_not_null_{n}") // JA _not_null_{n} + self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null" + self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC + self.Xjmp("JE" , p.vi()) // JE {p.vi()} + self.Link("_not_null_{n}") // _not_null_{n}: +} + +func (self *_Assembler) _asm_OP_is_null_quote(p *_Instr) { + self.Emit("LEAQ" , jit.Ptr(_IC, 5), _AX) // LEAQ 5(IC), AX + self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL + self.Sjmp("JA" , "_not_null_quote_{n}") // JA _not_null_quote_{n} + self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null" + self.Sjmp("JNE" , "_not_null_quote_{n}") // JNE _not_null_quote_{n} + self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 4), jit.Imm('"')) // CMPB 4(IP)(IC), $'"' + self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC + self.Xjmp("JE" , p.vi()) // JE {p.vi()} + self.Link("_not_null_quote_{n}") // _not_null_quote_{n}: +} + +func (self *_Assembler) _asm_OP_map_init(_ *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n} + self.call_go(_F_makemap_small) // CALL_GO makemap_small + self.WritePtrAX(6, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Link("_end_{n}") // _end_{n}: + self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP +} + +func (self *_Assembler) _asm_OP_map_key_i8(p *_Instr) { + self.parse_signed(int8Type, "", p.vi()) // PARSE int8 + self.range_signed_CX(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8 + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int8, mapassign, st.Iv +} + +func (self *_Assembler) _asm_OP_map_key_i16(p *_Instr) { + self.parse_signed(int16Type, "", p.vi()) // PARSE int16 + self.range_signed_CX(_I_int16, _T_int16, math.MinInt16,
math.MaxInt16) // RANGE int16 + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int16, mapassign, st.Iv +} + +func (self *_Assembler) _asm_OP_map_key_i32(p *_Instr) { + self.parse_signed(int32Type, "", p.vi()) // PARSE int32 + self.range_signed_CX(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32 + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int32, mapassign, st.Iv + } else { + self.Emit("MOVQ", _CX, _AX) // MOVQ CX, AX + self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN int32, mapassign_fast32 + } +} + +func (self *_Assembler) _asm_OP_map_key_i64(p *_Instr) { + self.parse_signed(int64Type, "", p.vi()) // PARSE int64 + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int64, mapassign, st.Iv + } else { + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN int64, mapassign_fast64 + } +} + +func (self *_Assembler) _asm_OP_map_key_u8(p *_Instr) { + self.parse_unsigned(uint8Type, "", p.vi()) // PARSE uint8 + self.range_unsigned_CX(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8 + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint8, vt.Iv +} + +func (self *_Assembler) _asm_OP_map_key_u16(p *_Instr) { + self.parse_unsigned(uint16Type, "", p.vi()) // PARSE uint16 + self.range_unsigned_CX(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16 + self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint16, vt.Iv +} + +func (self *_Assembler) _asm_OP_map_key_u32(p *_Instr) { + self.parse_unsigned(uint32Type, "", p.vi()) // PARSE uint32 + self.range_unsigned_CX(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32 + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint32, vt.Iv + } else { + self.Emit("MOVQ", _CX, _AX) // MOVQ CX, AX + self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN uint32, mapassign_fast32 + } +} + +func (self *_Assembler) _asm_OP_map_key_u64(p *_Instr) { + self.parse_unsigned(uint64Type, "", p.vi()) // PARSE uint64 + if vt := p.vt(); !mapfast(vt) { + self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint64, vt.Iv + } else { + self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX + self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN uint64, mapassign_fast64 + } +} + +func (self *_Assembler) _asm_OP_map_key_f32(p *_Instr) { + self.parse_number(float32Type, "", p.vi()) // PARSE NUMBER + self.range_single_X0() // RANGE float32 + self.Emit("MOVSS", _X0, _VAR_st_Dv) // MOVSS X0, st.Dv + self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv +} + +func (self *_Assembler) _asm_OP_map_key_f64(p *_Instr) { + self.parse_number(float64Type, "", p.vi()) // PARSE NUMBER + self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv +} + +func (self *_Assembler) _asm_OP_map_key_str(p *_Instr) { + self.parse_string() // PARSE STRING + self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + if vt := p.vt(); !mapfast(vt) { + self.valloc(vt.Key(), _DI) + self.Emit("MOVOU", _ARG_sv, _X0) + self.Emit("MOVOU", _X0, jit.Ptr(_DI, 0)) + self.mapassign_std(vt, jit.Ptr(_DI, 0)) // MAPASSIGN string, DI, SI + } else { + self.mapassign_str_fast(vt, _ARG_sv_p, _ARG_sv_n) // MAPASSIGN string, DI, SI + } +} + +func (self *_Assembler) _asm_OP_map_key_utext(p *_Instr) { + self.parse_string() // PARSE STRING + self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + self.mapassign_utext(p.vt(), false) // MAPASSIGN 
utext, ${p.vt()}, false +} + +func (self *_Assembler) _asm_OP_map_key_utext_p(p *_Instr) { + self.parse_string() // PARSE STRING + self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n + self.mapassign_utext(p.vt(), true) // MAPASSIGN utext, ${p.vt()}, true +} + +func (self *_Assembler) _asm_OP_array_skip(_ *_Instr) { + self.call_sf(_F_skip_array) // CALL_SF skip_array + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v +} + +func (self *_Assembler) _asm_OP_array_clear(p *_Instr) { + self.mem_clear_rem(p.i64(), true) +} + +func (self *_Assembler) _asm_OP_array_clear_p(p *_Instr) { + self.mem_clear_rem(p.i64(), false) +} + +func (self *_Assembler) _asm_OP_slice_init(p *_Instr) { + self.Emit("XORL" , _AX, _AX) // XORL AX, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) + self.Emit("MOVQ" , jit.Ptr(_VP, 16), _BX) // MOVQ 16(VP), BX + self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX + self.Sjmp("JNZ" , "_done_{n}") // JNZ _done_{n} + self.Emit("MOVQ" , jit.Imm(_MinSlice), _CX) // MOVQ ${_MinSlice}, CX + self.Emit("MOVQ" , _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP) + self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, DX + self.call_go(_F_makeslice) // CALL_GO makeslice + self.WritePtrAX(7, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Emit("XORL" , _AX, _AX) // XORL AX, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP) + self.Link("_done_{n}") // _done_{n} +} + +func (self *_Assembler) _asm_OP_slice_append(p *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_VP, 8), _AX) // MOVQ 8(VP), AX + self.Emit("CMPQ" , _AX, jit.Ptr(_VP, 16)) // CMPQ AX, 16(VP) + self.Sjmp("JB" , "_index_{n}") // JB _index_{n} + self.Emit("MOVQ" , _AX, _SI) // MOVQ AX, SI + self.Emit("SHLQ" , jit.Imm(1), _SI) // SHLQ $1, SI + self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _BX) // MOVQ (VP), BX + self.Emit("MOVQ" , jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX + self.Emit("MOVQ" , jit.Ptr(_VP, 16), _DI) // MOVQ 16(VP), DI + self.call_go(_F_growslice) // CALL_GO growslice + self.WritePtrAX(8, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP) + self.Emit("MOVQ" , _BX, jit.Ptr(_VP, 8)) // MOVQ BX, 8(VP) + self.Emit("MOVQ" , _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP) + self.Emit("MOVQ" , _BX, _AX) // MOVQ BX, AX + self.Link("_index_{n}") // _index_{n}: + self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP) + self.Emit("MOVQ" , jit.Ptr(_VP, 0), _VP) // MOVQ (VP), VP + self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX + self.From("MULQ" , _CX) // MULQ CX + self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP +} + +func (self *_Assembler) _asm_OP_object_skip(_ *_Instr) { + self.call_sf(_F_skip_object) // CALL_SF skip_object + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v +} + +func (self *_Assembler) _asm_OP_object_next(_ *_Instr) { + self.call_sf(_F_skip_one) // CALL_SF skip_one + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v +} + +func (self *_Assembler) _asm_OP_struct_field(p *_Instr) { + assert_eq(caching.FieldEntrySize, 32, "invalid field entry size") + self.Emit("MOVQ" , jit.Imm(-1), _AX) // MOVQ $-1, AX + self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, sr + self.parse_string() // PARSE STRING + self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, false) // UNQUOTE once, sv.p, sv.n + self.Emit("LEAQ" , _ARG_sv, _AX) // LEAQ sv, AX + 
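// hash the field name with strhash: AX points at the sv string header, BX (zeroed next) carries the seed, and the hash comes back in AX +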
self.Emit("XORL" , _BX, _BX) // XORL BX, BX + self.call_go(_F_strhash) // CALL_GO strhash + self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9 + self.Emit("MOVQ" , jit.Imm(freezeFields(p.vf())), _CX) // MOVQ ${p.vf()}, CX + self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_b), _SI) // MOVQ FieldMap.b(CX), SI + self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_N), _CX) // MOVQ FieldMap.N(CX), CX + self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX + self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n} + self.Link("_loop_{n}") // _loop_{n}: + self.Emit("XORL" , _DX, _DX) // XORL DX, DX + self.From("DIVQ" , _CX) // DIVQ CX + self.Emit("LEAQ" , jit.Ptr(_DX, 1), _AX) // LEAQ 1(DX), AX + self.Emit("SHLQ" , jit.Imm(5), _DX) // SHLQ $5, DX + self.Emit("LEAQ" , jit.Sib(_SI, _DX, 1, 0), _DI) // LEAQ (SI)(DX), DI + self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Hash), _R8) // MOVQ FieldEntry.Hash(DI), R8 + self.Emit("TESTQ", _R8, _R8) // TESTQ R8, R8 + self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n} + self.Emit("CMPQ" , _R8, _R9) // CMPQ R8, R9 + self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n} + self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name + 8), _DX) // MOVQ FieldEntry.Name+8(DI), DX + self.Emit("CMPQ" , _DX, _ARG_sv_n) // CMPQ DX, sv.n + self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n} + self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_ID), _R8) // MOVQ FieldEntry.ID(DI), R8 + self.Emit("MOVQ" , _AX, _VAR_ss_AX) // MOVQ AX, ss.AX + self.Emit("MOVQ" , _CX, _VAR_ss_CX) // MOVQ CX, ss.CX + self.Emit("MOVQ" , _SI, _VAR_ss_SI) // MOVQ SI, ss.SI + self.Emit("MOVQ" , _R8, _VAR_ss_R8) // MOVQ R8, ss.R8 + self.Emit("MOVQ" , _R9, _VAR_ss_R9) // MOVQ R9, ss.R9 + self.Emit("MOVQ" , _ARG_sv_p, _AX) // MOVQ _VAR_sv_p, AX + self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name), _CX) // MOVQ FieldEntry.Name(DI), CX + self.Emit("MOVQ" , _CX, _BX) // MOVQ CX, 8(SP) + self.Emit("MOVQ" , _DX, _CX) // MOVQ DX, 16(SP) + self.call_go(_F_memequal) // CALL_GO memequal + self.Emit("MOVB" , _AX, _DX) // MOVB 24(SP), DX + self.Emit("MOVQ" , _VAR_ss_AX, _AX) // MOVQ ss.AX, AX + self.Emit("MOVQ" , _VAR_ss_CX, _CX) // MOVQ ss.CX, CX + self.Emit("MOVQ" , _VAR_ss_SI, _SI) // MOVQ ss.SI, SI + self.Emit("MOVQ" , _VAR_ss_R9, _R9) // MOVQ ss.R9, R9 + self.Emit("TESTB", _DX, _DX) // TESTB DX, DX + self.Sjmp("JZ" , "_loop_{n}") // JZ _loop_{n} + self.Emit("MOVQ" , _VAR_ss_R8, _R8) // MOVQ ss.R8, R8 + self.Emit("MOVQ" , _R8, _VAR_sr) // MOVQ R8, sr + self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n} + self.Link("_try_lowercase_{n}") // _try_lowercase_{n}: + self.Emit("MOVQ" , jit.Imm(referenceFields(p.vf())), _AX) // MOVQ ${p.vf()}, AX + self.Emit("MOVQ", _ARG_sv_p, _BX) // MOVQ sv, BX + self.Emit("MOVQ", _ARG_sv_n, _CX) // MOVQ sv, CX + self.call_go(_F_FieldMap_GetCaseInsensitive) // CALL_GO FieldMap::GetCaseInsensitive + self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, _VAR_sr + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNS" , "_end_{n}") // JNS _end_{n} + self.Emit("BTQ" , jit.Imm(_F_disable_unknown), _ARG_fv) // BTQ ${_F_disable_unknown}, fv + self.Sjmp("JC" , _LB_field_error) // JC _field_error + self.Link("_end_{n}") // _end_{n}: +} + +func (self *_Assembler) _asm_OP_unmarshal(p *_Instr) { + self.unmarshal_json(p.vt(), true) +} + +func (self *_Assembler) _asm_OP_unmarshal_p(p *_Instr) { + self.unmarshal_json(p.vt(), false) +} + +func (self *_Assembler) _asm_OP_unmarshal_text(p *_Instr) { + self.unmarshal_text(p.vt(), true) +} + +func (self *_Assembler) _asm_OP_unmarshal_text_p(p *_Instr) { + self.unmarshal_text(p.vt(), false) +} + +func (self 
*_Assembler) _asm_OP_lspace(_ *_Instr) { + self.lspace("_{n}") +} + +func (self *_Assembler) lspace(subfix string) { + var label = "_lspace" + subfix + self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error + self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA" , label) // JA _nospace_{n} + self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC" , label) // JNC _nospace_{n} + + /* test up to 4 characters */ + for i := 0; i < 3; i++ { + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA" , label) // JA _nospace_{n} + self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC" , label) // JNC _nospace_{n} + } + + /* handle over to the native function */ + self.Emit("MOVQ" , _IP, _DI) // MOVQ IP, DI + self.Emit("MOVQ" , _IL, _SI) // MOVQ IL, SI + self.Emit("MOVQ" , _IC, _DX) // MOVQ IC, DX + self.callc(_F_lspace) // CALL lspace + self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , _LB_parsing_error_v) // JS _parsing_error_v + self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL + self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error + self.Emit("MOVQ" , _AX, _IC) // MOVQ AX, IC + self.Link(label) // _nospace_{n}: +} + +func (self *_Assembler) _asm_OP_match_char(p *_Instr) { + self.check_eof(1) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()} + self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error + self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC +} + +func (self *_Assembler) _asm_OP_check_char(p *_Instr) { + self.check_eof(1) + self.Emit("LEAQ" , jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX + self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()} + self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC + self.Xjmp("JE" , p.vi()) // JE {p.vi()} +} + +func (self *_Assembler) _asm_OP_check_char_0(p *_Instr) { + self.check_eof(1) + self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()} + self.Xjmp("JE" , p.vi()) // JE {p.vi()} +} + +func (self *_Assembler) _asm_OP_add(p *_Instr) { + self.Emit("ADDQ", jit.Imm(int64(p.vi())), _IC) // ADDQ ${p.vi()}, IC +} + +func (self *_Assembler) _asm_OP_load(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _VP) // MOVQ (ST)(AX), VP +} + +func (self *_Assembler) _asm_OP_save(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX + self.Emit("CMPQ", _CX, jit.Imm(_MaxStackBytes)) // CMPQ CX, ${_MaxStackBytes} + self.Sjmp("JAE" , _LB_stack_error) // JA _stack_error + self.WriteRecNotAX(0 , _VP, jit.Sib(_ST, _CX, 1, 8), false, false) // MOVQ VP, 8(ST)(CX) + self.Emit("ADDQ", jit.Imm(8), _CX) // ADDQ $8, CX + self.Emit("MOVQ", _CX, jit.Ptr(_ST, 0)) // MOVQ CX, (ST) +} + +func (self *_Assembler) _asm_OP_drop(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("SUBQ", jit.Imm(8), _AX) // SUBQ $8, AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP + self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) + self.Emit("XORL", _BX, _BX) // XORL BX, BX + 
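+ /* the store below zeroes the slot that was just vacated, so the saved pointer no longer keeps its object reachable for the GC */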
self.Emit("MOVQ", _BX, jit.Sib(_ST, _AX, 1, 8)) // MOVQ BX, 8(ST)(AX) +} + +func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("SUBQ" , jit.Imm(16), _AX) // SUBQ $16, AX + self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP + self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) + self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX) +} + +func (self *_Assembler) _asm_OP_recurse(p *_Instr) { + self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX + self.decode_dynamic(_AX, _VP) // DECODE AX, VP +} + +func (self *_Assembler) _asm_OP_goto(p *_Instr) { + self.Xjmp("JMP", p.vi()) +} + +func (self *_Assembler) _asm_OP_switch(p *_Instr) { + self.Emit("MOVQ", _VAR_sr, _AX) // MOVQ sr, AX + self.Emit("CMPQ", _AX, jit.Imm(p.i64())) // CMPQ AX, ${len(p.vs())} + self.Sjmp("JAE" , "_default_{n}") // JAE _default_{n} + + /* jump table selector */ + self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI + self.Sref("_switch_table_{n}", 4) // .... &_switch_table_{n} + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX + self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP" , _AX) // JMP AX + self.Link("_switch_table_{n}") // _switch_table_{n}: + + /* generate the jump table */ + for i, v := range p.vs() { + self.Xref(v, int64(-i) * 4) + } + + /* default case */ + self.Link("_default_{n}") + self.NOP() +} + +func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) { + self.Emit("MOVQ", jit.Imm(int64(p2.op())), _CX)// MOVQ $(p2.op()), 16(SP) + self.Emit("MOVQ", jit.Imm(int64(p1.op())), _BX) // MOVQ $(p1.op()), 8(SP) + self.Emit("MOVQ", jit.Imm(int64(i)), _AX) // MOVQ $(i), (SP) + self.call_go(_F_println) +} + +//go:linkname _runtime_writeBarrier runtime.writeBarrier +var _runtime_writeBarrier uintptr + +//go:linkname gcWriteBarrierAX runtime.gcWriteBarrier +func gcWriteBarrierAX() + +var ( + _V_writeBarrier = jit.Imm(int64(uintptr(unsafe.Pointer(&_runtime_writeBarrier)))) + + _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX) +) + +func (self *_Assembler) WritePtrAX(i int, rec obj.Addr, saveDI bool) { + self.Emit("MOVQ", _V_writeBarrier, _R9) + self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.call(_F_gcWriteBarrierAX) + if saveDI { + self.load(_DI) + } + self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", _AX, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} + +func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool, saveAX bool) { + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _R9) + self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + if saveAX { + self.Emit("XCHGQ", ptr, _AX) + } else { + self.Emit("MOVQ", ptr, _AX) + } + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.call(_F_gcWriteBarrierAX) + if saveDI { + self.load(_DI) + } + if saveAX { + self.Emit("XCHGQ", ptr, _AX) + } + self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + 
"_{n}") +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/decoder/compiler.go b/vendor/github.com/bytedance/sonic/decoder/compiler.go new file mode 100644 index 0000000..b4fc2fe --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/compiler.go @@ -0,0 +1,1136 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package decoder + +import ( + `encoding/json` + `fmt` + `reflect` + `sort` + `strconv` + `strings` + `unsafe` + + `github.com/bytedance/sonic/internal/caching` + `github.com/bytedance/sonic/internal/resolver` + `github.com/bytedance/sonic/internal/rt` + `github.com/bytedance/sonic/option` +) + +type _Op uint8 + +const ( + _OP_any _Op = iota + 1 + _OP_dyn + _OP_str + _OP_bin + _OP_bool + _OP_num + _OP_i8 + _OP_i16 + _OP_i32 + _OP_i64 + _OP_u8 + _OP_u16 + _OP_u32 + _OP_u64 + _OP_f32 + _OP_f64 + _OP_unquote + _OP_nil_1 + _OP_nil_2 + _OP_nil_3 + _OP_deref + _OP_index + _OP_is_null + _OP_is_null_quote + _OP_map_init + _OP_map_key_i8 + _OP_map_key_i16 + _OP_map_key_i32 + _OP_map_key_i64 + _OP_map_key_u8 + _OP_map_key_u16 + _OP_map_key_u32 + _OP_map_key_u64 + _OP_map_key_f32 + _OP_map_key_f64 + _OP_map_key_str + _OP_map_key_utext + _OP_map_key_utext_p + _OP_array_skip + _OP_array_clear + _OP_array_clear_p + _OP_slice_init + _OP_slice_append + _OP_object_skip + _OP_object_next + _OP_struct_field + _OP_unmarshal + _OP_unmarshal_p + _OP_unmarshal_text + _OP_unmarshal_text_p + _OP_lspace + _OP_match_char + _OP_check_char + _OP_load + _OP_save + _OP_drop + _OP_drop_2 + _OP_recurse + _OP_goto + _OP_switch + _OP_check_char_0 + _OP_dismatch_err + _OP_go_skip + _OP_add + _OP_debug +) + +const ( + _INT_SIZE = 32 << (^uint(0) >> 63) + _PTR_SIZE = 32 << (^uintptr(0) >> 63) + _PTR_BYTE = unsafe.Sizeof(uintptr(0)) +) + +const ( + _MAX_ILBUF = 100000 // cutoff at 100k of IL instructions + _MAX_FIELDS = 50 // cutoff at 50 fields struct +) + +var _OpNames = [256]string { + _OP_any : "any", + _OP_dyn : "dyn", + _OP_str : "str", + _OP_bin : "bin", + _OP_bool : "bool", + _OP_num : "num", + _OP_i8 : "i8", + _OP_i16 : "i16", + _OP_i32 : "i32", + _OP_i64 : "i64", + _OP_u8 : "u8", + _OP_u16 : "u16", + _OP_u32 : "u32", + _OP_u64 : "u64", + _OP_f32 : "f32", + _OP_f64 : "f64", + _OP_unquote : "unquote", + _OP_nil_1 : "nil_1", + _OP_nil_2 : "nil_2", + _OP_nil_3 : "nil_3", + _OP_deref : "deref", + _OP_index : "index", + _OP_is_null : "is_null", + _OP_is_null_quote : "is_null_quote", + _OP_map_init : "map_init", + _OP_map_key_i8 : "map_key_i8", + _OP_map_key_i16 : "map_key_i16", + _OP_map_key_i32 : "map_key_i32", + _OP_map_key_i64 : "map_key_i64", + _OP_map_key_u8 : "map_key_u8", + _OP_map_key_u16 : "map_key_u16", + _OP_map_key_u32 : "map_key_u32", + _OP_map_key_u64 : "map_key_u64", + _OP_map_key_f32 : "map_key_f32", + _OP_map_key_f64 : "map_key_f64", + _OP_map_key_str : "map_key_str", + _OP_map_key_utext : "map_key_utext", + _OP_map_key_utext_p : "map_key_utext_p", + _OP_array_skip : "array_skip", + _OP_slice_init : "slice_init", + 
_OP_slice_append : "slice_append", + _OP_object_skip : "object_skip", + _OP_object_next : "object_next", + _OP_struct_field : "struct_field", + _OP_unmarshal : "unmarshal", + _OP_unmarshal_p : "unmarshal_p", + _OP_unmarshal_text : "unmarshal_text", + _OP_unmarshal_text_p : "unmarshal_text_p", + _OP_lspace : "lspace", + _OP_match_char : "match_char", + _OP_check_char : "check_char", + _OP_load : "load", + _OP_save : "save", + _OP_drop : "drop", + _OP_drop_2 : "drop_2", + _OP_recurse : "recurse", + _OP_goto : "goto", + _OP_switch : "switch", + _OP_check_char_0 : "check_char_0", + _OP_dismatch_err : "dismatch_err", + _OP_add : "add", +}
+ +func (self _Op) String() string { + if ret := _OpNames[self]; ret != "" { + return ret + } else { + return "<invalid>" + } +}
+ +func _OP_int() _Op { + switch _INT_SIZE { + case 32: return _OP_i32 + case 64: return _OP_i64 + default: panic("unsupported int size") + } +} + +func _OP_uint() _Op { + switch _INT_SIZE { + case 32: return _OP_u32 + case 64: return _OP_u64 + default: panic("unsupported uint size") + } +} + +func _OP_uintptr() _Op { + switch _PTR_SIZE { + case 32: return _OP_u32 + case 64: return _OP_u64 + default: panic("unsupported pointer size") + } +} + +func _OP_map_key_int() _Op { + switch _INT_SIZE { + case 32: return _OP_map_key_i32 + case 64: return _OP_map_key_i64 + default: panic("unsupported int size") + } +} + +func _OP_map_key_uint() _Op { + switch _INT_SIZE { + case 32: return _OP_map_key_u32 + case 64: return _OP_map_key_u64 + default: panic("unsupported uint size") + } +} + +func _OP_map_key_uintptr() _Op { + switch _PTR_SIZE { + case 32: return _OP_map_key_u32 + case 64: return _OP_map_key_u64 + default: panic("unsupported pointer size") + } +}
+ +type _Instr struct { + u uint64 // union {op: 8, vb: 8, vi: 48}, vi maybe int or len([]int) + p unsafe.Pointer // maybe GoSlice.Data, *GoType or *caching.FieldMap +} + +func packOp(op _Op) uint64 { + return uint64(op) << 56 +} + +func newInsOp(op _Op) _Instr { + return _Instr{u: packOp(op)} +} + +func newInsVi(op _Op, vi int) _Instr { + return _Instr{u: packOp(op) | rt.PackInt(vi)} +} + +func newInsVb(op _Op, vb byte) _Instr { + return _Instr{u: packOp(op) | (uint64(vb) << 48)} +} + +func newInsVs(op _Op, vs []int) _Instr { + return _Instr { + u: packOp(op) | rt.PackInt(len(vs)), + p: (*rt.GoSlice)(unsafe.Pointer(&vs)).Ptr, + } +} + +func newInsVt(op _Op, vt reflect.Type) _Instr { + return _Instr { + u: packOp(op), + p: unsafe.Pointer(rt.UnpackType(vt)), + } +} + +func newInsVf(op _Op, vf *caching.FieldMap) _Instr { + return _Instr { + u: packOp(op), + p: unsafe.Pointer(vf), + } +}
+ +func (self _Instr) op() _Op { + return _Op(self.u >> 56) +} + +func (self _Instr) vi() int { + return rt.UnpackInt(self.u) +} + +func (self _Instr) vb() byte { + return byte(self.u >> 48) +} + +func (self _Instr) vs() (v []int) { + (*rt.GoSlice)(unsafe.Pointer(&v)).Ptr = self.p + (*rt.GoSlice)(unsafe.Pointer(&v)).Cap = self.vi() + (*rt.GoSlice)(unsafe.Pointer(&v)).Len = self.vi() + return +} + +func (self _Instr) vf() *caching.FieldMap { + return (*caching.FieldMap)(self.p) +} + +func (self _Instr) vk() reflect.Kind { + return (*rt.GoType)(self.p).Kind() +} + +func (self _Instr) vt() reflect.Type { + return (*rt.GoType)(self.p).Pack() +} + +func (self _Instr) i64() int64 { + return int64(self.vi()) +} + +func (self _Instr) vlen() int { + return int((*rt.GoType)(self.p).Size) +} + +func (self _Instr) isBranch() bool { + switch self.op() { + case _OP_goto : fallthrough + case _OP_switch : fallthrough + case 
_OP_is_null : fallthrough + case _OP_is_null_quote : fallthrough + case _OP_check_char : return true + default : return false + } +} + +func (self _Instr) disassemble() string { + switch self.op() { + case _OP_dyn : fallthrough + case _OP_deref : fallthrough + case _OP_map_key_i8 : fallthrough + case _OP_map_key_i16 : fallthrough + case _OP_map_key_i32 : fallthrough + case _OP_map_key_i64 : fallthrough + case _OP_map_key_u8 : fallthrough + case _OP_map_key_u16 : fallthrough + case _OP_map_key_u32 : fallthrough + case _OP_map_key_u64 : fallthrough + case _OP_map_key_f32 : fallthrough + case _OP_map_key_f64 : fallthrough + case _OP_map_key_str : fallthrough + case _OP_map_key_utext : fallthrough + case _OP_map_key_utext_p : fallthrough + case _OP_slice_init : fallthrough + case _OP_slice_append : fallthrough + case _OP_unmarshal : fallthrough + case _OP_unmarshal_p : fallthrough + case _OP_unmarshal_text : fallthrough + case _OP_unmarshal_text_p : fallthrough + case _OP_recurse : return fmt.Sprintf("%-18s%s", self.op(), self.vt()) + case _OP_goto : fallthrough + case _OP_is_null_quote : fallthrough + case _OP_is_null : return fmt.Sprintf("%-18sL_%d", self.op(), self.vi()) + case _OP_index : fallthrough + case _OP_array_clear : fallthrough + case _OP_array_clear_p : return fmt.Sprintf("%-18s%d", self.op(), self.vi()) + case _OP_switch : return fmt.Sprintf("%-18s%s", self.op(), self.formatSwitchLabels()) + case _OP_struct_field : return fmt.Sprintf("%-18s%s", self.op(), self.formatStructFields()) + case _OP_match_char : return fmt.Sprintf("%-18s%s", self.op(), strconv.QuoteRune(rune(self.vb()))) + case _OP_check_char : return fmt.Sprintf("%-18sL_%d, %s", self.op(), self.vi(), strconv.QuoteRune(rune(self.vb()))) + default : return self.op().String() + } +} + +func (self _Instr) formatSwitchLabels() string { + var i int + var v int + var m []string + + /* format each label */ + for i, v = range self.vs() { + m = append(m, fmt.Sprintf("%d=L_%d", i, v)) + } + + /* join them with "," */ + return strings.Join(m, ", ") +} + +func (self _Instr) formatStructFields() string { + var i uint64 + var r []string + var m []struct{i int; n string} + + /* extract all the fields */ + for i = 0; i < self.vf().N; i++ { + if v := self.vf().At(i); v.Hash != 0 { + m = append(m, struct{i int; n string}{i: v.ID, n: v.Name}) + } + } + + /* sort by field name */ + sort.Slice(m, func(i, j int) bool { + return m[i].n < m[j].n + }) + + /* format each field */ + for _, v := range m { + r = append(r, fmt.Sprintf("%s=%d", v.n, v.i)) + } + + /* join them with "," */ + return strings.Join(r, ", ") +} + +type ( + _Program []_Instr +) + +func (self _Program) pc() int { + return len(self) +} + +func (self _Program) tag(n int) { + if n >= _MaxStack { + panic("type nesting too deep") + } +} + +func (self _Program) pin(i int) { + v := &self[i] + v.u &= 0xffff000000000000 + v.u |= rt.PackInt(self.pc()) +} + +func (self _Program) rel(v []int) { + for _, i := range v { + self.pin(i) + } +} + +func (self *_Program) add(op _Op) { + *self = append(*self, newInsOp(op)) +} + +func (self *_Program) int(op _Op, vi int) { + *self = append(*self, newInsVi(op, vi)) +} + +func (self *_Program) chr(op _Op, vb byte) { + *self = append(*self, newInsVb(op, vb)) +} + +func (self *_Program) tab(op _Op, vs []int) { + *self = append(*self, newInsVs(op, vs)) +} + +func (self *_Program) rtt(op _Op, vt reflect.Type) { + *self = append(*self, newInsVt(op, vt)) +} + +func (self *_Program) fmv(op _Op, vf *caching.FieldMap) { + *self = append(*self, newInsVf(op, 
vf)) +}
+ +func (self _Program) disassemble() string { + nb := len(self) + tab := make([]bool, nb + 1) + ret := make([]string, 0, nb + 1) + + /* prescan to get all the labels */ + for _, ins := range self { + if ins.isBranch() { + if ins.op() != _OP_switch { + tab[ins.vi()] = true + } else { + for _, v := range ins.vs() { + tab[v] = true + } + } + } + } + + /* disassemble each instruction */ + for i, ins := range self { + if !tab[i] { + ret = append(ret, "\t" + ins.disassemble()) + } else { + ret = append(ret, fmt.Sprintf("L_%d:\n\t%s", i, ins.disassemble())) + } + } + + /* add the last label, if needed */ + if tab[nb] { + ret = append(ret, fmt.Sprintf("L_%d:", nb)) + } + + /* add an "end" indicator, and join all the strings */ + return strings.Join(append(ret, "\tend"), "\n") +}
+ +type _Compiler struct { + opts option.CompileOptions + tab map[reflect.Type]bool + rec map[reflect.Type]bool +} + +func newCompiler() *_Compiler { + return &_Compiler { + opts: option.DefaultCompileOptions(), + tab: map[reflect.Type]bool{}, + rec: map[reflect.Type]bool{}, + } +} + +func (self *_Compiler) apply(opts option.CompileOptions) *_Compiler { + self.opts = opts + return self +} + +func (self *_Compiler) rescue(ep *error) { + if val := recover(); val != nil { + if err, ok := val.(error); ok { + *ep = err + } else { + panic(val) + } + } +} + +func (self *_Compiler) compile(vt reflect.Type) (ret _Program, err error) { + defer self.rescue(&err) + self.compileOne(&ret, 0, vt) + return +}
+ +func (self *_Compiler) compileOne(p *_Program, sp int, vt reflect.Type) { + /* check for recursive nesting */ + ok := self.tab[vt] + if ok { + p.rtt(_OP_recurse, vt) + return + } + + pt := reflect.PtrTo(vt) + + /* check for `json.Unmarshaler` with pointer receiver */ + if pt.Implements(jsonUnmarshalerType) { + p.rtt(_OP_unmarshal_p, pt) + return + } + + /* check for `json.Unmarshaler` */ + if vt.Implements(jsonUnmarshalerType) { + p.add(_OP_lspace) + self.compileUnmarshalJson(p, vt) + return + } + + /* check for `encoding.TextUnmarshaler` with pointer receiver */ + if pt.Implements(encodingTextUnmarshalerType) { + p.add(_OP_lspace) + self.compileUnmarshalTextPtr(p, pt) + return + } + + /* check for `encoding.TextUnmarshaler` */ + if vt.Implements(encodingTextUnmarshalerType) { + p.add(_OP_lspace) + self.compileUnmarshalText(p, vt) + return + } + + /* enter the recursion */ + p.add(_OP_lspace) + self.tab[vt] = true + self.compileOps(p, sp, vt) + delete(self.tab, vt) +}
+ +func (self *_Compiler) compileOps(p *_Program, sp int, vt reflect.Type) { + switch vt.Kind() { + case reflect.Bool : self.compilePrimitive (vt, p, _OP_bool) + case reflect.Int : self.compilePrimitive (vt, p, _OP_int()) + case reflect.Int8 : self.compilePrimitive (vt, p, _OP_i8) + case reflect.Int16 : self.compilePrimitive (vt, p, _OP_i16) + case reflect.Int32 : self.compilePrimitive (vt, p, _OP_i32) + case reflect.Int64 : self.compilePrimitive (vt, p, _OP_i64) + case reflect.Uint : self.compilePrimitive (vt, p, _OP_uint()) + case reflect.Uint8 : self.compilePrimitive (vt, p, _OP_u8) + case reflect.Uint16 : self.compilePrimitive (vt, p, _OP_u16) + case reflect.Uint32 : self.compilePrimitive (vt, p, _OP_u32) + case reflect.Uint64 : self.compilePrimitive (vt, p, _OP_u64) + case reflect.Uintptr : self.compilePrimitive (vt, p, _OP_uintptr()) + case reflect.Float32 : self.compilePrimitive (vt, p, _OP_f32) + case reflect.Float64 : self.compilePrimitive (vt, p, _OP_f64) + case reflect.String : self.compileString (p, vt) + case reflect.Array : self.compileArray (p, 
sp, vt) + case reflect.Interface : self.compileInterface (p, vt) + case reflect.Map : self.compileMap (p, sp, vt) + case reflect.Ptr : self.compilePtr (p, sp, vt) + case reflect.Slice : self.compileSlice (p, sp, vt) + case reflect.Struct : self.compileStruct (p, sp, vt) + default : panic (&json.UnmarshalTypeError{Type: vt}) + } +} + +func (self *_Compiler) compileMap(p *_Program, sp int, vt reflect.Type) { + if reflect.PtrTo(vt.Key()).Implements(encodingTextUnmarshalerType) { + self.compileMapOp(p, sp, vt, _OP_map_key_utext_p) + } else if vt.Key().Implements(encodingTextUnmarshalerType) { + self.compileMapOp(p, sp, vt, _OP_map_key_utext) + } else { + self.compileMapUt(p, sp, vt) + } +} + +func (self *_Compiler) compileMapUt(p *_Program, sp int, vt reflect.Type) { + switch vt.Key().Kind() { + case reflect.Int : self.compileMapOp(p, sp, vt, _OP_map_key_int()) + case reflect.Int8 : self.compileMapOp(p, sp, vt, _OP_map_key_i8) + case reflect.Int16 : self.compileMapOp(p, sp, vt, _OP_map_key_i16) + case reflect.Int32 : self.compileMapOp(p, sp, vt, _OP_map_key_i32) + case reflect.Int64 : self.compileMapOp(p, sp, vt, _OP_map_key_i64) + case reflect.Uint : self.compileMapOp(p, sp, vt, _OP_map_key_uint()) + case reflect.Uint8 : self.compileMapOp(p, sp, vt, _OP_map_key_u8) + case reflect.Uint16 : self.compileMapOp(p, sp, vt, _OP_map_key_u16) + case reflect.Uint32 : self.compileMapOp(p, sp, vt, _OP_map_key_u32) + case reflect.Uint64 : self.compileMapOp(p, sp, vt, _OP_map_key_u64) + case reflect.Uintptr : self.compileMapOp(p, sp, vt, _OP_map_key_uintptr()) + case reflect.Float32 : self.compileMapOp(p, sp, vt, _OP_map_key_f32) + case reflect.Float64 : self.compileMapOp(p, sp, vt, _OP_map_key_f64) + case reflect.String : self.compileMapOp(p, sp, vt, _OP_map_key_str) + default : panic(&json.UnmarshalTypeError{Type: vt}) + } +} + +func (self *_Compiler) compileMapOp(p *_Program, sp int, vt reflect.Type, op _Op) { + i := p.pc() + p.add(_OP_is_null) + p.tag(sp + 1) + skip := self.checkIfSkip(p, vt, '{') + p.add(_OP_save) + p.add(_OP_map_init) + p.add(_OP_save) + p.add(_OP_lspace) + j := p.pc() + p.chr(_OP_check_char, '}') + p.chr(_OP_match_char, '"') + skip2 := p.pc() + p.rtt(op, vt) + + /* match the closing quote if needed */ + if op != _OP_map_key_str && op != _OP_map_key_utext && op != _OP_map_key_utext_p { + p.chr(_OP_match_char, '"') + } + + /* match the value separator */ + p.add(_OP_lspace) + p.chr(_OP_match_char, ':') + self.compileOne(p, sp + 2, vt.Elem()) + p.pin(skip2) + p.add(_OP_load) + k0 := p.pc() + p.add(_OP_lspace) + k1 := p.pc() + p.chr(_OP_check_char, '}') + p.chr(_OP_match_char, ',') + p.add(_OP_lspace) + p.chr(_OP_match_char, '"') + skip3 := p.pc() + p.rtt(op, vt) + + /* match the closing quote if needed */ + if op != _OP_map_key_str && op != _OP_map_key_utext && op != _OP_map_key_utext_p { + p.chr(_OP_match_char, '"') + } + + /* match the value separator */ + p.add(_OP_lspace) + p.chr(_OP_match_char, ':') + self.compileOne(p, sp + 2, vt.Elem()) + p.pin(skip3) + p.add(_OP_load) + p.int(_OP_goto, k0) + p.pin(j) + p.pin(k1) + p.add(_OP_drop_2) + x := p.pc() + p.add(_OP_goto) + p.pin(i) + p.add(_OP_nil_1) + p.pin(skip) + p.pin(x) +} + +func (self *_Compiler) compilePtr(p *_Program, sp int, et reflect.Type) { + i := p.pc() + p.add(_OP_is_null) + + /* dereference all the way down */ + for et.Kind() == reflect.Ptr { + et = et.Elem() + p.rtt(_OP_deref, et) + } + + /* compile the element type */ + self.compileOne(p, sp + 1, et) + j := p.pc() + p.add(_OP_goto) + p.pin(i) + p.add(_OP_nil_1) + 
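+ /* p.pin(j) below resolves the goto recorded at j, so a non-null value jumps over the _OP_nil_1 emitted for the "null" branch */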
p.pin(j) +} + +func (self *_Compiler) compileArray(p *_Program, sp int, vt reflect.Type) { + x := p.pc() + p.add(_OP_is_null) + p.tag(sp) + skip := self.checkIfSkip(p, vt, '[') + + p.add(_OP_save) + p.add(_OP_lspace) + v := []int{p.pc()} + p.chr(_OP_check_char, ']') + + /* decode every item */ + for i := 1; i <= vt.Len(); i++ { + self.compileOne(p, sp + 1, vt.Elem()) + p.add(_OP_load) + p.int(_OP_index, i * int(vt.Elem().Size())) + p.add(_OP_lspace) + v = append(v, p.pc()) + p.chr(_OP_check_char, ']') + p.chr(_OP_match_char, ',') + } + + /* drop rest of the array */ + p.add(_OP_array_skip) + w := p.pc() + p.add(_OP_goto) + p.rel(v) + + /* check for pointer data */ + if rt.UnpackType(vt.Elem()).PtrData == 0 { + p.int(_OP_array_clear, int(vt.Size())) + } else { + p.int(_OP_array_clear_p, int(vt.Size())) + } + + /* restore the stack */ + p.pin(w) + p.add(_OP_drop) + + p.pin(skip) + p.pin(x) +} + +func (self *_Compiler) compileSlice(p *_Program, sp int, vt reflect.Type) { + if vt.Elem().Kind() == byteType.Kind() { + self.compileSliceBin(p, sp, vt) + } else { + self.compileSliceList(p, sp, vt) + } +} + +func (self *_Compiler) compileSliceBin(p *_Program, sp int, vt reflect.Type) { + i := p.pc() + p.add(_OP_is_null) + j := p.pc() + p.chr(_OP_check_char, '[') + skip := self.checkIfSkip(p, vt, '"') + k := p.pc() + p.chr(_OP_check_char, '"') + p.add(_OP_bin) + x := p.pc() + p.add(_OP_goto) + p.pin(j) + self.compileSliceBody(p, sp, vt.Elem()) + y := p.pc() + p.add(_OP_goto) + p.pin(i) + p.pin(k) + p.add(_OP_nil_3) + p.pin(x) + p.pin(skip) + p.pin(y) +} + +func (self *_Compiler) compileSliceList(p *_Program, sp int, vt reflect.Type) { + i := p.pc() + p.add(_OP_is_null) + p.tag(sp) + skip := self.checkIfSkip(p, vt, '[') + self.compileSliceBody(p, sp, vt.Elem()) + x := p.pc() + p.add(_OP_goto) + p.pin(i) + p.add(_OP_nil_3) + p.pin(x) + p.pin(skip) +} + +func (self *_Compiler) compileSliceBody(p *_Program, sp int, et reflect.Type) { + p.rtt(_OP_slice_init, et) + p.add(_OP_save) + p.add(_OP_lspace) + j := p.pc() + p.chr(_OP_check_char, ']') + p.rtt(_OP_slice_append, et) + self.compileOne(p, sp + 1, et) + p.add(_OP_load) + k0 := p.pc() + p.add(_OP_lspace) + k1 := p.pc() + p.chr(_OP_check_char, ']') + p.chr(_OP_match_char, ',') + p.rtt(_OP_slice_append, et) + self.compileOne(p, sp + 1, et) + p.add(_OP_load) + p.int(_OP_goto, k0) + p.pin(j) + p.pin(k1) + p.add(_OP_drop) +} + +func (self *_Compiler) compileString(p *_Program, vt reflect.Type) { + if vt == jsonNumberType { + self.compilePrimitive(vt, p, _OP_num) + } else { + self.compileStringBody(vt, p) + } +} + +func (self *_Compiler) compileStringBody(vt reflect.Type, p *_Program) { + i := p.pc() + p.add(_OP_is_null) + skip := self.checkIfSkip(p, vt, '"') + p.add(_OP_str) + p.pin(i) + p.pin(skip) +} + +func (self *_Compiler) compileStruct(p *_Program, sp int, vt reflect.Type) { + if sp >= self.opts.MaxInlineDepth || p.pc() >= _MAX_ILBUF || (sp > 0 && vt.NumField() >= _MAX_FIELDS) { + p.rtt(_OP_recurse, vt) + if self.opts.RecursiveDepth > 0 { + self.rec[vt] = true + } + } else { + self.compileStructBody(p, sp, vt) + } +} + +func (self *_Compiler) compileStructBody(p *_Program, sp int, vt reflect.Type) { + fv := resolver.ResolveStruct(vt) + fm, sw := caching.CreateFieldMap(len(fv)), make([]int, len(fv)) + + /* start of object */ + p.tag(sp) + n := p.pc() + p.add(_OP_is_null) + + skip := self.checkIfSkip(p, vt, '{') + + p.add(_OP_save) + p.add(_OP_lspace) + x := p.pc() + p.chr(_OP_check_char, '}') + p.chr(_OP_match_char, '"') + p.fmv(_OP_struct_field, fm) + 
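+ /* _OP_struct_field leaves the matched field index (or -1 for an unknown key) in the "sr" slot; the _OP_switch emitted below dispatches on it into sw[i] */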
p.add(_OP_lspace) + p.chr(_OP_match_char, ':') + p.tab(_OP_switch, sw) + p.add(_OP_object_next) + y0 := p.pc() + p.add(_OP_lspace) + y1 := p.pc() + p.chr(_OP_check_char, '}') + p.chr(_OP_match_char, ',') + + /* special case of an empty struct */ + if len(fv) == 0 { + p.add(_OP_object_skip) + goto end_of_object + } + + /* match the remaining fields */ + p.add(_OP_lspace) + p.chr(_OP_match_char, '"') + p.fmv(_OP_struct_field, fm) + p.add(_OP_lspace) + p.chr(_OP_match_char, ':') + p.tab(_OP_switch, sw) + p.add(_OP_object_next) + p.int(_OP_goto, y0) + + /* process each field */ + for i, f := range fv { + sw[i] = p.pc() + fm.Set(f.Name, i) + + /* index to the field */ + for _, o := range f.Path { + if p.int(_OP_index, int(o.Size)); o.Kind == resolver.F_deref { + p.rtt(_OP_deref, o.Type) + } + } + + /* check for "stringnize" option */ + if (f.Opts & resolver.F_stringize) == 0 { + self.compileOne(p, sp + 1, f.Type) + } else { + self.compileStructFieldStr(p, sp + 1, f.Type) + } + + /* load the state, and try next field */ + p.add(_OP_load) + p.int(_OP_goto, y0) + } + +end_of_object: + p.pin(x) + p.pin(y1) + p.add(_OP_drop) + p.pin(n) + p.pin(skip) +} + +func (self *_Compiler) compileStructFieldStr(p *_Program, sp int, vt reflect.Type) { + n1 := -1 + ft := vt + sv := false + + /* dereference the pointer if needed */ + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + + /* check if it can be stringized */ + switch ft.Kind() { + case reflect.Bool : sv = true + case reflect.Int : sv = true + case reflect.Int8 : sv = true + case reflect.Int16 : sv = true + case reflect.Int32 : sv = true + case reflect.Int64 : sv = true + case reflect.Uint : sv = true + case reflect.Uint8 : sv = true + case reflect.Uint16 : sv = true + case reflect.Uint32 : sv = true + case reflect.Uint64 : sv = true + case reflect.Uintptr : sv = true + case reflect.Float32 : sv = true + case reflect.Float64 : sv = true + case reflect.String : sv = true + } + + /* if it's not, ignore the "string" and follow the regular path */ + if !sv { + self.compileOne(p, sp, vt) + return + } + + /* remove the leading space, and match the leading quote */ + vk := vt.Kind() + p.add(_OP_lspace) + n0 := p.pc() + p.add(_OP_is_null) + + skip := self.checkIfSkip(p, stringType, '"') + + /* also check for inner "null" */ + n1 = p.pc() + p.add(_OP_is_null_quote) + + /* dereference the pointer only when it is not null */ + if vk == reflect.Ptr { + vt = vt.Elem() + p.rtt(_OP_deref, vt) + } + + n2 := p.pc() + p.chr(_OP_check_char_0, '"') + + /* string opcode selector */ + _OP_string := func() _Op { + if ft == jsonNumberType { + return _OP_num + } else { + return _OP_unquote + } + } + + /* compile for each type */ + switch vt.Kind() { + case reflect.Bool : p.add(_OP_bool) + case reflect.Int : p.add(_OP_int()) + case reflect.Int8 : p.add(_OP_i8) + case reflect.Int16 : p.add(_OP_i16) + case reflect.Int32 : p.add(_OP_i32) + case reflect.Int64 : p.add(_OP_i64) + case reflect.Uint : p.add(_OP_uint()) + case reflect.Uint8 : p.add(_OP_u8) + case reflect.Uint16 : p.add(_OP_u16) + case reflect.Uint32 : p.add(_OP_u32) + case reflect.Uint64 : p.add(_OP_u64) + case reflect.Uintptr : p.add(_OP_uintptr()) + case reflect.Float32 : p.add(_OP_f32) + case reflect.Float64 : p.add(_OP_f64) + case reflect.String : p.add(_OP_string()) + default : panic("not reachable") + } + + /* the closing quote is not needed when parsing a pure string */ + if vt == jsonNumberType || vt.Kind() != reflect.String { + p.chr(_OP_match_char, '"') + } + + /* pin the `is_null_quote` jump location */ + 
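+ /* (non-pointer fields only: for pointers, n1 is pinned inside the shared "null" handling further down) */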
if n1 != -1 && vk != reflect.Ptr { + p.pin(n1) + } + + /* "null" but not a pointer, act as if the field is not present */ + if vk != reflect.Ptr { + pc2 := p.pc() + p.add(_OP_goto) + p.pin(n2) + p.rtt(_OP_dismatch_err, vt) + p.int(_OP_add, 1) + p.pin(pc2) + p.pin(n0) + return + } + + /* the "null" case of the pointer */ + pc := p.pc() + p.add(_OP_goto) + p.pin(n0) // `is_null` jump location + p.pin(n1) // `is_null_quote` jump location + p.add(_OP_nil_1) + pc2 := p.pc() + p.add(_OP_goto) + p.pin(n2) + p.rtt(_OP_dismatch_err, vt) + p.int(_OP_add, 1) + p.pin(pc) + p.pin(pc2) + p.pin(skip) +} + +func (self *_Compiler) compileInterface(p *_Program, vt reflect.Type) { + i := p.pc() + p.add(_OP_is_null) + + /* check for empty interface */ + if vt.NumMethod() == 0 { + p.add(_OP_any) + } else { + p.rtt(_OP_dyn, vt) + } + + /* finish the OpCode */ + j := p.pc() + p.add(_OP_goto) + p.pin(i) + p.add(_OP_nil_2) + p.pin(j) +} + +func (self *_Compiler) compilePrimitive(vt reflect.Type, p *_Program, op _Op) { + i := p.pc() + p.add(_OP_is_null) + // skip := self.checkPrimitive(p, vt) + p.add(op) + p.pin(i) + // p.pin(skip) +} + +func (self *_Compiler) compileUnmarshalEnd(p *_Program, vt reflect.Type, i int) { + j := p.pc() + k := vt.Kind() + + /* not a pointer */ + if k != reflect.Ptr { + p.pin(i) + return + } + + /* it seems that in Go JSON library, "null" takes priority over any kind of unmarshaler */ + p.add(_OP_goto) + p.pin(i) + p.add(_OP_nil_1) + p.pin(j) +} + +func (self *_Compiler) compileUnmarshalJson(p *_Program, vt reflect.Type) { + i := p.pc() + v := _OP_unmarshal + p.add(_OP_is_null) + + /* check for dynamic interface */ + if vt.Kind() == reflect.Interface { + v = _OP_dyn + } + + /* call the unmarshaler */ + p.rtt(v, vt) + self.compileUnmarshalEnd(p, vt, i) +} + +func (self *_Compiler) compileUnmarshalText(p *_Program, vt reflect.Type) { + i := p.pc() + v := _OP_unmarshal_text + p.add(_OP_is_null) + + /* check for dynamic interface */ + if vt.Kind() == reflect.Interface { + v = _OP_dyn + } else { + p.chr(_OP_match_char, '"') + } + + /* call the unmarshaler */ + p.rtt(v, vt) + self.compileUnmarshalEnd(p, vt, i) +} + +func (self *_Compiler) compileUnmarshalTextPtr(p *_Program, vt reflect.Type) { + i := p.pc() + p.add(_OP_is_null) + p.chr(_OP_match_char, '"') + p.rtt(_OP_unmarshal_text_p, vt) + p.pin(i) +} + +func (self *_Compiler) checkIfSkip(p *_Program, vt reflect.Type, c byte) int { + j := p.pc() + p.chr(_OP_check_char_0, c) + p.rtt(_OP_dismatch_err, vt) + s := p.pc() + p.add(_OP_go_skip) + p.pin(j) + p.int(_OP_add, 1) + return s +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/decoder/debug.go b/vendor/github.com/bytedance/sonic/decoder/debug.go new file mode 100644 index 0000000..9cf3a6a --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/debug.go @@ -0,0 +1,70 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package decoder + +import ( + `os` + `runtime` + `runtime/debug` + `strings` + + `github.com/bytedance/sonic/internal/jit` +) + + +var ( + debugSyncGC = os.Getenv("SONIC_SYNC_GC") != "" + debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == "" +) + +var ( + _Instr_End _Instr = newInsOp(_OP_nil_1) + + _F_gc = jit.Func(runtime.GC) + _F_force_gc = jit.Func(debug.FreeOSMemory) + _F_println = jit.Func(println_wrapper) + _F_print = jit.Func(print) +) + +func println_wrapper(i int, op1 int, op2 int){ + println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2]) +} + +func print(i int){ + println(i) +} + +func (self *_Assembler) force_gc() { + self.call_go(_F_gc) + self.call_go(_F_force_gc) +} + +func (self *_Assembler) debug_instr(i int, v *_Instr) { + if debugSyncGC { + if (i+1 == len(self.p)) { + self.print_gc(i, v, &_Instr_End) + } else { + next := &(self.p[i+1]) + self.print_gc(i, v, next) + name := _OpNames[next.op()] + if strings.Contains(name, "save") { + return + } + } + self.force_gc() + } +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/decoder/decoder.go b/vendor/github.com/bytedance/sonic/decoder/decoder.go new file mode 100644 index 0000000..5326f97 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/decoder.go @@ -0,0 +1,245 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package decoder + +import ( + `encoding/json` + `reflect` + `runtime` + + `github.com/bytedance/sonic/internal/native` + `github.com/bytedance/sonic/internal/native/types` + `github.com/bytedance/sonic/internal/rt` + `github.com/bytedance/sonic/option` + `github.com/bytedance/sonic/utf8` +) + +const ( + _F_use_int64 = iota + _F_use_number + _F_disable_urc + _F_disable_unknown + _F_copy_string + _F_validate_string + + _F_allow_control = 31 +) + +type Options uint64 + +const ( + OptionUseInt64 Options = 1 << _F_use_int64 + OptionUseNumber Options = 1 << _F_use_number + OptionUseUnicodeErrors Options = 1 << _F_disable_urc + OptionDisableUnknown Options = 1 << _F_disable_unknown + OptionCopyString Options = 1 << _F_copy_string + OptionValidateString Options = 1 << _F_validate_string +) + +func (self *Decoder) SetOptions(opts Options) { + if (opts & OptionUseNumber != 0) && (opts & OptionUseInt64 != 0) { + panic("can't set OptionUseInt64 and OptionUseNumber both!") + } + self.f = uint64(opts) +} + + +// Decoder is the decoder context object +type Decoder struct { + i int + f uint64 + s string +} + +// NewDecoder creates a new decoder instance. +func NewDecoder(s string) *Decoder { + return &Decoder{s: s} +} + +// Pos returns the current decoding position. 
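+// (that is, the byte offset into the input string from which the next call to Decode will resume).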
+func (self *Decoder) Pos() int { + return self.i +} + +func (self *Decoder) Reset(s string) { + self.s = s + self.i = 0 + // self.f = 0 +} + +func (self *Decoder) CheckTrailings() error { + pos := self.i + buf := self.s + /* skip all the trailing spaces */ + if pos != len(buf) { + for pos < len(buf) && (types.SPACE_MASK & (1 << buf[pos])) != 0 { + pos++ + } + } + + /* then it must be at EOF */ + if pos == len(buf) { + return nil + } + + /* junk after JSON value */ + return SyntaxError { + Src : buf, + Pos : pos, + Code : types.ERR_INVALID_CHAR, + } +}
+ + +// Decode parses the JSON-encoded data from the current position and stores the +// result in the value pointed to by val. +func (self *Decoder) Decode(val interface{}) error { + /* validate json if needed */ + if (self.f & (1 << _F_validate_string)) != 0 && !utf8.ValidateString(self.s) { + dbuf := utf8.CorrectWith(nil, rt.Str2Mem(self.s), "\ufffd") + self.s = rt.Mem2Str(dbuf) + } + + vv := rt.UnpackEface(val) + vp := vv.Value + + /* check for nil type */ + if vv.Type == nil { + return &json.InvalidUnmarshalError{} + } + + /* must be a non-nil pointer */ + if vp == nil || vv.Type.Kind() != reflect.Ptr { + return &json.InvalidUnmarshalError{Type: vv.Type.Pack()} + } + + /* create a new stack, and call the decoder */ + sb, etp := newStack(), rt.PtrElem(vv.Type) + nb, err := decodeTypedPointer(self.s, self.i, etp, vp, sb, self.f) + self.i = nb + + /* put the stack back into the pool */ + freeStack(sb) + + /* keep the value alive until decoding has finished */ + runtime.KeepAlive(vv) + return err +}
+ +// UseInt64 instructs the Decoder to unmarshal an integer into an interface{} as an +// int64 instead of as a float64. +func (self *Decoder) UseInt64() { + self.f |= 1 << _F_use_int64 + self.f &^= 1 << _F_use_number +} + +// UseNumber instructs the Decoder to unmarshal a number into an interface{} as a +// json.Number instead of as a float64. +func (self *Decoder) UseNumber() { + self.f &^= 1 << _F_use_int64 + self.f |= 1 << _F_use_number +} + +// UseUnicodeErrors instructs the Decoder to return an error when it encounters +// invalid UTF-8 escape sequences. +func (self *Decoder) UseUnicodeErrors() { + self.f |= 1 << _F_disable_urc +} + +// DisallowUnknownFields instructs the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (self *Decoder) DisallowUnknownFields() { + self.f |= 1 << _F_disable_unknown +} + +// CopyString instructs the Decoder to decode string values by copying instead of +// referencing the input buffer. +func (self *Decoder) CopyString() { + self.f |= 1 << _F_copy_string +} + +// ValidateString causes the Decoder to validate string values while decoding: an +// error is returned when a JSON string value contains unescaped control characters +// (0x00-0x1f) or invalid UTF-8. +func (self *Decoder) ValidateString() { + self.f |= 1 << _F_validate_string +}
+ +// Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in +// order to reduce the first-hit latency. +// +// Opts are the compile options; for example, "option.WithCompileRecursiveDepth" +// sets the depth to which nested struct types are compiled recursively. 
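+// +// A minimal warm-up sketch ("Post" is a hypothetical user type): +// +//	type Post struct { +//		Author  map[string]interface{} `json:"author"` +//		Replies []*Post                `json:"replies"` +//	} +//	_ = decoder.Pretouch(reflect.TypeOf(Post{}), option.WithCompileRecursiveDepth(2))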
+func Pretouch(vt reflect.Type, opts ...option.CompileOption) error { + cfg := option.DefaultCompileOptions() + for _, opt := range opts { + opt(&cfg) + } + return pretouchRec(map[reflect.Type]bool{vt:true}, cfg) +} + +func pretouchType(_vt reflect.Type, opts option.CompileOptions) (map[reflect.Type]bool, error) { + /* compile function */ + compiler := newCompiler().apply(opts) + decoder := func(vt *rt.GoType, _ ...interface{}) (interface{}, error) { + if pp, err := compiler.compile(_vt); err != nil { + return nil, err + } else { + as := newAssembler(pp) + as.name = _vt.String() + return as.Load(), nil + } + } + + /* find or compile */ + vt := rt.UnpackType(_vt) + if val := programCache.Get(vt); val != nil { + return nil, nil + } else if _, err := programCache.Compute(vt, decoder); err == nil { + return compiler.rec, nil + } else { + return nil, err + } +} + +func pretouchRec(vtm map[reflect.Type]bool, opts option.CompileOptions) error { + if opts.RecursiveDepth < 0 || len(vtm) == 0 { + return nil + } + next := make(map[reflect.Type]bool) + for vt := range(vtm) { + sub, err := pretouchType(vt, opts) + if err != nil { + return err + } + for svt := range(sub) { + next[svt] = true + } + } + opts.RecursiveDepth -= 1 + return pretouchRec(next, opts) +} + +// Skip skips only one json value, and returns first non-blank character position and its ending position if it is valid. +// Otherwise, returns negative error code using start and invalid character position using end +func Skip(data []byte) (start int, end int) { + s := rt.Mem2Str(data) + p := 0 + m := types.NewStateMachine() + ret := native.SkipOne(&s, &p, m, uint64(0)) + types.FreeStateMachine(m) + return ret, p +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/decoder/errors.go b/vendor/github.com/bytedance/sonic/decoder/errors.go new file mode 100644 index 0000000..c905fdf --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/errors.go @@ -0,0 +1,181 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package decoder + +import ( + `encoding/json` + `errors` + `fmt` + `reflect` + `strconv` + `strings` + + `github.com/bytedance/sonic/internal/native/types` + `github.com/bytedance/sonic/internal/rt` +) + +type SyntaxError struct { + Pos int + Src string + Code types.ParsingError + Msg string +} + +func (self SyntaxError) Error() string { + return fmt.Sprintf("%q", self.Description()) +} + +func (self SyntaxError) Description() string { + return "Syntax error " + self.description() +}
+ +func (self SyntaxError) description() string { + i := 16 + p := self.Pos - i + q := self.Pos + i + + /* check for empty source */ + if self.Src == "" { + return fmt.Sprintf("no sources available: %#v", self) + } + + /* prevent slicing before the beginning */ + if p < 0 { + p, q, i = 0, q - p, i + p + } + + /* prevent slicing beyond the end */ + if n := len(self.Src); q > n { + n = q - n + q = len(self.Src) + + /* move the left bound if possible */ + if p > n { + i += n + p -= n + } + } + + /* left and right length */ + x := clamp_zero(i) + y := clamp_zero(q - p - i - 1) + + /* compose the error description */ + return fmt.Sprintf( + "at index %d: %s\n\n\t%s\n\t%s^%s\n", + self.Pos, + self.Message(), + self.Src[p:q], + strings.Repeat(".", x), + strings.Repeat(".", y), + ) +} + +func (self SyntaxError) Message() string { + if self.Msg == "" { + return self.Code.Message() + } + return self.Msg +} + +func clamp_zero(v int) int { + if v < 0 { + return 0 + } else { + return v + } +}
+ +/** JIT Error Helpers **/ + +var stackOverflow = &json.UnsupportedValueError { + Str : "Value nesting too deep", + Value : reflect.ValueOf("..."), +} + +//go:nosplit +func error_wrap(src string, pos int, code types.ParsingError) error { + return SyntaxError { + Pos : pos, + Src : src, + Code : code, + } +} + +//go:nosplit +func error_type(vt *rt.GoType) error { + return &json.UnmarshalTypeError{Type: vt.Pack()} +} + +type MismatchTypeError struct { + Pos int + Src string + Type reflect.Type +} + +func switchJSONType(src string, pos int) string { + var val string + switch src[pos] { + case 'f': fallthrough + case 't': val = "bool" + case '"': val = "string" + case '{': val = "object" + case '[': val = "array" + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': val = "number" + } + return val +}
+ +func (self MismatchTypeError) Error() string { + se := SyntaxError { + Pos : self.Pos, + Src : self.Src, + Code : types.ERR_MISMATCH, + } + return fmt.Sprintf("Mismatch type %s with value %s %q", self.Type.String(), switchJSONType(self.Src, self.Pos), se.description()) +} + +func (self MismatchTypeError) Description() string { + se := SyntaxError { + Pos : self.Pos, + Src : self.Src, + Code : types.ERR_MISMATCH, + } + return fmt.Sprintf("Mismatch type %s with value %s %s", self.Type.String(), switchJSONType(self.Src, self.Pos), se.description()) +} + +//go:nosplit +func error_mismatch(src string, pos int, vt *rt.GoType) error { + return &MismatchTypeError { + Pos : pos, + Src : src, + Type : vt.Pack(), + } +} + +//go:nosplit +func error_field(name string) error { + return errors.New("json: unknown field " + strconv.Quote(name)) +} + +//go:nosplit +func error_value(value string, vtype reflect.Type) error { + return &json.UnmarshalTypeError { + Type : vtype, + Value : value, + } +}
 diff --git a/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go116.go b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go116.go new file mode 100644 index 0000000..b597043 --- /dev/null +++ 
b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go116.go @@ -0,0 +1,776 @@ +// +build go1.15,!go1.17 + +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package decoder + +import ( + `encoding/json` + `fmt` + `reflect` + `strconv` + + `github.com/bytedance/sonic/internal/jit` + `github.com/bytedance/sonic/internal/native` + `github.com/bytedance/sonic/internal/native/types` + `github.com/twitchyliquid64/golang-asm/obj` + `github.com/twitchyliquid64/golang-asm/obj/x86` +) + +/** Crucial Registers: + * + * ST(BX) : ro, decoder stack + * DF(R10) : ro, decoder flags + * EP(R11) : wo, error pointer + * IP(R12) : ro, input pointer + * IL(R13) : ro, input length + * IC(R14) : rw, input cursor + * VP(R15) : ro, value pointer (to an interface{}) + */ + +const ( + _VD_args = 8 // 8 bytes for passing arguments to this functions + _VD_fargs = 64 // 64 bytes for passing arguments to other Go functions + _VD_saves = 40 // 40 bytes for saving the registers before CALL instructions + _VD_locals = 88 // 88 bytes for local variables +) + +const ( + _VD_offs = _VD_fargs + _VD_saves + _VD_locals + _VD_size = _VD_offs + 8 // 8 bytes for the parent frame pointer +) + +var ( + _VAR_ss = _VAR_ss_Vt + _VAR_df = jit.Ptr(_SP, _VD_fargs + _VD_saves) +) + +var ( + _VAR_ss_Vt = jit.Ptr(_SP, _VD_fargs + _VD_saves + 8) + _VAR_ss_Dv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 16) + _VAR_ss_Iv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 24) + _VAR_ss_Ep = jit.Ptr(_SP, _VD_fargs + _VD_saves + 32) + _VAR_ss_Db = jit.Ptr(_SP, _VD_fargs + _VD_saves + 40) + _VAR_ss_Dc = jit.Ptr(_SP, _VD_fargs + _VD_saves + 48) +) + +var ( + _VAR_cs_LR = jit.Ptr(_SP, _VD_fargs + _VD_saves + 56) + _VAR_cs_p = jit.Ptr(_SP, _VD_fargs + _VD_saves + 64) + _VAR_cs_n = jit.Ptr(_SP, _VD_fargs + _VD_saves + 72) + _VAR_cs_d = jit.Ptr(_SP, _VD_fargs + _VD_saves + 80) +) + +type _ValueDecoder struct { + jit.BaseAssembler +} + +func (self *_ValueDecoder) build() uintptr { + self.Init(self.compile) + return *(*uintptr)(self.Load("decode_value", _VD_size, _VD_args, argPtrs_generic, localPtrs_generic)) +} + +/** Function Calling Helpers **/ + +func (self *_ValueDecoder) save(r ...obj.Addr) { + for i, v := range r { + if i > _VD_saves / 8 - 1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _VD_fargs + int64(i) * 8)) + } + } +} + +func (self *_ValueDecoder) load(r ...obj.Addr) { + for i, v := range r { + if i > _VD_saves / 8 - 1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _VD_fargs + int64(i) * 8), v) + } + } +} + +func (self *_ValueDecoder) call(fn obj.Addr) { + self.Emit("MOVQ", fn, _AX) // MOVQ ${fn}, AX + self.Rjmp("CALL", _AX) // CALL AX +} + +func (self *_ValueDecoder) call_go(fn obj.Addr) { + self.save(_REG_go...) // SAVE $REG_go + self.call(fn) // CALL ${fn} + self.load(_REG_go...) 
// LOAD $REG_go +} + +/** Decoder Assembler **/ + +const ( + _S_val = iota + 1 + _S_arr + _S_arr_0 + _S_obj + _S_obj_0 + _S_obj_delim + _S_obj_sep +) + +const ( + _S_omask_key = (1 << _S_obj_0) | (1 << _S_obj_sep) + _S_omask_end = (1 << _S_obj_0) | (1 << _S_obj) + _S_vmask = (1 << _S_val) | (1 << _S_arr_0) +) + +const ( + _A_init_len = 1 + _A_init_cap = 16 +) + +const ( + _ST_Sp = 0 + _ST_Vt = _PtrBytes + _ST_Vp = _PtrBytes * (types.MAX_RECURSE + 1) +) + +var ( + _V_true = jit.Imm(int64(pbool(true))) + _V_false = jit.Imm(int64(pbool(false))) + _F_value = jit.Imm(int64(native.S_value)) +) + +var ( + _V_max = jit.Imm(int64(types.V_MAX)) + _E_eof = jit.Imm(int64(types.ERR_EOF)) + _E_invalid = jit.Imm(int64(types.ERR_INVALID_CHAR)) + _E_recurse = jit.Imm(int64(types.ERR_RECURSE_EXCEED_MAX)) +) + +var ( + _F_convTslice = jit.Func(convTslice) + _F_convTstring = jit.Func(convTstring) + _F_invalid_vtype = jit.Func(invalid_vtype) +) + +var ( + _T_map = jit.Type(reflect.TypeOf((map[string]interface{})(nil))) + _T_bool = jit.Type(reflect.TypeOf(false)) + _T_int64 = jit.Type(reflect.TypeOf(int64(0))) + _T_eface = jit.Type(reflect.TypeOf((*interface{})(nil)).Elem()) + _T_slice = jit.Type(reflect.TypeOf(([]interface{})(nil))) + _T_string = jit.Type(reflect.TypeOf("")) + _T_number = jit.Type(reflect.TypeOf(json.Number(""))) + _T_float64 = jit.Type(reflect.TypeOf(float64(0))) +) + +var _R_tab = map[int]string { + '[': "_decode_V_ARRAY", + '{': "_decode_V_OBJECT", + ':': "_decode_V_KEY_SEP", + ',': "_decode_V_ELEM_SEP", + ']': "_decode_V_ARRAY_END", + '}': "_decode_V_OBJECT_END", +} + +func (self *_ValueDecoder) compile() { + self.Emit("SUBQ", jit.Imm(_VD_size), _SP) // SUBQ $_VD_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _VD_offs)) // MOVQ BP, _VD_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, _VD_offs), _BP) // LEAQ _VD_offs(SP), BP + + /* initialize the state machine */ + self.Emit("XORL", _CX, _CX) // XORL CX, CX + self.Emit("MOVQ", _DF, _VAR_df) // MOVQ DF, df + /* initialize digital buffer first */ + self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_ss_Dc) // MOVQ $_MaxDigitNums, ss.Dcap + self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX + self.Emit("MOVQ", _AX, _VAR_ss_Db) // MOVQ AX, ss.Dbuf + /* add ST offset */ + self.Emit("ADDQ", jit.Imm(_FsmOffset), _ST) // ADDQ _FsmOffset, _ST + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WriteRecNotAX(0, _VP, jit.Ptr(_ST, _ST_Vp), false) // MOVQ VP, ST.Vp[0] + self.Emit("MOVQ", jit.Imm(_S_val), jit.Ptr(_ST, _ST_Vt)) // MOVQ _S_val, ST.Vt[0] + self.Sjmp("JMP" , "_next") // JMP _next + + /* set the value from previous round */ + self.Link("_set_value") // _set_value: + self.Emit("MOVL" , jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC" , "_vtype_error") // JNC _vtype_error + self.Emit("XORL" , _SI, _SI) // XORL SI, SI + self.Emit("SUBQ" , jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp + self.Emit("XCHGQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // XCHGQ ST.Vp[CX], SI + self.Emit("MOVQ" , _R8, jit.Ptr(_SI, 0)) // MOVQ R8, (SI) + self.WriteRecNotAX(1, _R9, jit.Ptr(_SI, 8), false) // MOVQ R9, 8(SI) + + /* check for value stack */ + self.Link("_next") // _next: + self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _AX) // MOVQ ST.Sp, AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , "_return") // JS _return + 
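+ /* e.g. with input " [1]" the leading space is consumed by the in-line checks below, and '[' is then dispatched through the jump table straight to "_decode_V_ARRAY" without calling the native scanner */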
+ /* fast path: test up to 4 characters manually */ + self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX + self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA" , "_decode_fast") // JA _decode_fast + self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX + self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + + /* at least 1 to 3 spaces */ + for i := 0; i < 3; i++ { + self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA" , "_decode_fast") // JA _decode_fast + self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX + self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + } + + /* at least 4 spaces */ + self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + + /* fast path: use lookup table to select decoder */ + self.Link("_decode_fast") // _decode_fast: + self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI + self.Sref("_decode_tab", 4) // .... &_decode_tab + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX + self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ" , "_decode_native") // JZ _decode_native + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP" , _AX) // JMP AX + + /* decode with native decoder */ + self.Link("_decode_native") // _decode_native: + self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI + self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI + self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX + self.Emit("LEAQ", _VAR_ss, _CX) // LEAQ ss, CX + self.Emit("MOVQ", _VAR_df, _R8) // MOVQ $df, R8 + self.Emit("BTSQ", jit.Imm(_F_allow_control), _R8) // ANDQ $1<<_F_allow_control, R8 + self.call(_F_value) // CALL value + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + + /* check for errors */ + self.Emit("MOVQ" , _VAR_ss_Vt, _AX) // MOVQ ss.Vt, AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , "_parsing_error") + self.Sjmp("JZ" , "_invalid_vtype") // JZ _invalid_vtype + self.Emit("CMPQ" , _AX, _V_max) // CMPQ AX, _V_max + self.Sjmp("JA" , "_invalid_vtype") // JA _invalid_vtype + + /* jump table selector */ + self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI + self.Sref("_switch_table", 4) // .... &_switch_table + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, -4), _AX) // MOVLQSX -4(DI)(AX*4), AX + self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP" , _AX) // JMP AX + + /** V_EOF **/ + self.Link("_decode_V_EOF") // _decode_V_EOF: + self.Emit("MOVL", _E_eof, _EP) // MOVL _E_eof, EP + self.Sjmp("JMP" , "_error") // JMP _error + + /** V_NULL **/ + self.Link("_decode_V_NULL") // _decode_V_NULL: + self.Emit("XORL", _R8, _R8) // XORL R8, R8 + self.Emit("XORL", _R9, _R9) // XORL R9, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /** V_TRUE **/ + self.Link("_decode_V_TRUE") // _decode_V_TRUE: + self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 + // TODO: maybe modified by users? 
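+ // IC has already been advanced past the literal by the native scanner, so the
+ // "LEAQ -4(IC), DI" below recovers the offset where "true" started (V_FALSE
+ // uses -5 for its longer literal); "_vtype_error" rewinds IC from DI if the
+ // value is rejected.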
+ self.Emit("MOVQ", _V_true, _R9) // MOVQ _V_true, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /** V_FALSE **/ + self.Link("_decode_V_FALSE") // _decode_V_FALSE: + self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 + self.Emit("MOVQ", _V_false, _R9) // MOVQ _V_false, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -5), _DI) // LEAQ -5(IC), DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /** V_ARRAY **/ + self.Link("_decode_V_ARRAY") // _decode_V_ARRAY + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char + + /* create a new array */ + self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", jit.Imm(_A_init_len), jit.Ptr(_SP, 8)) // MOVQ _A_init_len, 8(SP) + self.Emit("MOVQ", jit.Imm(_A_init_cap), jit.Ptr(_SP, 16)) // MOVQ _A_init_cap, 16(SP) + self.call_go(_F_makeslice) // CALL_GO runtime.makeslice + self.Emit("MOVQ", jit.Ptr(_SP, 24), _DX) // MOVQ 24(SP), DX + + /* pack into an interface */ + self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) + self.Emit("MOVQ", jit.Imm(_A_init_len), jit.Ptr(_SP, 8)) // MOVQ _A_init_len, 8(SP) + self.Emit("MOVQ", jit.Imm(_A_init_cap), jit.Ptr(_SP, 16)) // MOVQ _A_init_cap, 16(SP) + self.call_go(_F_convTslice) // CALL_GO runtime.convTslice + self.Emit("MOVQ", jit.Ptr(_SP, 24), _R8) // MOVQ 24(SP), R8 + + /* replace current state with an array */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Imm(_S_arr), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr, ST.Vt[CX] + self.Emit("MOVQ", _T_slice, _AX) // MOVQ _T_slice, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SI, 0)) // MOVQ AX, (SI) + self.WriteRecNotAX(2, _R8, jit.Ptr(_SI, 8), false) // MOVQ R8, 8(SI) + + /* add a new slot for the first element */ + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow + self.Emit("MOVQ", jit.Ptr(_R8, 0), _AX) // MOVQ (R8), AX + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WritePtrAX(3, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] + self.Emit("MOVQ", jit.Imm(_S_arr_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr_0, ST.Vt[CX] + self.Sjmp("JMP" , "_next") // JMP _next + + /** V_OBJECT **/ + self.Link("_decode_V_OBJECT") // _decode_V_OBJECT: + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char + self.call_go(_F_makemap_small) // CALL_GO runtime.makemap_small + self.Emit("MOVQ", jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Imm(_S_obj_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj, ST.Vt[CX] + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX + self.Emit("MOVQ", _DX, 
jit.Ptr(_SI, 0)) // MOVQ DX, (SI) + self.WritePtrAX(4, jit.Ptr(_SI, 8), false) // MOVQ AX, 8(SI) + self.Sjmp("JMP" , "_next") // JMP _next + + /** V_STRING **/ + self.Link("_decode_V_STRING") // _decode_V_STRING: + self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX + self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX + self.Emit("SUBQ", _CX, _AX) // SUBQ CX, AX + + /* check for escapes */ + self.Emit("CMPQ", _VAR_ss_Ep, jit.Imm(-1)) // CMPQ ss.Ep, $-1 + self.Sjmp("JNE" , "_unquote") // JNE _unquote + self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX + self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _R8) // LEAQ (IP)(CX), R8 + self.Byte(0x48, 0x8d, 0x3d) // LEAQ (PC), DI + self.Sref("_copy_string_end", 4) + self.Emit("BTQ", jit.Imm(_F_copy_string), _VAR_df) + self.Sjmp("JC", "copy_string") + self.Link("_copy_string_end") + self.Emit("XORL", _DX, _DX) // XORL DX, DX + /* strings with no escape sequences */ + self.Link("_noescape") // _noescape: + self.Emit("MOVL", jit.Imm(_S_omask_key), _DI) // MOVL _S_omask, DI + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _SI) // MOVQ ST.Vt[CX], SI + self.Emit("BTQ" , _SI, _DI) // BTQ SI, DI + self.Sjmp("JC" , "_object_key") // JC _object_key + + /* check for pre-packed strings, avoid 1 allocation */ + self.Emit("TESTQ", _DX, _DX) // TESTQ DX, DX + self.Sjmp("JNZ" , "_packed_str") // JNZ _packed_str + self.Emit("MOVQ" , _R8, jit.Ptr(_SP, 0)) // MOVQ R8, (SP) + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.call_go(_F_convTstring) // CALL_GO runtime.convTstring + self.Emit("MOVQ" , jit.Ptr(_SP, 16), _R9) // MOVQ 16(SP), R9 + + /* packed string already in R9 */ + self.Link("_packed_str") // _packed_str: + self.Emit("MOVQ", _T_string, _R8) // MOVQ _T_string, R8 + self.Emit("MOVQ", _VAR_ss_Iv, _DI) // MOVQ ss.Iv, DI + self.Emit("SUBQ", jit.Imm(1), _DI) // SUBQ $1, DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /* the string is an object key, get the map */ + self.Link("_object_key") + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + + /* add a new delimiter */ + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.Emit("MOVQ", jit.Imm(_S_obj_delim), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_delim, ST.Vt[CX] + + /* add a new slot int the map */ + self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX + self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP) + self.Emit("MOVQ", _SI, jit.Ptr(_SP, 8)) // MOVQ SI, 8(SP) + self.Emit("MOVQ", _R8, jit.Ptr(_SP, 16)) // MOVQ R9, 16(SP) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 24)) // MOVQ AX, 24(SP) + self.call_go(_F_mapassign_faststr) // CALL_GO runtime.mapassign_faststr + self.Emit("MOVQ", jit.Ptr(_SP, 32), _AX) // MOVQ 32(SP), AX + + /* add to the pointer stack */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.WritePtrAX(6, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] + self.Sjmp("JMP" , "_next") // JMP _next + + /* allocate memory to store the string header and unquoted result */ + self.Link("_unquote") // _unquote: + self.Emit("ADDQ", jit.Imm(15), _AX) // ADDQ $15, AX + self.Emit("MOVQ", _T_byte, _CX) // MOVQ _T_byte, CX + 
self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.Emit("MOVB", jit.Imm(0), jit.Ptr(_SP, 16)) // MOVB $0, 16(SP) + self.call_go(_F_mallocgc) // CALL_GO runtime.mallocgc + self.Emit("MOVQ", jit.Ptr(_SP, 24), _R9) // MOVQ 24(SP), R9 + + /* prepare the unquoting parameters */ + self.Emit("MOVQ" , _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX + self.Emit("LEAQ" , jit.Sib(_IP, _CX, 1, 0), _DI) // LEAQ (IP)(CX), DI + self.Emit("NEGQ" , _CX) // NEGQ CX + self.Emit("LEAQ" , jit.Sib(_IC, _CX, 1, -1), _SI) // LEAQ -1(IC)(CX), SI + self.Emit("LEAQ" , jit.Ptr(_R9, 16), _DX) // LEAQ 16(R8), DX + self.Emit("LEAQ" , _VAR_ss_Ep, _CX) // LEAQ ss.Ep, CX + self.Emit("XORL" , _R8, _R8) // XORL R8, R8 + self.Emit("BTQ" , jit.Imm(_F_disable_urc), _VAR_df) // BTQ ${_F_disable_urc}, fv + self.Emit("SETCC", _R8) // SETCC R8 + self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 + + /* unquote the string, with R9 been preserved */ + self.save(_R9) // SAVE R9 + self.call(_F_unquote) // CALL unquote + self.load(_R9) // LOAD R9 + + /* check for errors */ + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , "_unquote_error") // JS _unquote_error + self.Emit("MOVL" , jit.Imm(1), _DX) // MOVL $1, DX + self.Emit("LEAQ" , jit.Ptr(_R9, 16), _R8) // ADDQ $16, R8 + self.Emit("MOVQ" , _R8, jit.Ptr(_R9, 0)) // MOVQ R8, (R9) + self.Emit("MOVQ" , _AX, jit.Ptr(_R9, 8)) // MOVQ AX, 8(R9) + self.Sjmp("JMP" , "_noescape") // JMP _noescape + + /** V_DOUBLE **/ + self.Link("_decode_V_DOUBLE") // _decode_V_DOUBLE: + self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df + self.Sjmp("JC" , "_use_number") // JC _use_number + self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0 + self.Sjmp("JMP" , "_use_float64") // JMP _use_float64 + + /** V_INTEGER **/ + self.Link("_decode_V_INTEGER") // _decode_V_INTEGER: + self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df + self.Sjmp("JC" , "_use_number") // JC _use_number + self.Emit("BTQ" , jit.Imm(_F_use_int64), _VAR_df) // BTQ _F_use_int64, df + self.Sjmp("JC" , "_use_int64") // JC _use_int64 + self.Emit("MOVQ" , _VAR_ss_Iv, _AX) // MOVQ ss.Iv, AX + self.Emit("CVTSQ2SD", _AX, _X0) // CVTSQ2SD AX, X0 + + /* represent numbers as `float64` */ + self.Link("_use_float64") // _use_float64: + self.Emit("MOVSD", _X0, jit.Ptr(_SP, 0)) // MOVSD X0, (SP) + self.call_go(_F_convT64) // CALL_GO runtime.convT64 + self.Emit("MOVQ" , _T_float64, _R8) // MOVQ _T_float64, R8 + self.Emit("MOVQ" , jit.Ptr(_SP, 8), _R9) // MOVQ 8(SP), R9 + self.Emit("MOVQ" , _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /* represent numbers as `json.Number` */ + self.Link("_use_number") // _use_number + self.Emit("MOVQ", _VAR_ss_Ep, _AX) // MOVQ ss.Ep, AX + self.Emit("LEAQ", jit.Sib(_IP, _AX, 1, 0), _SI) // LEAQ (IP)(AX), SI + self.Emit("MOVQ", _IC, _CX) // MOVQ IC, CX + self.Emit("SUBQ", _AX, _CX) // SUBQ AX, CX + self.Emit("MOVQ", _SI, jit.Ptr(_SP, 0)) // MOVQ SI, (SP) + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.call_go(_F_convTstring) // CALL_GO runtime.convTstring + self.Emit("MOVQ", _T_number, _R8) // MOVQ _T_number, R8 + self.Emit("MOVQ", jit.Ptr(_SP, 16), _R9) // MOVQ 16(SP), R9 + self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /* represent numbers as `int64` */ + self.Link("_use_int64") // _use_int64: + self.Emit("MOVQ", _VAR_ss_Iv, _AX) 
// MOVQ ss.Iv, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.call_go(_F_convT64) // CALL_GO runtime.convT64 + self.Emit("MOVQ", _T_int64, _R8) // MOVQ _T_int64, R8 + self.Emit("MOVQ", jit.Ptr(_SP, 8), _R9) // MOVQ 8(SP), R9 + self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /** V_KEY_SEP **/ + self.Link("_decode_V_KEY_SEP") // _decode_V_KEY_SEP: + // self.Byte(0xcc) + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ", _AX, jit.Imm(_S_obj_delim)) // CMPQ AX, _S_obj_delim + self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char + self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX] + self.Emit("MOVQ", jit.Imm(_S_obj), jit.Sib(_ST, _CX, 8, _ST_Vt - 8)) // MOVQ _S_obj, ST.Vt[CX - 1] + self.Sjmp("JMP" , "_next") // JMP _next + + /** V_ELEM_SEP **/ + self.Link("_decode_V_ELEM_SEP") // _decode_V_ELEM_SEP: + self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ" , _AX, jit.Imm(_S_arr)) // CMPQ _AX, _S_arr + self.Sjmp("JE" , "_array_sep") // JZ _next + self.Emit("CMPQ" , _AX, jit.Imm(_S_obj)) // CMPQ _AX, _S_arr + self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char + self.Emit("MOVQ" , jit.Imm(_S_obj_sep), jit.Sib(_ST, _CX, 8, _ST_Vt)) + self.Sjmp("JMP" , "_next") // JMP _next + + /* arrays */ + self.Link("_array_sep") + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _DX) // MOVQ 8(SI), DX + self.Emit("CMPQ", _DX, jit.Ptr(_SI, 16)) // CMPQ DX, 16(SI) + self.Sjmp("JAE" , "_array_more") // JAE _array_more + + /* add a slot for the new element */ + self.Link("_array_append") // _array_append: + self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_SI, 8)) // ADDQ $1, 8(SI) + self.Emit("MOVQ", jit.Ptr(_SI, 0), _SI) // MOVQ (SI), SI + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE" , "_stack_overflow") + self.Emit("SHLQ", jit.Imm(1), _DX) // SHLQ $1, DX + self.Emit("LEAQ", jit.Sib(_SI, _DX, 8, 0), _SI) // LEAQ (SI)(DX*8), SI + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WriteRecNotAX(7 , _SI, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ SI, ST.Vp[CX] + self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX} + self.Sjmp("JMP" , "_next") // JMP _next + + /** V_ARRAY_END **/ + self.Link("_decode_V_ARRAY_END") // _decode_V_ARRAY_END: + self.Emit("XORL", _DX, _DX) // XORL DX, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ", _AX, jit.Imm(_S_arr_0)) // CMPQ AX, _S_arr_0 + self.Sjmp("JE" , "_first_item") // JE _first_item + self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) // CMPQ AX, _S_arr + self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char + self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp + self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] + self.Sjmp("JMP" , "_next") // JMP _next + + /* first element of an array */ + self.Link("_first_item") // _first_item: + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("SUBQ", 
jit.Imm(2), jit.Ptr(_ST, _ST_Sp)) // SUBQ $2, ST.Sp
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp - 8), _SI) // MOVQ ST.Vp[CX - 1], SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI
+ self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp - 8)) // MOVQ DX, ST.Vp[CX - 1]
+ self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX]
+ self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI)
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_OBJECT_END **/
+ self.Link("_decode_V_OBJECT_END") // _decode_V_OBJECT_END:
+ self.Emit("MOVL", jit.Imm(_S_omask_end), _DX) // MOVL _S_omask_end, DX
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp
+ self.Emit("MOVQ", _AX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ AX, ST.Vp[CX]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* return from decoder */
+ self.Link("_return") // _return:
+ self.Emit("XORL", _EP, _EP) // XORL EP, EP
+ self.Emit("MOVQ", _EP, jit.Ptr(_ST, _ST_Vp)) // MOVQ EP, ST.Vp[0]
+ self.Link("_epilogue") // _epilogue:
+ self.Emit("SUBQ", jit.Imm(_FsmOffset), _ST) // SUBQ _FsmOffset, _ST
+ self.Emit("MOVQ", jit.Ptr(_SP, _VD_offs), _BP) // MOVQ _VD_offs(SP), BP
+ self.Emit("ADDQ", jit.Imm(_VD_size), _SP) // ADDQ $_VD_size, SP
+ self.Emit("RET") // RET
+
+ /* array expand */
+ self.Link("_array_more") // _array_more:
+ self.Emit("MOVQ" , _T_eface, _AX) // MOVQ _T_eface, AX
+ self.Emit("MOVOU", jit.Ptr(_SI, 0), _X0) // MOVOU (SI), X0
+ self.Emit("MOVQ" , jit.Ptr(_SI, 16), _DX) // MOVQ 16(SI), DX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP)
+ self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 24)) // MOVQ DX, 24(SP)
+ self.Emit("SHLQ" , jit.Imm(1), _DX) // SHLQ $1, DX
+ self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 32)) // MOVQ DX, 32(SP)
+ self.call_go(_F_growslice) // CALL_GO runtime.growslice
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _DI) // MOVQ 40(SP), DI
+ self.Emit("MOVQ" , jit.Ptr(_SP, 48), _DX) // MOVQ 48(SP), DX
+ self.Emit("MOVQ" , jit.Ptr(_SP, 56), _AX) // MOVQ 56(SP), AX
+
+ /* update the slice */
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI
+ self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SI, 16)) // MOVQ AX, 16(SI)
+ self.WriteRecNotAX(8 , _DI, jit.Ptr(_SI, 0), false) // MOVQ DI, (SI)
+ self.Sjmp("JMP" , "_array_append") // JMP _array_append
+
+ /* copy string */
+ self.Link("copy_string") // pointer: R8, length: AX, return addr: DI
+ // self.Byte(0xcc)
+ self.Emit("MOVQ", _R8, _VAR_cs_p)
+ self.Emit("MOVQ", _AX, _VAR_cs_n)
+ self.Emit("MOVQ", _DI, _VAR_cs_LR)
+ self.Emit("MOVQ", _T_byte, jit.Ptr(_SP, 0))
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8))
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16))
+ self.call_go(_F_makeslice)
+ self.Emit("MOVQ", jit.Ptr(_SP, 24), _R8)
+ self.Emit("MOVQ", _R8, _VAR_cs_d)
+ self.Emit("MOVQ", _R8, jit.Ptr(_SP, 0))
+ self.Emit("MOVQ", _VAR_cs_p, _R8)
+ self.Emit("MOVQ", _R8, jit.Ptr(_SP, 8))
+ self.Emit("MOVQ", _VAR_cs_n, _AX)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16))
+ self.call_go(_F_memmove)
+ self.Emit("MOVQ", _VAR_cs_d, _R8)
+
self.Emit("MOVQ", _VAR_cs_n, _AX) + self.Emit("MOVQ", _VAR_cs_LR, _DI) + // self.Byte(0xcc) + self.Rjmp("JMP", _DI) + + /* error handlers */ + self.Link("_stack_overflow") + self.Emit("MOVL" , _E_recurse, _EP) // MOVQ _E_recurse, EP + self.Sjmp("JMP" , "_error") // JMP _error + self.Link("_vtype_error") // _vtype_error: + self.Emit("MOVQ" , _DI, _IC) // MOVQ DI, IC + self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP + self.Sjmp("JMP" , "_error") // JMP _error + self.Link("_invalid_char") // _invalid_char: + self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC + self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP + self.Sjmp("JMP" , "_error") // JMP _error + self.Link("_unquote_error") // _unquote_error: + self.Emit("MOVQ" , _VAR_ss_Iv, _IC) // MOVQ ss.Iv, IC + self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC + self.Link("_parsing_error") // _parsing_error: + self.Emit("NEGQ" , _AX) // NEGQ AX + self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP + self.Link("_error") // _error: + self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) + self.Sjmp("JMP" , "_epilogue") // JMP _epilogue + + /* invalid value type, never returns */ + self.Link("_invalid_vtype") + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.call(_F_invalid_vtype) // CALL invalid_type + self.Emit("UD2") // UD2 + + /* switch jump table */ + self.Link("_switch_table") // _switch_table: + self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 + self.Sref("_decode_V_NULL", -4) // SREF &_decode_V_NULL, $-4 + self.Sref("_decode_V_TRUE", -8) // SREF &_decode_V_TRUE, $-8 + self.Sref("_decode_V_FALSE", -12) // SREF &_decode_V_FALSE, $-12 + self.Sref("_decode_V_ARRAY", -16) // SREF &_decode_V_ARRAY, $-16 + self.Sref("_decode_V_OBJECT", -20) // SREF &_decode_V_OBJECT, $-20 + self.Sref("_decode_V_STRING", -24) // SREF &_decode_V_STRING, $-24 + self.Sref("_decode_V_DOUBLE", -28) // SREF &_decode_V_DOUBLE, $-28 + self.Sref("_decode_V_INTEGER", -32) // SREF &_decode_V_INTEGER, $-32 + self.Sref("_decode_V_KEY_SEP", -36) // SREF &_decode_V_KEY_SEP, $-36 + self.Sref("_decode_V_ELEM_SEP", -40) // SREF &_decode_V_ELEM_SEP, $-40 + self.Sref("_decode_V_ARRAY_END", -44) // SREF &_decode_V_ARRAY_END, $-44 + self.Sref("_decode_V_OBJECT_END", -48) // SREF &_decode_V_OBJECT_END, $-48 + + /* fast character lookup table */ + self.Link("_decode_tab") // _decode_tab: + self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 + + /* generate rest of the tabs */ + for i := 1; i < 256; i++ { + if to, ok := _R_tab[i]; ok { + self.Sref(to, -int64(i) * 4) + } else { + self.Byte(0x00, 0x00, 0x00, 0x00) + } + } +} + +func (self *_ValueDecoder) WritePtrAX(i int, rec obj.Addr, saveDI bool) { + self.Emit("MOVQ", _V_writeBarrier, _R10) + self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX + self.Rjmp("CALL", _R10) + if saveDI { + self.load(_DI) + } + self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", _AX, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} + +func (self *_ValueDecoder) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool) { + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _R10) + self.Emit("CMPL", 
jit.Ptr(_R10, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, _AX)
+ if saveDI {
+ self.save(_DI)
+ }
+ self.Emit("LEAQ", rec, _DI)
+ self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, R10
+ self.Rjmp("CALL", _R10)
+ if saveDI {
+ self.load(_DI)
+ }
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+}
+
+/** Generic Decoder **/
+
+var (
+ _subr_decode_value = new(_ValueDecoder).build()
+)
+
+//go:nosplit
+func invalid_vtype(vt types.ValueType) {
+ throw(fmt.Sprintf("invalid value type: %d", vt))
+}
diff --git a/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117.go b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117.go
new file mode 100644
index 0000000..df1cd9f
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117.go
@@ -0,0 +1,772 @@
+//go:build go1.17 && !go1.21
+// +build go1.17,!go1.21
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `encoding/json`
+ `fmt`
+ `reflect`
+ `strconv`
+
+ `github.com/bytedance/sonic/internal/jit`
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/twitchyliquid64/golang-asm/obj`
+ `github.com/twitchyliquid64/golang-asm/obj/x86`
+)
+
+/** Crucial Registers:
+ *
+ * ST(R13) && 0(SP) : ro, decoder stack
+ * DF(AX) : ro, decoder flags
+ * EP(BX) : wo, error pointer
+ * IP(R10) : ro, input pointer
+ * IL(R12) : ro, input length
+ * IC(R11) : rw, input cursor
+ * VP(R15) : ro, value pointer (to an interface{})
+ */
+
+const (
+ _VD_args = 8 // 8 bytes for passing arguments to this function
+ _VD_fargs = 64 // 64 bytes for passing arguments to other Go functions
+ _VD_saves = 48 // 48 bytes for saving the registers before CALL instructions
+ _VD_locals = 96 // 96 bytes for local variables
+)
+
+const (
+ _VD_offs = _VD_fargs + _VD_saves + _VD_locals
+ _VD_size = _VD_offs + 8 // 8 bytes for the parent frame pointer
+)
+
+var (
+ _VAR_ss = _VAR_ss_Vt
+ _VAR_df = jit.Ptr(_SP, _VD_fargs + _VD_saves)
+)
+
+var (
+ _VAR_ss_Vt = jit.Ptr(_SP, _VD_fargs + _VD_saves + 8)
+ _VAR_ss_Dv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 16)
+ _VAR_ss_Iv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 24)
+ _VAR_ss_Ep = jit.Ptr(_SP, _VD_fargs + _VD_saves + 32)
+ _VAR_ss_Db = jit.Ptr(_SP, _VD_fargs + _VD_saves + 40)
+ _VAR_ss_Dc = jit.Ptr(_SP, _VD_fargs + _VD_saves + 48)
+)
+
+var (
+ _VAR_R9 = jit.Ptr(_SP, _VD_fargs + _VD_saves + 56)
+)
+type _ValueDecoder struct {
+ jit.BaseAssembler
+}
+
+var (
+ _VAR_cs_LR = jit.Ptr(_SP, _VD_fargs + _VD_saves + 64)
+ _VAR_cs_p = jit.Ptr(_SP, _VD_fargs + _VD_saves + 72)
+ _VAR_cs_n = jit.Ptr(_SP, _VD_fargs + _VD_saves + 80)
+ _VAR_cs_d = jit.Ptr(_SP, _VD_fargs + _VD_saves + 88)
+)
+
+func (self *_ValueDecoder) build() uintptr {
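+ // Init registers `compile` as the lazily-invoked code generator; Load is then
+ // expected to assemble it once, publish the code to the runtime under the
+ // name "decode_value", and attach argPtrs_generic/localPtrs_generic as the
+ // GC pointer bitmaps for its frame.
+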
self.Init(self.compile) + return *(*uintptr)(self.Load("decode_value", _VD_size, _VD_args, argPtrs_generic, localPtrs_generic)) +} + +/** Function Calling Helpers **/ + +func (self *_ValueDecoder) save(r ...obj.Addr) { + for i, v := range r { + if i > _VD_saves / 8 - 1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _VD_fargs + int64(i) * 8)) + } + } +} + +func (self *_ValueDecoder) load(r ...obj.Addr) { + for i, v := range r { + if i > _VD_saves / 8 - 1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _VD_fargs + int64(i) * 8), v) + } + } +} + +func (self *_ValueDecoder) call(fn obj.Addr) { + self.Emit("MOVQ", fn, _R9) // MOVQ ${fn}, AX + self.Rjmp("CALL", _R9) // CALL AX +} + +func (self *_ValueDecoder) call_go(fn obj.Addr) { + self.save(_REG_go...) // SAVE $REG_go + self.call(fn) // CALL ${fn} + self.load(_REG_go...) // LOAD $REG_go +} + +func (self *_ValueDecoder) callc(fn obj.Addr) { + self.Emit("XCHGQ", _IP, _BP) + self.call(fn) + self.Emit("XCHGQ", _IP, _BP) +} + +func (self *_ValueDecoder) call_c(fn obj.Addr) { + self.Emit("XCHGQ", _IC, _BX) + self.callc(fn) + self.Emit("XCHGQ", _IC, _BX) +} + +/** Decoder Assembler **/ + +const ( + _S_val = iota + 1 + _S_arr + _S_arr_0 + _S_obj + _S_obj_0 + _S_obj_delim + _S_obj_sep +) + +const ( + _S_omask_key = (1 << _S_obj_0) | (1 << _S_obj_sep) + _S_omask_end = (1 << _S_obj_0) | (1 << _S_obj) + _S_vmask = (1 << _S_val) | (1 << _S_arr_0) +) + +const ( + _A_init_len = 1 + _A_init_cap = 16 +) + +const ( + _ST_Sp = 0 + _ST_Vt = _PtrBytes + _ST_Vp = _PtrBytes * (types.MAX_RECURSE + 1) +) + +var ( + _V_true = jit.Imm(int64(pbool(true))) + _V_false = jit.Imm(int64(pbool(false))) + _F_value = jit.Imm(int64(native.S_value)) +) + +var ( + _V_max = jit.Imm(int64(types.V_MAX)) + _E_eof = jit.Imm(int64(types.ERR_EOF)) + _E_invalid = jit.Imm(int64(types.ERR_INVALID_CHAR)) + _E_recurse = jit.Imm(int64(types.ERR_RECURSE_EXCEED_MAX)) +) + +var ( + _F_convTslice = jit.Func(convTslice) + _F_convTstring = jit.Func(convTstring) + _F_invalid_vtype = jit.Func(invalid_vtype) +) + +var ( + _T_map = jit.Type(reflect.TypeOf((map[string]interface{})(nil))) + _T_bool = jit.Type(reflect.TypeOf(false)) + _T_int64 = jit.Type(reflect.TypeOf(int64(0))) + _T_eface = jit.Type(reflect.TypeOf((*interface{})(nil)).Elem()) + _T_slice = jit.Type(reflect.TypeOf(([]interface{})(nil))) + _T_string = jit.Type(reflect.TypeOf("")) + _T_number = jit.Type(reflect.TypeOf(json.Number(""))) + _T_float64 = jit.Type(reflect.TypeOf(float64(0))) +) + +var _R_tab = map[int]string { + '[': "_decode_V_ARRAY", + '{': "_decode_V_OBJECT", + ':': "_decode_V_KEY_SEP", + ',': "_decode_V_ELEM_SEP", + ']': "_decode_V_ARRAY_END", + '}': "_decode_V_OBJECT_END", +} + +func (self *_ValueDecoder) compile() { + self.Emit("SUBQ", jit.Imm(_VD_size), _SP) // SUBQ $_VD_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _VD_offs)) // MOVQ BP, _VD_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, _VD_offs), _BP) // LEAQ _VD_offs(SP), BP + + /* initialize the state machine */ + self.Emit("XORL", _CX, _CX) // XORL CX, CX + self.Emit("MOVQ", _DF, _VAR_df) // MOVQ DF, df + /* initialize digital buffer first */ + self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_ss_Dc) // MOVQ $_MaxDigitNums, ss.Dcap + self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX + self.Emit("MOVQ", _AX, _VAR_ss_Db) // MOVQ AX, ss.Dbuf + /* add ST offset */ + self.Emit("ADDQ", jit.Imm(_FsmOffset), _ST) // ADDQ _FsmOffset, _ST + self.Emit("MOVQ", _CX, jit.Ptr(_ST, 
_ST_Sp)) // MOVQ CX, ST.Sp + self.WriteRecNotAX(0, _VP, jit.Ptr(_ST, _ST_Vp), false) // MOVQ VP, ST.Vp[0] + self.Emit("MOVQ", jit.Imm(_S_val), jit.Ptr(_ST, _ST_Vt)) // MOVQ _S_val, ST.Vt[0] + self.Sjmp("JMP" , "_next") // JMP _next + + /* set the value from previous round */ + self.Link("_set_value") // _set_value: + self.Emit("MOVL" , jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC" , "_vtype_error") // JNC _vtype_error + self.Emit("XORL" , _SI, _SI) // XORL SI, SI + self.Emit("SUBQ" , jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp + self.Emit("XCHGQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // XCHGQ ST.Vp[CX], SI + self.Emit("MOVQ" , _R8, jit.Ptr(_SI, 0)) // MOVQ R8, (SI) + self.WriteRecNotAX(1, _R9, jit.Ptr(_SI, 8), false) // MOVQ R9, 8(SI) + + /* check for value stack */ + self.Link("_next") // _next: + self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _AX) // MOVQ ST.Sp, AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , "_return") // JS _return + + /* fast path: test up to 4 characters manually */ + self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX + self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA" , "_decode_fast") // JA _decode_fast + self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX + self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + + /* at least 1 to 3 spaces */ + for i := 0; i < 3; i++ { + self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' ' + self.Sjmp("JA" , "_decode_fast") // JA _decode_fast + self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX + self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + } + + /* at least 4 spaces */ + self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL + self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF + self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX + + /* fast path: use lookup table to select decoder */ + self.Link("_decode_fast") // _decode_fast: + self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI + self.Sref("_decode_tab", 4) // .... 
&_decode_tab + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX + self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ" , "_decode_native") // JZ _decode_native + self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC + self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP" , _AX) // JMP AX + + /* decode with native decoder */ + self.Link("_decode_native") // _decode_native: + self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI + self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI + self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX + self.Emit("LEAQ", _VAR_ss, _CX) // LEAQ ss, CX + self.Emit("MOVQ", _VAR_df, _R8) // MOVQ $df, R8 + self.Emit("BTSQ", jit.Imm(_F_allow_control), _R8) // ANDQ $1<<_F_allow_control, R8 + self.callc(_F_value) // CALL value + self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC + + /* check for errors */ + self.Emit("MOVQ" , _VAR_ss_Vt, _AX) // MOVQ ss.Vt, AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , "_parsing_error") + self.Sjmp("JZ" , "_invalid_vtype") // JZ _invalid_vtype + self.Emit("CMPQ" , _AX, _V_max) // CMPQ AX, _V_max + self.Sjmp("JA" , "_invalid_vtype") // JA _invalid_vtype + + /* jump table selector */ + self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI + self.Sref("_switch_table", 4) // .... &_switch_table + self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, -4), _AX) // MOVLQSX -4(DI)(AX*4), AX + self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX + self.Rjmp("JMP" , _AX) // JMP AX + + /** V_EOF **/ + self.Link("_decode_V_EOF") // _decode_V_EOF: + self.Emit("MOVL", _E_eof, _EP) // MOVL _E_eof, EP + self.Sjmp("JMP" , "_error") // JMP _error + + /** V_NULL **/ + self.Link("_decode_V_NULL") // _decode_V_NULL: + self.Emit("XORL", _R8, _R8) // XORL R8, R8 + self.Emit("XORL", _R9, _R9) // XORL R9, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /** V_TRUE **/ + self.Link("_decode_V_TRUE") // _decode_V_TRUE: + self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 + // TODO: maybe modified by users? 
+ self.Emit("MOVQ", _V_true, _R9) // MOVQ _V_true, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /** V_FALSE **/ + self.Link("_decode_V_FALSE") // _decode_V_FALSE: + self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8 + self.Emit("MOVQ", _V_false, _R9) // MOVQ _V_false, R9 + self.Emit("LEAQ", jit.Ptr(_IC, -5), _DI) // LEAQ -5(IC), DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /** V_ARRAY **/ + self.Link("_decode_V_ARRAY") // _decode_V_ARRAY + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char + + /* create a new array */ + self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX + self.Emit("MOVQ", jit.Imm(_A_init_len), _BX) // MOVQ _A_init_len, BX + self.Emit("MOVQ", jit.Imm(_A_init_cap), _CX) // MOVQ _A_init_cap, CX + self.call_go(_F_makeslice) // CALL_GO runtime.makeslice + + /* pack into an interface */ + self.Emit("MOVQ", jit.Imm(_A_init_len), _BX) // MOVQ _A_init_len, BX + self.Emit("MOVQ", jit.Imm(_A_init_cap), _CX) // MOVQ _A_init_cap, CX + self.call_go(_F_convTslice) // CALL_GO runtime.convTslice + self.Emit("MOVQ", _AX, _R8) // MOVQ AX, R8 + + /* replace current state with an array */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Imm(_S_arr), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr, ST.Vt[CX] + self.Emit("MOVQ", _T_slice, _AX) // MOVQ _T_slice, AX + self.Emit("MOVQ", _AX, jit.Ptr(_SI, 0)) // MOVQ AX, (SI) + self.WriteRecNotAX(2, _R8, jit.Ptr(_SI, 8), false) // MOVQ R8, 8(SI) + + /* add a new slot for the first element */ + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow + self.Emit("MOVQ", jit.Ptr(_R8, 0), _AX) // MOVQ (R8), AX + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WritePtrAX(3, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] + self.Emit("MOVQ", jit.Imm(_S_arr_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr_0, ST.Vt[CX] + self.Sjmp("JMP" , "_next") // JMP _next + + /** V_OBJECT **/ + self.Link("_decode_V_OBJECT") // _decode_V_OBJECT: + self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX + self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char + self.call_go(_F_makemap_small) // CALL_GO runtime.makemap_small + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Imm(_S_obj_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_0, ST.Vt[CX] + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX + self.Emit("MOVQ", _DX, jit.Ptr(_SI, 0)) // MOVQ DX, (SI) + self.WritePtrAX(4, jit.Ptr(_SI, 8), false) // MOVQ AX, 8(SI) + self.Sjmp("JMP" , "_next") // JMP _next + + /** V_STRING **/ + self.Link("_decode_V_STRING") // _decode_V_STRING: + self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX + self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX + 
self.Emit("SUBQ", _CX, _AX) // SUBQ CX, AX + + /* check for escapes */ + self.Emit("CMPQ", _VAR_ss_Ep, jit.Imm(-1)) // CMPQ ss.Ep, $-1 + self.Sjmp("JNE" , "_unquote") // JNE _unquote + self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX + self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _R8) // LEAQ (IP)(CX), R8 + self.Byte(0x48, 0x8d, 0x3d) // LEAQ (PC), DI + self.Sref("_copy_string_end", 4) + self.Emit("BTQ", jit.Imm(_F_copy_string), _VAR_df) + self.Sjmp("JC", "copy_string") + self.Link("_copy_string_end") + self.Emit("XORL", _DX, _DX) + + /* strings with no escape sequences */ + self.Link("_noescape") // _noescape: + self.Emit("MOVL", jit.Imm(_S_omask_key), _DI) // MOVL _S_omask, DI + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _SI) // MOVQ ST.Vt[CX], SI + self.Emit("BTQ" , _SI, _DI) // BTQ SI, DI + self.Sjmp("JC" , "_object_key") // JC _object_key + + /* check for pre-packed strings, avoid 1 allocation */ + self.Emit("TESTQ", _DX, _DX) // TESTQ DX, DX + self.Sjmp("JNZ" , "_packed_str") // JNZ _packed_str + self.Emit("MOVQ" , _AX, _BX) // MOVQ AX, BX + self.Emit("MOVQ" , _R8, _AX) // MOVQ R8, AX + self.call_go(_F_convTstring) // CALL_GO runtime.convTstring + self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9 + + /* packed string already in R9 */ + self.Link("_packed_str") // _packed_str: + self.Emit("MOVQ", _T_string, _R8) // MOVQ _T_string, R8 + self.Emit("MOVQ", _VAR_ss_Iv, _DI) // MOVQ ss.Iv, DI + self.Emit("SUBQ", jit.Imm(1), _DI) // SUBQ $1, DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /* the string is an object key, get the map */ + self.Link("_object_key") + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + + /* add a new delimiter */ + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.Emit("MOVQ", jit.Imm(_S_obj_delim), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_delim, ST.Vt[CX] + + /* add a new slot int the map */ + self.Emit("MOVQ", _AX, _DI) // MOVQ AX, DI + self.Emit("MOVQ", _T_map, _AX) // MOVQ _T_map, AX + self.Emit("MOVQ", _SI, _BX) // MOVQ SI, BX + self.Emit("MOVQ", _R8, _CX) // MOVQ R9, CX + self.call_go(_F_mapassign_faststr) // CALL_GO runtime.mapassign_faststr + + /* add to the pointer stack */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.WritePtrAX(6, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX] + self.Sjmp("JMP" , "_next") // JMP _next + + /* allocate memory to store the string header and unquoted result */ + self.Link("_unquote") // _unquote: + self.Emit("ADDQ", jit.Imm(15), _AX) // ADDQ $15, AX + self.Emit("MOVQ", _T_byte, _BX) // MOVQ _T_byte, BX + self.Emit("MOVB", jit.Imm(0), _CX) // MOVB $0, CX + self.call_go(_F_mallocgc) // CALL_GO runtime.mallocgc + self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 + + /* prepare the unquoting parameters */ + self.Emit("MOVQ" , _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX + self.Emit("LEAQ" , jit.Sib(_IP, _CX, 1, 0), _DI) // LEAQ (IP)(CX), DI + self.Emit("NEGQ" , _CX) // NEGQ CX + self.Emit("LEAQ" , jit.Sib(_IC, _CX, 1, -1), _SI) // LEAQ -1(IC)(CX), SI + self.Emit("LEAQ" , jit.Ptr(_R9, 16), _DX) // LEAQ 16(R8), DX + self.Emit("LEAQ" , _VAR_ss_Ep, _CX) // LEAQ ss.Ep, CX 
+ self.Emit("XORL" , _R8, _R8) // XORL R8, R8 + self.Emit("BTQ" , jit.Imm(_F_disable_urc), _VAR_df) // BTQ ${_F_disable_urc}, fv + self.Emit("SETCC", _R8) // SETCC R8 + self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8 + + /* unquote the string, with R9 been preserved */ + self.Emit("MOVQ", _R9, _VAR_R9) // SAVE R9 + self.call_c(_F_unquote) // CALL unquote + self.Emit("MOVQ", _VAR_R9, _R9) // LOAD R9 + + /* check for errors */ + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , "_unquote_error") // JS _unquote_error + self.Emit("MOVL" , jit.Imm(1), _DX) // MOVL $1, DX + self.Emit("LEAQ" , jit.Ptr(_R9, 16), _R8) // ADDQ $16, R8 + self.Emit("MOVQ" , _R8, jit.Ptr(_R9, 0)) // MOVQ R8, (R9) + self.Emit("MOVQ" , _AX, jit.Ptr(_R9, 8)) // MOVQ AX, 8(R9) + self.Sjmp("JMP" , "_noescape") // JMP _noescape + + /** V_DOUBLE **/ + self.Link("_decode_V_DOUBLE") // _decode_V_DOUBLE: + self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df + self.Sjmp("JC" , "_use_number") // JC _use_number + self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0 + self.Sjmp("JMP" , "_use_float64") // JMP _use_float64 + + /** V_INTEGER **/ + self.Link("_decode_V_INTEGER") // _decode_V_INTEGER: + self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df + self.Sjmp("JC" , "_use_number") // JC _use_number + self.Emit("BTQ" , jit.Imm(_F_use_int64), _VAR_df) // BTQ _F_use_int64, df + self.Sjmp("JC" , "_use_int64") // JC _use_int64 + //TODO: use ss.Dv directly + self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0 + + /* represent numbers as `float64` */ + self.Link("_use_float64") // _use_float64: + self.Emit("MOVQ" , _X0, _AX) // MOVQ X0, AX + self.call_go(_F_convT64) // CALL_GO runtime.convT64 + self.Emit("MOVQ" , _T_float64, _R8) // MOVQ _T_float64, R8 + self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9 + self.Emit("MOVQ" , _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /* represent numbers as `json.Number` */ + self.Link("_use_number") // _use_number + self.Emit("MOVQ", _VAR_ss_Ep, _AX) // MOVQ ss.Ep, AX + self.Emit("LEAQ", jit.Sib(_IP, _AX, 1, 0), _SI) // LEAQ (IP)(AX), SI + self.Emit("MOVQ", _IC, _CX) // MOVQ IC, CX + self.Emit("SUBQ", _AX, _CX) // SUBQ AX, CX + self.Emit("MOVQ", _SI, _AX) // MOVQ SI, AX + self.Emit("MOVQ", _CX, _BX) // MOVQ CX, BX + self.call_go(_F_convTstring) // CALL_GO runtime.convTstring + self.Emit("MOVQ", _T_number, _R8) // MOVQ _T_number, R8 + self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 + self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /* represent numbers as `int64` */ + self.Link("_use_int64") // _use_int64: + self.Emit("MOVQ", _VAR_ss_Iv, _AX) // MOVQ ss.Iv, AX + self.call_go(_F_convT64) // CALL_GO runtime.convT64 + self.Emit("MOVQ", _T_int64, _R8) // MOVQ _T_int64, R8 + self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9 + self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI + self.Sjmp("JMP" , "_set_value") // JMP _set_value + + /** V_KEY_SEP **/ + self.Link("_decode_V_KEY_SEP") // _decode_V_KEY_SEP: + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ", _AX, jit.Imm(_S_obj_delim)) // CMPQ AX, _S_obj_delim + self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char + self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX] + self.Emit("MOVQ", jit.Imm(_S_obj), 
jit.Sib(_ST, _CX, 8, _ST_Vt - 8)) // MOVQ _S_obj, ST.Vt[CX - 1] + self.Sjmp("JMP" , "_next") // JMP _next + + /** V_ELEM_SEP **/ + self.Link("_decode_V_ELEM_SEP") // _decode_V_ELEM_SEP: + self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ" , _AX, jit.Imm(_S_arr)) + self.Sjmp("JE" , "_array_sep") // JZ _next + self.Emit("CMPQ" , _AX, jit.Imm(_S_obj)) // CMPQ _AX, _S_arr + self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char + self.Emit("MOVQ" , jit.Imm(_S_obj_sep), jit.Sib(_ST, _CX, 8, _ST_Vt)) + self.Sjmp("JMP" , "_next") // JMP _next + + /* arrays */ + self.Link("_array_sep") + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _DX) // MOVQ 8(SI), DX + self.Emit("CMPQ", _DX, jit.Ptr(_SI, 16)) // CMPQ DX, 16(SI) + self.Sjmp("JAE" , "_array_more") // JAE _array_more + + /* add a slot for the new element */ + self.Link("_array_append") // _array_append: + self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_SI, 8)) // ADDQ $1, 8(SI) + self.Emit("MOVQ", jit.Ptr(_SI, 0), _SI) // MOVQ (SI), SI + self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX + self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE} + self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow + self.Emit("SHLQ", jit.Imm(1), _DX) // SHLQ $1, DX + self.Emit("LEAQ", jit.Sib(_SI, _DX, 8, 0), _SI) // LEAQ (SI)(DX*8), SI + self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp + self.WriteRecNotAX(7 , _SI, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ SI, ST.Vp[CX] + self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX} + self.Sjmp("JMP" , "_next") // JMP _next + + /** V_ARRAY_END **/ + self.Link("_decode_V_ARRAY_END") // _decode_V_ARRAY_END: + self.Emit("XORL", _DX, _DX) // XORL DX, DX + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("CMPQ", _AX, jit.Imm(_S_arr_0)) // CMPQ AX, _S_arr_0 + self.Sjmp("JE" , "_first_item") // JE _first_item + self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) // CMPQ AX, _S_arr + self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char + self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp + self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] + self.Sjmp("JMP" , "_next") // JMP _next + + /* first element of an array */ + self.Link("_first_item") // _first_item: + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("SUBQ", jit.Imm(2), jit.Ptr(_ST, _ST_Sp)) // SUBQ $2, ST.Sp + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp - 8), _SI) // MOVQ ST.Vp[CX - 1], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp - 8)) // MOVQ DX, ST.Vp[CX - 1] + self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX] + self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) + self.Sjmp("JMP" , "_next") // JMP _next + + /** V_OBJECT_END **/ + self.Link("_decode_V_OBJECT_END") // _decode_V_OBJECT_END: + self.Emit("MOVL", jit.Imm(_S_omask_end), _DI) // MOVL _S_omask, DI + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX + self.Emit("BTQ" , _AX, _DI) + self.Sjmp("JNC" , "_invalid_char") // JNE _invalid_char + 
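+ // pop the finished object: the stores below drop one state slot and zero
+ // ST.Vp[CX], presumably so the completed map is not kept alive by a stale
+ // stack-slot pointer.
+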
self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp + self.Emit("MOVQ", _AX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ AX, ST.Vp[CX] + self.Sjmp("JMP" , "_next") // JMP _next + + /* return from decoder */ + self.Link("_return") // _return: + self.Emit("XORL", _EP, _EP) // XORL EP, EP + self.Emit("MOVQ", _EP, jit.Ptr(_ST, _ST_Vp)) // MOVQ EP, ST.Vp[0] + self.Link("_epilogue") // _epilogue: + self.Emit("SUBQ", jit.Imm(_FsmOffset), _ST) // SUBQ _FsmOffset, _ST + self.Emit("MOVQ", jit.Ptr(_SP, _VD_offs), _BP) // MOVQ _VD_offs(SP), BP + self.Emit("ADDQ", jit.Imm(_VD_size), _SP) // ADDQ $_VD_size, SP + self.Emit("RET") // RET + + /* array expand */ + self.Link("_array_more") // _array_more: + self.Emit("MOVQ" , _T_eface, _AX) // MOVQ _T_eface, AX + self.Emit("MOVQ" , jit.Ptr(_SI, 0), _BX) // MOVQ (SI), BX + self.Emit("MOVQ" , jit.Ptr(_SI, 8), _CX) // MOVQ 8(SI), CX + self.Emit("MOVQ" , jit.Ptr(_SI, 16), _DI) // MOVQ 16(SI), DI + self.Emit("MOVQ" , _DI, _SI) // MOVQ DI, 24(SP) + self.Emit("SHLQ" , jit.Imm(1), _SI) // SHLQ $1, SI + self.call_go(_F_growslice) // CALL_GO runtime.growslice + self.Emit("MOVQ" , _AX, _DI) // MOVQ AX, DI + self.Emit("MOVQ" , _BX, _DX) // MOVQ BX, DX + self.Emit("MOVQ" , _CX, _AX) // MOVQ CX, AX + + /* update the slice */ + self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX + self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI + self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI + self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI) + self.Emit("MOVQ", _AX, jit.Ptr(_SI, 16)) // MOVQ AX, 16(AX) + self.WriteRecNotAX(8 , _DI, jit.Ptr(_SI, 0), false) // MOVQ R10, (SI) + self.Sjmp("JMP" , "_array_append") // JMP _array_append + + /* copy string */ + self.Link("copy_string") // pointer: R8, length: AX, return addr: DI + self.Emit("MOVQ", _R8, _VAR_cs_p) + self.Emit("MOVQ", _AX, _VAR_cs_n) + self.Emit("MOVQ", _DI, _VAR_cs_LR) + self.Emit("MOVQ", _AX, _BX) + self.Emit("MOVQ", _AX, _CX) + self.Emit("MOVQ", _T_byte, _AX) + self.call_go(_F_makeslice) + self.Emit("MOVQ", _AX, _VAR_cs_d) + self.Emit("MOVQ", _VAR_cs_p, _BX) + self.Emit("MOVQ", _VAR_cs_n, _CX) + self.call_go(_F_memmove) + self.Emit("MOVQ", _VAR_cs_d, _R8) + self.Emit("MOVQ", _VAR_cs_n, _AX) + self.Emit("MOVQ", _VAR_cs_LR, _DI) + self.Rjmp("JMP", _DI) + + /* error handlers */ + self.Link("_stack_overflow") + self.Emit("MOVL" , _E_recurse, _EP) // MOVQ _E_recurse, EP + self.Sjmp("JMP" , "_error") // JMP _error + self.Link("_vtype_error") // _vtype_error: + self.Emit("MOVQ" , _DI, _IC) // MOVQ DI, IC + self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP + self.Sjmp("JMP" , "_error") // JMP _error + self.Link("_invalid_char") // _invalid_char: + self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC + self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP + self.Sjmp("JMP" , "_error") // JMP _error + self.Link("_unquote_error") // _unquote_error: + self.Emit("MOVQ" , _VAR_ss_Iv, _IC) // MOVQ ss.Iv, IC + self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC + self.Link("_parsing_error") // _parsing_error: + self.Emit("NEGQ" , _AX) // NEGQ AX + self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP + self.Link("_error") // _error: + self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP) + self.Sjmp("JMP" , "_epilogue") // JMP _epilogue + + /* invalid value type, never returns */ + self.Link("_invalid_vtype") + self.call_go(_F_invalid_vtype) // CALL invalid_type + 
self.Emit("UD2") // UD2 + + /* switch jump table */ + self.Link("_switch_table") // _switch_table: + self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 + self.Sref("_decode_V_NULL", -4) // SREF &_decode_V_NULL, $-4 + self.Sref("_decode_V_TRUE", -8) // SREF &_decode_V_TRUE, $-8 + self.Sref("_decode_V_FALSE", -12) // SREF &_decode_V_FALSE, $-12 + self.Sref("_decode_V_ARRAY", -16) // SREF &_decode_V_ARRAY, $-16 + self.Sref("_decode_V_OBJECT", -20) // SREF &_decode_V_OBJECT, $-20 + self.Sref("_decode_V_STRING", -24) // SREF &_decode_V_STRING, $-24 + self.Sref("_decode_V_DOUBLE", -28) // SREF &_decode_V_DOUBLE, $-28 + self.Sref("_decode_V_INTEGER", -32) // SREF &_decode_V_INTEGER, $-32 + self.Sref("_decode_V_KEY_SEP", -36) // SREF &_decode_V_KEY_SEP, $-36 + self.Sref("_decode_V_ELEM_SEP", -40) // SREF &_decode_V_ELEM_SEP, $-40 + self.Sref("_decode_V_ARRAY_END", -44) // SREF &_decode_V_ARRAY_END, $-44 + self.Sref("_decode_V_OBJECT_END", -48) // SREF &_decode_V_OBJECT_END, $-48 + + /* fast character lookup table */ + self.Link("_decode_tab") // _decode_tab: + self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0 + + /* generate rest of the tabs */ + for i := 1; i < 256; i++ { + if to, ok := _R_tab[i]; ok { + self.Sref(to, -int64(i) * 4) + } else { + self.Byte(0x00, 0x00, 0x00, 0x00) + } + } +} + +func (self *_ValueDecoder) WritePtrAX(i int, rec obj.Addr, saveDI bool) { + self.Emit("MOVQ", _V_writeBarrier, _R9) + self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.call(_F_gcWriteBarrierAX) + if saveDI { + self.load(_DI) + } + self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", _AX, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} + +func (self *_ValueDecoder) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool) { + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _AX) + self.Emit("CMPL", jit.Ptr(_AX, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, _AX) + if saveDI { + self.save(_DI) + } + self.Emit("LEAQ", rec, _DI) + self.call(_F_gcWriteBarrierAX) + if saveDI { + self.load(_DI) + } + self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} + +/** Generic Decoder **/ + +var ( + _subr_decode_value = new(_ValueDecoder).build() +) + +//go:nosplit +func invalid_vtype(vt types.ValueType) { + throw(fmt.Sprintf("invalid value type: %d", vt)) +} diff --git a/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117_test.s b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117_test.s new file mode 100644 index 0000000..6c2686d --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117_test.s @@ -0,0 +1,37 @@ +// +build go1.17,!go1.21 + +// +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "go_asm.h" +#include "funcdata.h" +#include "textflag.h" + +TEXT ·decodeValueStub(SB), NOSPLIT, $0 - 72 + NO_LOCAL_POINTERS + PXOR X0, X0 + MOVOU X0, rv+48(FP) + MOVQ st+0(FP) , R13 + MOVQ sp+8(FP) , R10 + MOVQ sn+16(FP), R12 + MOVQ ic+24(FP), R11 + MOVQ vp+32(FP), R15 + MOVQ df+40(FP), AX + MOVQ ·_subr_decode_value(SB), BX + CALL BX + MOVQ R11, rp+48(FP) + MOVQ BX, ex+56(FP) + RET diff --git a/vendor/github.com/bytedance/sonic/decoder/generic_amd64_test.s b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_test.s new file mode 100644 index 0000000..36cb1f5 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_test.s @@ -0,0 +1,37 @@ +// +build go1.15,!go1.17 + +// +// Copyright 2021 ByteDance Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "go_asm.h" +#include "funcdata.h" +#include "textflag.h" + +TEXT ·decodeValueStub(SB), NOSPLIT, $0 - 72 + NO_LOCAL_POINTERS + PXOR X0, X0 + MOVOU X0, rv+48(FP) + MOVQ st+0(FP), BX + MOVQ sp+8(FP), R12 + MOVQ sn+16(FP), R13 + MOVQ ic+24(FP), R14 + MOVQ vp+32(FP), R15 + MOVQ df+40(FP), R10 + MOVQ ·_subr_decode_value(SB), AX + CALL AX + MOVQ R14, rp+48(FP) + MOVQ R11, ex+56(FP) + RET diff --git a/vendor/github.com/bytedance/sonic/decoder/pools.go b/vendor/github.com/bytedance/sonic/decoder/pools.go new file mode 100644 index 0000000..ab1e5f2 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/pools.go @@ -0,0 +1,143 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package decoder + +import ( + `sync` + `unsafe` + + `github.com/bytedance/sonic/internal/caching` + `github.com/bytedance/sonic/internal/native/types` + `github.com/bytedance/sonic/internal/rt` +) + +const ( + _MinSlice = 16 + _MaxStack = 4096 // 4k slots + _MaxStackBytes = _MaxStack * _PtrBytes + _MaxDigitNums = 800 // used in atof fallback algorithm +) + +const ( + _PtrBytes = _PTR_SIZE / 8 + _FsmOffset = (_MaxStack + 1) * _PtrBytes + _DbufOffset = _FsmOffset + int64(unsafe.Sizeof(types.StateMachine{})) + types.MAX_RECURSE * _PtrBytes + _StackSize = unsafe.Sizeof(_Stack{}) +) + +var ( + stackPool = sync.Pool{} + valueCache = []unsafe.Pointer(nil) + fieldCache = []*caching.FieldMap(nil) + fieldCacheMux = sync.Mutex{} + programCache = caching.CreateProgramCache() +) + +type _Stack struct { + sp uintptr + sb [_MaxStack]unsafe.Pointer + mm types.StateMachine + vp [types.MAX_RECURSE]unsafe.Pointer + dp [_MaxDigitNums]byte +} + +type _Decoder func( + s string, + i int, + vp unsafe.Pointer, + sb *_Stack, + fv uint64, + sv string, // DO NOT pass value to this arguement, since it is only used for local _VAR_sv + vk unsafe.Pointer, // DO NOT pass value to this arguement, since it is only used for local _VAR_vk +) (int, error) + +var _KeepAlive struct { + s string + i int + vp unsafe.Pointer + sb *_Stack + fv uint64 + sv string + vk unsafe.Pointer + + ret int + err error + + frame_decoder [_FP_offs]byte + frame_generic [_VD_offs]byte +} + +var ( + argPtrs = []bool{true, false, false, true, true, false, true, false, true} + localPtrs = []bool{} +) + +var ( + argPtrs_generic = []bool{true} + localPtrs_generic = []bool{} +) + +func newStack() *_Stack { + if ret := stackPool.Get(); ret == nil { + return new(_Stack) + } else { + return ret.(*_Stack) + } +} + +func resetStack(p *_Stack) { + memclrNoHeapPointers(unsafe.Pointer(p), _StackSize) +} + +func freeStack(p *_Stack) { + p.sp = 0 + stackPool.Put(p) +} + +func freezeValue(v unsafe.Pointer) uintptr { + valueCache = append(valueCache, v) + return uintptr(v) +} + +func freezeFields(v *caching.FieldMap) int64 { + fieldCacheMux.Lock() + fieldCache = append(fieldCache, v) + fieldCacheMux.Unlock() + return referenceFields(v) +} + +func referenceFields(v *caching.FieldMap) int64 { + return int64(uintptr(unsafe.Pointer(v))) +} + +func makeDecoder(vt *rt.GoType, _ ...interface{}) (interface{}, error) { + if pp, err := newCompiler().compile(vt.Pack()); err != nil { + return nil, err + } else { + return newAssembler(pp).Load(), nil + } +} + +func findOrCompile(vt *rt.GoType) (_Decoder, error) { + if val := programCache.Get(vt); val != nil { + return val.(_Decoder), nil + } else if ret, err := programCache.Compute(vt, makeDecoder); err == nil { + return ret.(_Decoder), nil + } else { + return nil, err + } +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/decoder/primitives.go b/vendor/github.com/bytedance/sonic/decoder/primitives.go new file mode 100644 index 0000000..d6053e2 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/primitives.go @@ -0,0 +1,46 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+    `encoding`
+    `encoding/json`
+    `unsafe`
+
+    `github.com/bytedance/sonic/internal/native`
+    `github.com/bytedance/sonic/internal/rt`
+)
+
+func decodeTypedPointer(s string, i int, vt *rt.GoType, vp unsafe.Pointer, sb *_Stack, fv uint64) (int, error) {
+    if fn, err := findOrCompile(vt); err != nil {
+        return 0, err
+    } else {
+        rt.MoreStack(_FP_size + _VD_size + native.MaxFrameSize)
+        rt.StopProf()
+        ret, err := fn(s, i, vp, sb, fv, "", nil)
+        rt.StartProf()
+        return ret, err
+    }
+}
+
+func decodeJsonUnmarshaler(vv interface{}, s string) error {
+    return vv.(json.Unmarshaler).UnmarshalJSON(rt.Str2Mem(s))
+}
+
+func decodeTextUnmarshaler(vv interface{}, s string) error {
+    return vv.(encoding.TextUnmarshaler).UnmarshalText(rt.Str2Mem(s))
+}
diff --git a/vendor/github.com/bytedance/sonic/decoder/stream.go b/vendor/github.com/bytedance/sonic/decoder/stream.go
new file mode 100644
index 0000000..06dc818
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/stream.go
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+    `bytes`
+    `io`
+    `sync`
+
+    `github.com/bytedance/sonic/internal/native/types`
+)
+
+var (
+    defaultBufferSize    uint = 4096
+    growSliceFactorShift uint = 1
+    minLeftBufferShift   uint = 2
+)
+
+type StreamDecoder struct {
+    r       io.Reader
+    buf     []byte
+    scanp   int
+    scanned int64
+    err     error
+    Decoder
+}
+
+var bufPool = sync.Pool{
+    New: func () interface{} {
+        return make([]byte, 0, defaultBufferSize)
+    },
+}
+
+// NewStreamDecoder adapts to encoding/json.NewDecoder API.
+//
+// NewStreamDecoder returns a new decoder that reads from r.
+func NewStreamDecoder(r io.Reader) *StreamDecoder {
+    return &StreamDecoder{r : r}
+}
+
+// Decode reads the next JSON-encoded value from the input stream and stores it in val.
+// Redundant bytes may be read and kept in the decoder's buffer for use by the next call.
+// Either an I/O error from the underlying io.Reader (except io.EOF)
+// or a syntax error in the data will be recorded and will stop subsequent decoding.
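//
// A minimal usage sketch (assumes some io.Reader r, a decodable type Payload,
// and a consumer use(); none of these are defined in this package):
//
//     dec := decoder.NewStreamDecoder(r)
//     for {
//         var v Payload
//         if err := dec.Decode(&v); err == io.EOF {
//             break // input exhausted
//         } else if err != nil {
//             return err // syntax or I/O error; later calls fail fast
//         }
//         use(v)
//     }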
+func (self *StreamDecoder) Decode(val interface{}) (err error) { + if self.err != nil { + return self.err + } + + var buf = self.buf[self.scanp:] + var p = 0 + var recycle bool + if cap(buf) == 0 { + buf = bufPool.Get().([]byte) + recycle = true + } + + var first = true + var repeat = true +read_more: + for { + l := len(buf) + realloc(&buf) + n, err := self.r.Read(buf[l:cap(buf)]) + buf = buf[:l+n] + if err != nil { + repeat = false + if err == io.EOF { + if len(buf) == 0 { + return err + } + break + } + self.err = err + return err + } + if n > 0 || first { + break + } + } + first = false + + l := len(buf) + if l > 0 { + self.Decoder.Reset(string(buf)) + err = self.Decoder.Decode(val) + if err != nil { + if repeat && self.repeatable(err) { + goto read_more + } + self.err = err + } + + p = self.Decoder.Pos() + self.scanned += int64(p) + self.scanp = 0 + } + + if l > p { + // remain undecoded bytes, so copy them into self.buf + self.buf = append(self.buf[:0], buf[p:]...) + } else { + self.buf = nil + recycle = true + } + + if recycle { + buf = buf[:0] + bufPool.Put(buf) + } + return err +} + +func (self StreamDecoder) repeatable(err error) bool { + if ee, ok := err.(SyntaxError); ok && + (ee.Code == types.ERR_EOF || (ee.Code == types.ERR_INVALID_CHAR && self.i >= len(self.s)-1)) { + return true + } + return false +} + +// InputOffset returns the input stream byte offset of the current decoder position. +// The offset gives the location of the end of the most recently returned token and the beginning of the next token. +func (self *StreamDecoder) InputOffset() int64 { + return self.scanned + int64(self.scanp) +} + +// Buffered returns a reader of the data remaining in the Decoder's buffer. +// The reader is valid until the next call to Decode. +func (self *StreamDecoder) Buffered() io.Reader { + return bytes.NewReader(self.buf[self.scanp:]) +} + +// More reports whether there is another element in the +// current array or object being parsed. +func (self *StreamDecoder) More() bool { + if self.err != nil { + return false + } + c, err := self.peek() + return err == nil && c != ']' && c != '}' +} + +func (self *StreamDecoder) peek() (byte, error) { + var err error + for { + for i := self.scanp; i < len(self.buf); i++ { + c := self.buf[i] + if isSpace(c) { + continue + } + self.scanp = i + return c, nil + } + // buffer has been scanned, now report any error + if err != nil { + if err != io.EOF { + self.err = err + } + return 0, err + } + err = self.refill() + } +} + +func isSpace(c byte) bool { + return types.SPACE_MASK & (1 << c) != 0 +} + +func (self *StreamDecoder) refill() error { + // Make room to read more into the buffer. + // First slide down data already consumed. + if self.scanp > 0 { + self.scanned += int64(self.scanp) + n := copy(self.buf, self.buf[self.scanp:]) + self.buf = self.buf[:n] + self.scanp = 0 + } + + // Grow buffer if not large enough. + realloc(&self.buf) + + // Read. Delay error for next iteration (after scan). 
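    // For orientation, a worked example of the growth policy in realloc
    // (called just above and defined at the end of this file), using the
    // package defaults declared earlier: with defaultBufferSize = 4096 and
    // minLeftBufferShift = 2, the buffer is reallocated only once free space
    // (cap-len) has shrunk to cap/4 or less, and the new capacity is
    // max(4096, len+len/4). E.g. for len=8000, cap=8192: free = 192 <= 2048,
    // so a 10000-byte backing array is allocated and the data copied over.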
+ n, err := self.r.Read(self.buf[len(self.buf):cap(self.buf)]) + self.buf = self.buf[0 : len(self.buf)+n] + + return err +} + +func realloc(buf *[]byte) { + l := uint(len(*buf)) + c := uint(cap(*buf)) + if c - l <= c >> minLeftBufferShift { + e := l+(l>>minLeftBufferShift) + if e < defaultBufferSize { + e = defaultBufferSize + } + tmp := make([]byte, l, e) + copy(tmp, *buf) + *buf = tmp + } +} + diff --git a/vendor/github.com/bytedance/sonic/decoder/stubs_go115.go b/vendor/github.com/bytedance/sonic/decoder/stubs_go115.go new file mode 100644 index 0000000..1a0917c --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/stubs_go115.go @@ -0,0 +1,111 @@ +// +build go1.15,!go1.20 + +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package decoder + +import ( + `unsafe` + `reflect` + + _ `github.com/chenzhuoyu/base64x` + + `github.com/bytedance/sonic/internal/rt` +) + +//go:linkname _subr__b64decode github.com/chenzhuoyu/base64x._subr__b64decode +var _subr__b64decode uintptr + +// runtime.maxElementSize +const _max_map_element_size uintptr = 128 + +func mapfast(vt reflect.Type) bool { + return vt.Elem().Size() <= _max_map_element_size +} + +//go:nosplit +//go:linkname throw runtime.throw +//goland:noinspection GoUnusedParameter +func throw(s string) + +//go:linkname convT64 runtime.convT64 +//goland:noinspection GoUnusedParameter +func convT64(v uint64) unsafe.Pointer + +//go:linkname convTslice runtime.convTslice +//goland:noinspection GoUnusedParameter +func convTslice(v []byte) unsafe.Pointer + +//go:linkname convTstring runtime.convTstring +//goland:noinspection GoUnusedParameter +func convTstring(v string) unsafe.Pointer + +//go:noescape +//go:linkname memequal runtime.memequal +//goland:noinspection GoUnusedParameter +func memequal(a unsafe.Pointer, b unsafe.Pointer, size uintptr) bool + +//go:noescape +//go:linkname memmove runtime.memmove +//goland:noinspection GoUnusedParameter +func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr) + +//go:linkname mallocgc runtime.mallocgc +//goland:noinspection GoUnusedParameter +func mallocgc(size uintptr, typ *rt.GoType, needzero bool) unsafe.Pointer + +//go:linkname makeslice runtime.makeslice +//goland:noinspection GoUnusedParameter +func makeslice(et *rt.GoType, len int, cap int) unsafe.Pointer + +//go:noescape +//go:linkname growslice runtime.growslice +//goland:noinspection GoUnusedParameter +func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice + +//go:linkname makemap_small runtime.makemap_small +func makemap_small() unsafe.Pointer + +//go:linkname mapassign runtime.mapassign +//goland:noinspection GoUnusedParameter +func mapassign(t *rt.GoType, h unsafe.Pointer, k unsafe.Pointer) unsafe.Pointer + +//go:linkname mapassign_fast32 runtime.mapassign_fast32 +//goland:noinspection GoUnusedParameter +func mapassign_fast32(t *rt.GoType, h unsafe.Pointer, k uint32) unsafe.Pointer + +//go:linkname mapassign_fast64 runtime.mapassign_fast64 +//goland:noinspection 
GoUnusedParameter +func mapassign_fast64(t *rt.GoType, h unsafe.Pointer, k uint64) unsafe.Pointer + +//go:linkname mapassign_fast64ptr runtime.mapassign_fast64ptr +//goland:noinspection GoUnusedParameter +func mapassign_fast64ptr(t *rt.GoType, h unsafe.Pointer, k unsafe.Pointer) unsafe.Pointer + +//go:linkname mapassign_faststr runtime.mapassign_faststr +//goland:noinspection GoUnusedParameter +func mapassign_faststr(t *rt.GoType, h unsafe.Pointer, s string) unsafe.Pointer + +//go:nosplit +//go:linkname memclrHasPointers runtime.memclrHasPointers +//goland:noinspection GoUnusedParameter +func memclrHasPointers(ptr unsafe.Pointer, n uintptr) + +//go:noescape +//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers +//goland:noinspection GoUnusedParameter +func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/decoder/stubs_go120.go b/vendor/github.com/bytedance/sonic/decoder/stubs_go120.go new file mode 100644 index 0000000..cde6a19 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/stubs_go120.go @@ -0,0 +1,111 @@ +// +build go1.20 + +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package decoder + +import ( + `unsafe` + `reflect` + + _ `github.com/chenzhuoyu/base64x` + + `github.com/bytedance/sonic/internal/rt` +) + +//go:linkname _subr__b64decode github.com/chenzhuoyu/base64x._subr__b64decode +var _subr__b64decode uintptr + +// runtime.maxElementSize +const _max_map_element_size uintptr = 128 + +func mapfast(vt reflect.Type) bool { + return vt.Elem().Size() <= _max_map_element_size +} + +//go:nosplit +//go:linkname throw runtime.throw +//goland:noinspection GoUnusedParameter +func throw(s string) + +//go:linkname convT64 runtime.convT64 +//goland:noinspection GoUnusedParameter +func convT64(v uint64) unsafe.Pointer + +//go:linkname convTslice runtime.convTslice +//goland:noinspection GoUnusedParameter +func convTslice(v []byte) unsafe.Pointer + +//go:linkname convTstring runtime.convTstring +//goland:noinspection GoUnusedParameter +func convTstring(v string) unsafe.Pointer + +//go:noescape +//go:linkname memequal runtime.memequal +//goland:noinspection GoUnusedParameter +func memequal(a unsafe.Pointer, b unsafe.Pointer, size uintptr) bool + +//go:noescape +//go:linkname memmove runtime.memmove +//goland:noinspection GoUnusedParameter +func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr) + +//go:linkname mallocgc runtime.mallocgc +//goland:noinspection GoUnusedParameter +func mallocgc(size uintptr, typ *rt.GoType, needzero bool) unsafe.Pointer + +//go:linkname makeslice runtime.makeslice +//goland:noinspection GoUnusedParameter +func makeslice(et *rt.GoType, len int, cap int) unsafe.Pointer + +//go:noescape +//go:linkname growslice reflect.growslice +//goland:noinspection GoUnusedParameter +func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice + +//go:linkname makemap_small runtime.makemap_small +func makemap_small() 
unsafe.Pointer + +//go:linkname mapassign runtime.mapassign +//goland:noinspection GoUnusedParameter +func mapassign(t *rt.GoType, h unsafe.Pointer, k unsafe.Pointer) unsafe.Pointer + +//go:linkname mapassign_fast32 runtime.mapassign_fast32 +//goland:noinspection GoUnusedParameter +func mapassign_fast32(t *rt.GoType, h unsafe.Pointer, k uint32) unsafe.Pointer + +//go:linkname mapassign_fast64 runtime.mapassign_fast64 +//goland:noinspection GoUnusedParameter +func mapassign_fast64(t *rt.GoType, h unsafe.Pointer, k uint64) unsafe.Pointer + +//go:linkname mapassign_fast64ptr runtime.mapassign_fast64ptr +//goland:noinspection GoUnusedParameter +func mapassign_fast64ptr(t *rt.GoType, h unsafe.Pointer, k unsafe.Pointer) unsafe.Pointer + +//go:linkname mapassign_faststr runtime.mapassign_faststr +//goland:noinspection GoUnusedParameter +func mapassign_faststr(t *rt.GoType, h unsafe.Pointer, s string) unsafe.Pointer + +//go:nosplit +//go:linkname memclrHasPointers runtime.memclrHasPointers +//goland:noinspection GoUnusedParameter +func memclrHasPointers(ptr unsafe.Pointer, n uintptr) + +//go:noescape +//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers +//goland:noinspection GoUnusedParameter +func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/decoder/types.go b/vendor/github.com/bytedance/sonic/decoder/types.go new file mode 100644 index 0000000..6fc0e70 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/types.go @@ -0,0 +1,58 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package decoder + +import ( + `encoding` + `encoding/base64` + `encoding/json` + `reflect` + `unsafe` + + `github.com/bytedance/sonic/internal/rt` +) + +var ( + byteType = reflect.TypeOf(byte(0)) + intType = reflect.TypeOf(int(0)) + int8Type = reflect.TypeOf(int8(0)) + int16Type = reflect.TypeOf(int16(0)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + uintType = reflect.TypeOf(uint(0)) + uint8Type = reflect.TypeOf(uint8(0)) + uint16Type = reflect.TypeOf(uint16(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float32Type = reflect.TypeOf(float32(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf("") + bytesType = reflect.TypeOf([]byte(nil)) + jsonNumberType = reflect.TypeOf(json.Number("")) + base64CorruptInputError = reflect.TypeOf(base64.CorruptInputError(0)) +) + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + jsonUnmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + encodingTextUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +func rtype(t reflect.Type) (*rt.GoItab, *rt.GoType) { + p := (*rt.GoIface)(unsafe.Pointer(&t)) + return p.Itab, (*rt.GoType)(p.Value) +} diff --git a/vendor/github.com/bytedance/sonic/decoder/utils.go b/vendor/github.com/bytedance/sonic/decoder/utils.go new file mode 100644 index 0000000..23ee5d5 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/decoder/utils.go @@ -0,0 +1,39 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package decoder + +import ( + `unsafe` + + `github.com/bytedance/sonic/loader` +) + +//go:nosplit +func pbool(v bool) uintptr { + return freezeValue(unsafe.Pointer(&v)) +} + +//go:nosplit +func ptodec(p loader.Function) _Decoder { + return *(*_Decoder)(unsafe.Pointer(&p)) +} + +func assert_eq(v int64, exp int64, msg string) { + if v != exp { + panic(msg) + } +} diff --git a/vendor/github.com/bytedance/sonic/encoder/asm.s b/vendor/github.com/bytedance/sonic/encoder/asm.s new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go116.go b/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go116.go new file mode 100644 index 0000000..9b59784 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go116.go @@ -0,0 +1,1198 @@ +// +build go1.15,!go1.17 + +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package encoder + +import ( + `fmt` + `reflect` + `strconv` + `unsafe` + + `github.com/bytedance/sonic/internal/cpu` + `github.com/bytedance/sonic/internal/jit` + `github.com/bytedance/sonic/internal/native/types` + `github.com/twitchyliquid64/golang-asm/obj` + `github.com/twitchyliquid64/golang-asm/obj/x86` + + `github.com/bytedance/sonic/internal/native` + `github.com/bytedance/sonic/internal/rt` +) + +/** Register Allocations + * + * State Registers: + * + * %rbx : stack base + * %rdi : result pointer + * %rsi : result length + * %rdx : result capacity + * %r12 : sp->p + * %r13 : sp->q + * %r14 : sp->x + * %r15 : sp->f + * + * Error Registers: + * + * %r10 : error type register + * %r11 : error pointer register + */ + +/** Function Prototype & Stack Map + * + * func (buf *[]byte, p unsafe.Pointer, sb *_Stack, fv uint64) (err error) + * + * buf : (FP) + * p : 8(FP) + * sb : 16(FP) + * fv : 24(FP) + * err.vt : 32(FP) + * err.vp : 40(FP) + */ + +const ( + _S_cond = iota + _S_init +) + +const ( + _FP_args = 48 // 48 bytes for passing arguments to this function + _FP_fargs = 64 // 64 bytes for passing arguments to other Go functions + _FP_saves = 64 // 64 bytes for saving the registers before CALL instructions + _FP_locals = 24 // 24 bytes for local variables +) + +const ( + _FP_offs = _FP_fargs + _FP_saves + _FP_locals + _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer + _FP_base = _FP_size + 8 // 8 bytes for the return address +) + +const ( + _FM_exp32 = 0x7f800000 + _FM_exp64 = 0x7ff0000000000000 +) + +const ( + _IM_null = 0x6c6c756e // 'null' + _IM_true = 0x65757274 // 'true' + _IM_fals = 0x736c6166 // 'fals' ('false' without the 'e') + _IM_open = 0x00225c22 // '"\"∅' + _IM_array = 0x5d5b // '[]' + _IM_object = 0x7d7b // '{}' + _IM_mulv = -0x5555555555555555 +) + +const ( + _LB_more_space = "_more_space" + _LB_more_space_return = "_more_space_return_" +) + +const ( + _LB_error = "_error" + _LB_error_too_deep = "_error_too_deep" + _LB_error_invalid_number = "_error_invalid_number" + _LB_error_nan_or_infinite = "_error_nan_or_infinite" + _LB_panic = "_panic" +) + +var ( + _AX = jit.Reg("AX") + _CX = jit.Reg("CX") + _DX = jit.Reg("DX") + _DI = jit.Reg("DI") + _SI = jit.Reg("SI") + _BP = jit.Reg("BP") + _SP = jit.Reg("SP") + _R8 = jit.Reg("R8") +) + +var ( + _X0 = jit.Reg("X0") + _Y0 = jit.Reg("Y0") +) + +var ( + _ST = jit.Reg("BX") + _RP = jit.Reg("DI") + _RL = jit.Reg("SI") + _RC = jit.Reg("DX") +) + +var ( + _LR = jit.Reg("R9") + _R10 = jit.Reg("R10") // used for gcWriterBarrier + _ET = jit.Reg("R10") + _EP = jit.Reg("R11") +) + +var ( + _SP_p = jit.Reg("R12") + _SP_q = jit.Reg("R13") + _SP_x = jit.Reg("R14") + _SP_f = jit.Reg("R15") +) + +var ( + _ARG_rb = jit.Ptr(_SP, _FP_base) + _ARG_vp = jit.Ptr(_SP, _FP_base + 8) + _ARG_sb = jit.Ptr(_SP, _FP_base + 16) + _ARG_fv = jit.Ptr(_SP, _FP_base + 24) +) + +var ( + _RET_et = jit.Ptr(_SP, _FP_base + 32) + _RET_ep = jit.Ptr(_SP, _FP_base + 40) +) + +var ( + _VAR_sp = jit.Ptr(_SP, _FP_fargs + _FP_saves) + _VAR_dn = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8) + _VAR_vp = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16) +) + +var ( + _REG_ffi = []obj.Addr{_RP, _RL, _RC} + _REG_enc = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RL} + _REG_jsr = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _LR} + _REG_all = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RP, _RL, _RC} +) + +type _Assembler struct { + jit.BaseAssembler + p _Program + x int + name string +} + +func newAssembler(p _Program) *_Assembler { + return new(_Assembler).Init(p) +} + +/** 
Assembler Interface **/ +func (self *_Assembler) Load() _Encoder { + return ptoenc(self.BaseAssembler.Load("encode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs)) +} + +func (self *_Assembler) Init(p _Program) *_Assembler { + self.p = p + self.BaseAssembler.Init(self.compile) + return self +} + +func (self *_Assembler) compile() { + self.prologue() + self.instrs() + self.epilogue() + self.builtins() +} + +/** Assembler Stages **/ + +var _OpFuncTab = [256]func(*_Assembler, *_Instr) { + _OP_null : (*_Assembler)._asm_OP_null, + _OP_empty_arr : (*_Assembler)._asm_OP_empty_arr, + _OP_empty_obj : (*_Assembler)._asm_OP_empty_obj, + _OP_bool : (*_Assembler)._asm_OP_bool, + _OP_i8 : (*_Assembler)._asm_OP_i8, + _OP_i16 : (*_Assembler)._asm_OP_i16, + _OP_i32 : (*_Assembler)._asm_OP_i32, + _OP_i64 : (*_Assembler)._asm_OP_i64, + _OP_u8 : (*_Assembler)._asm_OP_u8, + _OP_u16 : (*_Assembler)._asm_OP_u16, + _OP_u32 : (*_Assembler)._asm_OP_u32, + _OP_u64 : (*_Assembler)._asm_OP_u64, + _OP_f32 : (*_Assembler)._asm_OP_f32, + _OP_f64 : (*_Assembler)._asm_OP_f64, + _OP_str : (*_Assembler)._asm_OP_str, + _OP_bin : (*_Assembler)._asm_OP_bin, + _OP_quote : (*_Assembler)._asm_OP_quote, + _OP_number : (*_Assembler)._asm_OP_number, + _OP_eface : (*_Assembler)._asm_OP_eface, + _OP_iface : (*_Assembler)._asm_OP_iface, + _OP_byte : (*_Assembler)._asm_OP_byte, + _OP_text : (*_Assembler)._asm_OP_text, + _OP_deref : (*_Assembler)._asm_OP_deref, + _OP_index : (*_Assembler)._asm_OP_index, + _OP_load : (*_Assembler)._asm_OP_load, + _OP_save : (*_Assembler)._asm_OP_save, + _OP_drop : (*_Assembler)._asm_OP_drop, + _OP_drop_2 : (*_Assembler)._asm_OP_drop_2, + _OP_recurse : (*_Assembler)._asm_OP_recurse, + _OP_is_nil : (*_Assembler)._asm_OP_is_nil, + _OP_is_nil_p1 : (*_Assembler)._asm_OP_is_nil_p1, + _OP_is_zero_1 : (*_Assembler)._asm_OP_is_zero_1, + _OP_is_zero_2 : (*_Assembler)._asm_OP_is_zero_2, + _OP_is_zero_4 : (*_Assembler)._asm_OP_is_zero_4, + _OP_is_zero_8 : (*_Assembler)._asm_OP_is_zero_8, + _OP_is_zero_map : (*_Assembler)._asm_OP_is_zero_map, + _OP_goto : (*_Assembler)._asm_OP_goto, + _OP_map_iter : (*_Assembler)._asm_OP_map_iter, + _OP_map_stop : (*_Assembler)._asm_OP_map_stop, + _OP_map_check_key : (*_Assembler)._asm_OP_map_check_key, + _OP_map_write_key : (*_Assembler)._asm_OP_map_write_key, + _OP_map_value_next : (*_Assembler)._asm_OP_map_value_next, + _OP_slice_len : (*_Assembler)._asm_OP_slice_len, + _OP_slice_next : (*_Assembler)._asm_OP_slice_next, + _OP_marshal : (*_Assembler)._asm_OP_marshal, + _OP_marshal_p : (*_Assembler)._asm_OP_marshal_p, + _OP_marshal_text : (*_Assembler)._asm_OP_marshal_text, + _OP_marshal_text_p : (*_Assembler)._asm_OP_marshal_text_p, + _OP_cond_set : (*_Assembler)._asm_OP_cond_set, + _OP_cond_testc : (*_Assembler)._asm_OP_cond_testc, +} + +func (self *_Assembler) instr(v *_Instr) { + if fn := _OpFuncTab[v.op()]; fn != nil { + fn(self, v) + } else { + panic(fmt.Sprintf("invalid opcode: %d", v.op())) + } +} + +func (self *_Assembler) instrs() { + for i, v := range self.p { + self.Mark(i) + self.instr(&v) + self.debug_instr(i, &v) + } +} + +func (self *_Assembler) builtins() { + self.more_space() + self.error_too_deep() + self.error_invalid_number() + self.error_nan_or_infinite() + self.go_panic() +} + +func (self *_Assembler) epilogue() { + self.Mark(len(self.p)) + self.Emit("XORL", _ET, _ET) + self.Emit("XORL", _EP, _EP) + self.Link(_LB_error) + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) + self.Emit("MOVQ", 
_ET, _RET_et) // MOVQ ET, et<>+24(FP) + self.Emit("MOVQ", _EP, _RET_ep) // MOVQ EP, ep<>+32(FP) + self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP + self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP + self.Emit("RET") // RET +} + +func (self *_Assembler) prologue() { + self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP + self.load_buffer() // LOAD {buf} + self.Emit("MOVQ", _ARG_vp, _SP_p) // MOVQ vp<>+8(FP), SP.p + self.Emit("MOVQ", _ARG_sb, _ST) // MOVQ sb<>+16(FP), ST + self.Emit("XORL", _SP_x, _SP_x) // XORL SP.x, SP.x + self.Emit("XORL", _SP_f, _SP_f) // XORL SP.f, SP.f + self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q +} + +/** Assembler Inline Functions **/ + +func (self *_Assembler) xsave(reg ...obj.Addr) { + for i, v := range reg { + if i > _FP_saves / 8 - 1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8)) + } + } +} + +func (self *_Assembler) xload(reg ...obj.Addr) { + for i, v := range reg { + if i > _FP_saves / 8 - 1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v) + } + } +} + +func (self *_Assembler) rbuf_di() { + if _RP.Reg != x86.REG_DI { + panic("register allocation messed up: RP != DI") + } else { + self.Emit("ADDQ", _RL, _RP) + } +} + +func (self *_Assembler) store_int(nd int, fn obj.Addr, ins string) { + self.check_size(nd) + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit(ins, jit.Ptr(_SP_p, 0), _SI) // $ins (SP.p), SI + self.call_c(fn) // CALL_C $fn + self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL +} + +func (self *_Assembler) store_str(s string) { + i := 0 + m := rt.Str2Mem(s) + + /* 8-byte stores */ + for i <= len(m) - 8 { + self.Emit("MOVQ", jit.Imm(rt.Get64(m[i:])), _AX) // MOVQ $s[i:], AX + self.Emit("MOVQ", _AX, jit.Sib(_RP, _RL, 1, int64(i))) // MOVQ AX, i(RP)(RL) + i += 8 + } + + /* 4-byte stores */ + if i <= len(m) - 4 { + self.Emit("MOVL", jit.Imm(int64(rt.Get32(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVL $s[i:], i(RP)(RL) + i += 4 + } + + /* 2-byte stores */ + if i <= len(m) - 2 { + self.Emit("MOVW", jit.Imm(int64(rt.Get16(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVW $s[i:], i(RP)(RL) + i += 2 + } + + /* last byte */ + if i < len(m) { + self.Emit("MOVB", jit.Imm(int64(m[i])), jit.Sib(_RP, _RL, 1, int64(i))) // MOVB $s[i:], i(RP)(RL) + } +} + +func (self *_Assembler) check_size(n int) { + self.check_size_rl(jit.Ptr(_RL, int64(n))) +} + +func (self *_Assembler) check_size_r(r obj.Addr, d int) { + self.check_size_rl(jit.Sib(_RL, r, 1, int64(d))) +} + +func (self *_Assembler) check_size_rl(v obj.Addr) { + idx := self.x + key := _LB_more_space_return + strconv.Itoa(idx) + + /* the following code relies on LR == R9 to work */ + if _LR.Reg != x86.REG_R9 { + panic("register allocation messed up: LR != R9") + } + + /* check for buffer capacity */ + self.x++ + self.Emit("LEAQ", v, _AX) // LEAQ $v, AX + self.Emit("CMPQ", _AX, _RC) // CMPQ AX, RC + self.Sjmp("JBE" , key) // JBE _more_space_return_{n} + self.slice_grow_ax(key) // GROW $key + self.Link(key) // _more_space_return_{n}: +} + +func (self *_Assembler) slice_grow_ax(ret string) { + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ ?(PC), R9 + self.Sref(ret, 4) // .... 
&ret + self.Sjmp("JMP" , _LB_more_space) // JMP _more_space +} + +/** State Stack Helpers **/ + +const ( + _StateSize = int64(unsafe.Sizeof(_State{})) + _StackLimit = _MaxStack * _StateSize +) + +func (self *_Assembler) save_state() { + self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX + self.Emit("LEAQ", jit.Ptr(_CX, _StateSize), _R8) // LEAQ _StateSize(CX), R8 + self.Emit("CMPQ", _R8, jit.Imm(_StackLimit)) // CMPQ R8, $_StackLimit + self.Sjmp("JAE" , _LB_error_too_deep) // JA _error_too_deep + self.Emit("MOVQ", _SP_x, jit.Sib(_ST, _CX, 1, 8)) // MOVQ SP.x, 8(ST)(CX) + self.Emit("MOVQ", _SP_f, jit.Sib(_ST, _CX, 1, 16)) // MOVQ SP.f, 16(ST)(CX) + self.WriteRecNotAX(0, _SP_p, jit.Sib(_ST, _CX, 1, 24)) // MOVQ SP.p, 24(ST)(CX) + self.WriteRecNotAX(1, _SP_q, jit.Sib(_ST, _CX, 1, 32)) // MOVQ SP.q, 32(ST)(CX) + self.Emit("MOVQ", _R8, jit.Ptr(_ST, 0)) // MOVQ R8, (ST) +} + +func (self *_Assembler) drop_state(decr int64) { + self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("SUBQ" , jit.Imm(decr), _AX) // SUBQ $decr, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) + self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _SP_x) // MOVQ 8(ST)(AX), SP.x + self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 16), _SP_f) // MOVQ 16(ST)(AX), SP.f + self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 24), _SP_p) // MOVQ 24(ST)(AX), SP.p + self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 32), _SP_q) // MOVQ 32(ST)(AX), SP.q + self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX) + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 24)) // MOVOU X0, 24(ST)(AX) +} + +/** Buffer Helpers **/ + +func (self *_Assembler) add_char(ch byte) { + self.Emit("MOVB", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVB $ch, (RP)(RL) + self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL +} + +func (self *_Assembler) add_long(ch uint32, n int64) { + self.Emit("MOVL", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVL $ch, (RP)(RL) + self.Emit("ADDQ", jit.Imm(n), _RL) // ADDQ $n, RL +} + +func (self *_Assembler) add_text(ss string) { + self.store_str(ss) // TEXT $ss + self.Emit("ADDQ", jit.Imm(int64(len(ss))), _RL) // ADDQ ${len(ss)}, RL +} + +func (self *_Assembler) prep_buffer() { + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) +} + +func (self *_Assembler) prep_buffer_c() { + self.Emit("MOVQ", _ARG_rb, _DI) // MOVQ rb<>+0(FP), DI + self.Emit("MOVQ", _RL, jit.Ptr(_DI, 8)) // MOVQ RL, 8(DI) +} + +func (self *_Assembler) save_buffer() { + self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX + self.Emit("MOVQ", _RP, jit.Ptr(_CX, 0)) // MOVQ RP, (CX) + self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX) + self.Emit("MOVQ", _RC, jit.Ptr(_CX, 16)) // MOVQ RC, 16(CX) +} + +func (self *_Assembler) load_buffer() { + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX), RP + self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX), RL + self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), RC +} + +/** Function Interface Helpers **/ + +func (self *_Assembler) call(pc obj.Addr) { + self.Emit("MOVQ", pc, _AX) // MOVQ $pc, AX + self.Rjmp("CALL", _AX) // CALL AX +} + +func (self *_Assembler) save_c() { + self.xsave(_REG_ffi...) // SAVE $REG_ffi +} + +func (self *_Assembler) call_c(pc obj.Addr) { + self.call(pc) // CALL $pc + self.xload(_REG_ffi...) 
// LOAD $REG_ffi +} + +func (self *_Assembler) call_go(pc obj.Addr) { + self.xsave(_REG_all...) // SAVE $REG_all + self.call(pc) // CALL $pc + self.xload(_REG_all...) // LOAD $REG_all +} + +func (self *_Assembler) call_encoder(pc obj.Addr) { + self.xsave(_REG_enc...) // SAVE $REG_enc + self.call(pc) // CALL $pc + self.xload(_REG_enc...) // LOAD $REG_enc + self.load_buffer() // LOAD {buf} +} + +func (self *_Assembler) call_marshaler(fn obj.Addr, it *rt.GoType, vt reflect.Type) { + switch vt.Kind() { + case reflect.Interface : self.call_marshaler_i(fn, it) + case reflect.Ptr, reflect.Map: self.call_marshaler_v(fn, it, vt, true) + default : self.call_marshaler_v(fn, it, vt, false) + } +} + +func (self *_Assembler) call_marshaler_i(fn obj.Addr, it *rt.GoType) { + self.Emit("MOVQ" , jit.Gtype(it), _AX) // MOVQ $it, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _CX) // MOVQ 8(SP.p), CX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ" , "_null_{n}") // JZ _null_{n} + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 16)) // MOVQ CX, 16(SP) + self.call_go(_F_assertI2I) // CALL_GO assertI2I + self.prep_buffer() // MOVE {buf}, (SP) + self.Emit("MOVOU", jit.Ptr(_SP, 24), _X0) // MOVOU 24(SP), X0 + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) + self.Emit("MOVQ", _ARG_fv, _CX) // MOVQ ARG.fv, CX + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP) + self.call_encoder(fn) // CALL $fn + self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n} + self.Link("_null_{n}") // _null_{n}: + self.check_size(4) // SIZE $4 + self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL + self.Link("_done_{n}") // _done_{n}: +} + +func (self *_Assembler) call_marshaler_v(fn obj.Addr, it *rt.GoType, vt reflect.Type, deref bool) { + self.prep_buffer() // MOVE {buf}, (SP) + self.Emit("MOVQ", jit.Itab(it, vt), _AX) // MOVQ $(itab(it, vt)), AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + + /* dereference the pointer if needed */ + if !deref { + self.Emit("MOVQ", _SP_p, jit.Ptr(_SP, 16)) // MOVQ SP.p, 16(SP) + } else { + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + } + + /* call the encoder, and perform error checks */ + self.Emit("MOVQ", _ARG_fv, _CX) // MOVQ ARG.fv, CX + self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP) + self.call_encoder(fn) // CALL $fn + self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error +} + +/** Builtin: _more_space **/ + +var ( + _T_byte = jit.Type(byteType) + _F_growslice = jit.Func(growslice) +) + +func (self *_Assembler) more_space() { + self.Link(_LB_more_space) + self.Emit("MOVQ", _T_byte, jit.Ptr(_SP, 0)) // MOVQ $_T_byte, (SP) + self.Emit("MOVQ", _RP, jit.Ptr(_SP, 8)) // MOVQ RP, 8(SP) + self.Emit("MOVQ", _RL, jit.Ptr(_SP, 16)) // MOVQ RL, 16(SP) + self.Emit("MOVQ", _RC, jit.Ptr(_SP, 24)) // MOVQ RC, 24(SP) + self.Emit("MOVQ", _AX, 
jit.Ptr(_SP, 32))                                    // MOVQ AX, 32(SP)
+    self.xsave(_REG_jsr...)                          // SAVE $REG_jsr
+    self.call(_F_growslice)                          // CALL $pc
+    self.xload(_REG_jsr...)                          // LOAD $REG_jsr
+    self.Emit("MOVQ", jit.Ptr(_SP, 40), _RP)         // MOVQ 40(SP), RP
+    self.Emit("MOVQ", jit.Ptr(_SP, 48), _RL)         // MOVQ 48(SP), RL
+    self.Emit("MOVQ", jit.Ptr(_SP, 56), _RC)         // MOVQ 56(SP), RC
+    self.save_buffer()                               // SAVE {buf}
+    self.Rjmp("JMP" , _LR)                           // JMP  LR
+}
+
+/** Builtin Errors **/
+
+var (
+    _V_ERR_too_deep               = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_too_deep))))
+    _V_ERR_nan_or_infinite        = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_nan_or_infinite))))
+    _I_json_UnsupportedValueError = jit.Itab(rt.UnpackType(errorType), jsonUnsupportedValueType)
+)
+
+func (self *_Assembler) error_too_deep() {
+    self.Link(_LB_error_too_deep)
+    self.Emit("MOVQ", _V_ERR_too_deep, _EP)                 // MOVQ $_V_ERR_too_deep, EP
+    self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET)   // MOVQ $_I_json_UnsupportedValueError, ET
+    self.Sjmp("JMP" , _LB_error)                            // JMP  _error
+}
+
+func (self *_Assembler) error_invalid_number() {
+    self.Link(_LB_error_invalid_number)
+    self.call_go(_F_error_number)                           // CALL_GO error_number
+    self.Emit("MOVQ", jit.Ptr(_SP, 16), _ET)                // MOVQ 16(SP), ET
+    self.Emit("MOVQ", jit.Ptr(_SP, 24), _EP)                // MOVQ 24(SP), EP
+    self.Sjmp("JMP" , _LB_error)                            // JMP  _error
+}
+
+func (self *_Assembler) error_nan_or_infinite() {
+    self.Link(_LB_error_nan_or_infinite)
+    self.Emit("MOVQ", _V_ERR_nan_or_infinite, _EP)          // MOVQ $_V_ERR_nan_or_infinite, EP
+    self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET)   // MOVQ $_I_json_UnsupportedValueError, ET
+    self.Sjmp("JMP" , _LB_error)                            // JMP  _error
+}
+
+/** String Encoding Routine **/
+
+var (
+    _F_quote = jit.Imm(int64(native.S_quote))
+    _F_panic = jit.Func(goPanic)
+)
+
+func (self *_Assembler) go_panic() {
+    self.Link(_LB_panic)
+    self.Emit("MOVQ", _SP_p, jit.Ptr(_SP, 8))
+    self.call_go(_F_panic)
+}
+
+func (self *_Assembler) encode_string(doubleQuote bool) {
+    self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _AX)  // MOVQ  8(SP.p), AX
+    self.Emit("TESTQ", _AX, _AX)                // TESTQ AX, AX
+    self.Sjmp("JZ"   , "_str_empty_{n}")        // JZ    _str_empty_{n}
+    self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0))
+    self.Sjmp("JNE" , "_str_next_{n}")
+    self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), jit.Ptr(_SP, 0))
+    self.Sjmp("JMP", _LB_panic)
+    self.Link("_str_next_{n}")
+
+    /* opening quote, check for double quote */
+    if !doubleQuote {
+        self.check_size_r(_AX, 2)   // SIZE $2
+        self.add_char('"')          // CHAR $'"'
+    } else {
+        self.check_size_r(_AX, 6)   // SIZE $6
+        self.add_long(_IM_open, 3)  // TEXT $`"\"`
+    }
+
+    /* quoting loop */
+    self.Emit("XORL", _AX, _AX)     // XORL AX, AX
+    self.Emit("MOVQ", _AX, _VAR_sp) // MOVQ AX, sp
+    self.Link("_str_loop_{n}")      // _str_loop_{n}:
+    self.save_c()                   // SAVE $REG_ffi
+
+    /* load the output buffer first, and then input buffer,
+     * because the parameter registers collide with RP / RL / RC */
+    self.Emit("MOVQ", _RC, _CX)                     // MOVQ RC, CX
+    self.Emit("SUBQ", _RL, _CX)                     // SUBQ RL, CX
+    self.Emit("MOVQ", _CX, _VAR_dn)                 // MOVQ CX, dn
+    self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _DX) // LEAQ (RP)(RL), DX
+    self.Emit("LEAQ", _VAR_dn, _CX)                 // LEAQ dn, CX
+    self.Emit("MOVQ", _VAR_sp, _AX)                 // MOVQ sp, AX
+    self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _DI)       // MOVQ (SP.p), DI
+    self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SI)       // MOVQ 8(SP.p), SI
+    self.Emit("ADDQ", _AX, _DI)                     // ADDQ AX, DI
+    self.Emit("SUBQ", _AX, _SI)                     // SUBQ AX, SI
+
+    /* set the flags based on `doubleQuote` */
+    if !doubleQuote {
+        self.Emit("XORL", _R8,
_R8) // XORL R8, R8 + } else { + self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 + } + + /* call the native quoter */ + self.call_c(_F_quote) // CALL quote + self.Emit("ADDQ" , _VAR_dn, _RL) // ADDQ dn, RL + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , "_str_space_{n}") // JS _str_space_{n} + + /* close the string, check for double quote */ + if !doubleQuote { + self.check_size(1) // SIZE $1 + self.add_char('"') // CHAR $'"' + self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n} + } else { + self.check_size(3) // SIZE $3 + self.add_text("\\\"\"") // TEXT $'\""' + self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n} + } + + /* not enough space to contain the quoted string */ + self.Link("_str_space_{n}") // _str_space_{n}: + self.Emit("NOTQ", _AX) // NOTQ AX + self.Emit("ADDQ", _AX, _VAR_sp) // ADDQ AX, sp + self.Emit("LEAQ", jit.Sib(_RC, _RC, 1, 0), _AX) // LEAQ (RC)(RC), AX + self.slice_grow_ax("_str_loop_{n}") // GROW _str_loop_{n} + + /* empty string, check for double quote */ + if !doubleQuote { + self.Link("_str_empty_{n}") // _str_empty_{n}: + self.check_size(2) // SIZE $2 + self.add_text("\"\"") // TEXT $'""' + self.Link("_str_end_{n}") // _str_end_{n}: + } else { + self.Link("_str_empty_{n}") // _str_empty_{n}: + self.check_size(6) // SIZE $6 + self.add_text("\"\\\"\\\"\"") // TEXT $'"\"\""' + self.Link("_str_end_{n}") // _str_end_{n}: + } +} + +/** OpCode Assembler Functions **/ + +var ( + _T_json_Marshaler = rt.UnpackType(jsonMarshalerType) + _T_encoding_TextMarshaler = rt.UnpackType(encodingTextMarshalerType) +) + +var ( + _F_f64toa = jit.Imm(int64(native.S_f64toa)) + _F_f32toa = jit.Imm(int64(native.S_f32toa)) + _F_i64toa = jit.Imm(int64(native.S_i64toa)) + _F_u64toa = jit.Imm(int64(native.S_u64toa)) + _F_b64encode = jit.Imm(int64(_subr__b64encode)) +) + +var ( + _F_memmove = jit.Func(memmove) + _F_error_number = jit.Func(error_number) + _F_isValidNumber = jit.Func(isValidNumber) +) + +var ( + _F_iteratorStop = jit.Func(iteratorStop) + _F_iteratorNext = jit.Func(iteratorNext) + _F_iteratorStart = jit.Func(iteratorStart) +) + +var ( + _F_encodeTypedPointer obj.Addr + _F_encodeJsonMarshaler obj.Addr + _F_encodeTextMarshaler obj.Addr +) + +const ( + _MODE_AVX2 = 1 << 2 +) + +func init() { + _F_encodeTypedPointer = jit.Func(encodeTypedPointer) + _F_encodeJsonMarshaler = jit.Func(encodeJsonMarshaler) + _F_encodeTextMarshaler = jit.Func(encodeTextMarshaler) +} + +func (self *_Assembler) _asm_OP_null(_ *_Instr) { + self.check_size(4) + self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL +} + +func (self *_Assembler) _asm_OP_empty_arr(_ *_Instr) { + self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) + self.Sjmp("JC", "_empty_arr_{n}") + self._asm_OP_null(nil) + self.Sjmp("JMP", "_empty_arr_end_{n}") + self.Link("_empty_arr_{n}") + self.check_size(2) + self.Emit("MOVW", jit.Imm(_IM_array), jit.Sib(_RP, _RL, 1, 0)) + self.Emit("ADDQ", jit.Imm(2), _RL) + self.Link("_empty_arr_end_{n}") +} + +func (self *_Assembler) _asm_OP_empty_obj(_ *_Instr) { + self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) + self.Sjmp("JC", "_empty_obj_{n}") + self._asm_OP_null(nil) + self.Sjmp("JMP", "_empty_obj_end_{n}") + self.Link("_empty_obj_{n}") + self.check_size(2) + self.Emit("MOVW", jit.Imm(_IM_object), jit.Sib(_RP, _RL, 1, 0)) + self.Emit("ADDQ", jit.Imm(2), _RL) + self.Link("_empty_obj_end_{n}") +} + +func (self *_Assembler) 
_asm_OP_bool(_ *_Instr) { + self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 + self.Sjmp("JE" , "_false_{n}") // JE _false_{n} + self.check_size(4) // SIZE $4 + self.Emit("MOVL", jit.Imm(_IM_true), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'true', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL + self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n} + self.Link("_false_{n}") // _false_{n}: + self.check_size(5) // SIZE $5 + self.Emit("MOVL", jit.Imm(_IM_fals), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'fals', (RP)(RL*1) + self.Emit("MOVB", jit.Imm('e'), jit.Sib(_RP, _RL, 1, 4)) // MOVB $'e', 4(RP)(RL*1) + self.Emit("ADDQ", jit.Imm(5), _RL) // ADDQ $5, RL + self.Link("_end_{n}") // _end_{n}: +} + +func (self *_Assembler) _asm_OP_i8(_ *_Instr) { + self.store_int(4, _F_i64toa, "MOVBQSX") +} + +func (self *_Assembler) _asm_OP_i16(_ *_Instr) { + self.store_int(6, _F_i64toa, "MOVWQSX") +} + +func (self *_Assembler) _asm_OP_i32(_ *_Instr) { + self.store_int(17, _F_i64toa, "MOVLQSX") +} + +func (self *_Assembler) _asm_OP_i64(_ *_Instr) { + self.store_int(21, _F_i64toa, "MOVQ") +} + +func (self *_Assembler) _asm_OP_u8(_ *_Instr) { + self.store_int(3, _F_u64toa, "MOVBQZX") +} + +func (self *_Assembler) _asm_OP_u16(_ *_Instr) { + self.store_int(5, _F_u64toa, "MOVWQZX") +} + +func (self *_Assembler) _asm_OP_u32(_ *_Instr) { + self.store_int(16, _F_u64toa, "MOVLQZX") +} + +func (self *_Assembler) _asm_OP_u64(_ *_Instr) { + self.store_int(20, _F_u64toa, "MOVQ") +} + +func (self *_Assembler) _asm_OP_f32(_ *_Instr) { + self.check_size(32) + self.Emit("MOVL" , jit.Ptr(_SP_p, 0), _AX) // MOVL (SP.p), AX + self.Emit("ANDL" , jit.Imm(_FM_exp32), _AX) // ANDL $_FM_exp32, AX + self.Emit("XORL" , jit.Imm(_FM_exp32), _AX) // XORL $_FM_exp32, AX + self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit("MOVSS" , jit.Ptr(_SP_p, 0), _X0) // MOVSS (SP.p), X0 + self.call_c(_F_f32toa) // CALL_C f64toa + self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL +} + +func (self *_Assembler) _asm_OP_f64(_ *_Instr) { + self.check_size(32) + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ" , jit.Imm(_FM_exp64), _CX) // MOVQ $_FM_exp64, CX + self.Emit("ANDQ" , _CX, _AX) // ANDQ CX, AX + self.Emit("XORQ" , _CX, _AX) // XORQ CX, AX + self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit("MOVSD" , jit.Ptr(_SP_p, 0), _X0) // MOVSD (SP.p), X0 + self.call_c(_F_f64toa) // CALL_C f64toa + self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL +} + +func (self *_Assembler) _asm_OP_str(_ *_Instr) { + self.encode_string(false) +} + +func (self *_Assembler) _asm_OP_bin(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX + self.Emit("ADDQ", jit.Imm(2), _AX) // ADDQ $2, AX + self.Emit("MOVQ", jit.Imm(_IM_mulv), _CX) // MOVQ $_MF_mulv, CX + self.Emit("MOVQ", _DX, _R8) // MOVQ DX, R8 + self.From("MULQ", _CX) // MULQ CX + self.Emit("LEAQ", jit.Sib(_DX, _DX, 1, 1), _AX) // LEAQ 1(DX)(DX), AX + self.Emit("ORQ" , jit.Imm(2), _AX) // ORQ $2, AX + self.Emit("MOVQ", _R8, _DX) // MOVQ R8, DX + self.check_size_r(_AX, 0) // SIZE AX + self.add_char('"') // CHAR $'"' + self.save_c() // SAVE $REG_ffi + self.prep_buffer_c() // MOVE {buf}, DI + self.Emit("MOVQ", _SP_p, _SI) // MOVQ SP.p, SI + + /* check for AVX2 support */ + if !cpu.HasAVX2 { + self.Emit("XORL", _DX, _DX) // XORL DX, DX + } else { + self.Emit("MOVL", 
jit.Imm(_MODE_AVX2), _DX) // MOVL $_MODE_AVX2, DX + } + + /* call the encoder */ + self.call_c(_F_b64encode) // CALL b64encode + self.load_buffer() // LOAD {buf} + self.add_char('"') // CHAR $'"' +} + +func (self *_Assembler) _asm_OP_quote(_ *_Instr) { + self.encode_string(true) +} + +func (self *_Assembler) _asm_OP_number(_ *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _CX) // MOVQ (SP.p), CX + self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX + self.Sjmp("JZ" , "_empty_{n}") // JZ _empty_{n} + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ" , "_number_next_{n}") + self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), jit.Ptr(_SP, 0)) + self.Sjmp("JMP", _LB_panic) + self.Link("_number_next_{n}") + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.call_go(_F_isValidNumber) // CALL_GO isValidNumber + self.Emit("CMPB" , jit.Ptr(_SP, 16), jit.Imm(0)) // CMPB 16(SP), $0 + self.Sjmp("JE" , _LB_error_invalid_number) // JE _error_invalid_number + self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX + self.check_size_r(_AX, 0) // SIZE AX + self.Emit("LEAQ" , jit.Sib(_RP, _RL, 1, 0), _AX) // LEAQ (RP)(RL), AX + self.Emit("ADDQ" , jit.Ptr(_SP_p, 8), _RL) // ADDQ 8(SP.p), RL + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVOU", jit.Ptr(_SP_p, 0), _X0) // MOVOU (SP.p), X0 + self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP) + self.call_go(_F_memmove) // CALL_GO memmove + self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n} + self.Link("_empty_{n}") // _empty_{n}: + self.check_size(1) // SIZE $1 + self.add_char('0') // CHAR $'0' + self.Link("_done_{n}") // _done_{n}: +} + +func (self *_Assembler) _asm_OP_eface(_ *_Instr) { + self.prep_buffer() // MOVE {buf}, (SP)s + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _AX) // LEAQ 8(SP.p), AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 24)) // MOVQ ST, 24(SP) + self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP) + self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer + self.Emit("MOVQ" , jit.Ptr(_SP, 40), _ET) // MOVQ 40(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 48), _EP) // MOVQ 48(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error +} + +func (self *_Assembler) _asm_OP_iface(_ *_Instr) { + self.prep_buffer() // MOVE {buf}, (SP) + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ" , jit.Ptr(_AX, 8), _AX) // MOVQ 8(AX), AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _AX) // LEAQ 8(SP.p), AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 24)) // MOVQ ST, 24(SP) + self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP) + self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer + self.Emit("MOVQ" , jit.Ptr(_SP, 40), _ET) // MOVQ 40(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 48), _EP) // MOVQ 48(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error +} + +func (self *_Assembler) _asm_OP_byte(p *_Instr) { + 
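    // For orientation: OP_byte stores one pre-encoded literal byte (a
    // separator such as ',' or ':') directly into the output buffer. The
    // three lines below reserve one byte of capacity, write the immediate
    // at the current tail, and advance the length register RL; e.g. for
    // p.i64() == ',' the emitted code is, in effect:
    //
    //     MOVB $',', (RP)(RL*1)
    //     ADDQ $1, RL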
self.check_size(1) + self.Emit("MOVB", jit.Imm(p.i64()), jit.Sib(_RP, _RL, 1, 0)) // MOVL p.vi(), (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL +} + +func (self *_Assembler) _asm_OP_text(p *_Instr) { + self.check_size(len(p.vs())) // SIZE ${len(p.vs())} + self.add_text(p.vs()) // TEXT ${p.vs()} +} + +func (self *_Assembler) _asm_OP_deref(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p +} + +func (self *_Assembler) _asm_OP_index(p *_Instr) { + self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ $p.vi(), AX + self.Emit("ADDQ", _AX, _SP_p) // ADDQ AX, SP.p +} + +func (self *_Assembler) _asm_OP_load(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -24), _SP_x) // MOVQ -24(ST)(AX), SP.x + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -8), _SP_p) // MOVQ -8(ST)(AX), SP.p + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _SP_q) // MOVQ (ST)(AX), SP.q +} + +func (self *_Assembler) _asm_OP_save(_ *_Instr) { + self.save_state() +} + +func (self *_Assembler) _asm_OP_drop(_ *_Instr) { + self.drop_state(_StateSize) +} + +func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) { + self.drop_state(_StateSize * 2) // DROP $(_StateSize * 2) + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 56)) // MOVOU X0, 56(ST)(AX) +} + +func (self *_Assembler) _asm_OP_recurse(p *_Instr) { + self.prep_buffer() // MOVE {buf}, (SP) + vt, pv := p.vp() + self.Emit("MOVQ", jit.Type(vt), _AX) // MOVQ $(type(p.vt())), AX + self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP) + + /* check for indirection */ + if !rt.UnpackType(vt).Indirect() { + self.Emit("MOVQ", _SP_p, _AX) // MOVQ SP.p, AX + } else { + self.Emit("MOVQ", _SP_p, _VAR_vp) // MOVQ SP.p, 48(SP) + self.Emit("LEAQ", _VAR_vp, _AX) // LEAQ 48(SP), AX + } + + /* call the encoder */ + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 24)) // MOVQ ST, 24(SP) + self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX + if pv { + self.Emit("BTCQ", jit.Imm(bitPointerValue), _AX) // BTCQ $1, AX + } + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP) + self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer + self.Emit("MOVQ" , jit.Ptr(_SP, 40), _ET) // MOVQ 40(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 48), _EP) // MOVQ 48(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error +} + +func (self *_Assembler) _asm_OP_is_nil(p *_Instr) { + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_nil_p1(p *_Instr) { + self.Emit("CMPQ", jit.Ptr(_SP_p, 8), jit.Imm(0)) // CMPQ 8(SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_zero_1(p *_Instr) { + self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_zero_2(p *_Instr) { + self.Emit("CMPW", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPW (SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_zero_4(p *_Instr) { + self.Emit("CMPL", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPL (SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_zero_8(p *_Instr) { + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_zero_map(p *_Instr) { + self.Emit("MOVQ" , 
jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Xjmp("JZ" , p.vi()) // JZ p.vi() + self.Emit("CMPQ" , jit.Ptr(_AX, 0), jit.Imm(0)) // CMPQ (AX), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_goto(p *_Instr) { + self.Xjmp("JMP", p.vi()) +} + +func (self *_Assembler) _asm_OP_map_iter(p *_Instr) { + self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ $p.vt(), AX + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _CX) // MOVQ (SP.p), CX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP) + self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP) + self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP) + self.call_go(_F_iteratorStart) // CALL_GO iteratorStart + self.Emit("MOVQ" , jit.Ptr(_SP, 24), _SP_q) // MOVQ 24(SP), SP.q + self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error +} + +func (self *_Assembler) _asm_OP_map_stop(_ *_Instr) { + self.Emit("MOVQ", _SP_q, jit.Ptr(_SP, 0)) // MOVQ SP.q, 0(SP) + self.call_go(_F_iteratorStop) // CALL_GO iteratorStop + self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q +} + +func (self *_Assembler) _asm_OP_map_check_key(p *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_SP_q, 0), _SP_p) // MOVQ (SP.q), SP.p + self.Emit("TESTQ", _SP_p, _SP_p) // TESTQ SP.p, SP.p + self.Xjmp("JZ" , p.vi()) // JNZ p.vi() +} + +func (self *_Assembler) _asm_OP_map_write_key(p *_Instr) { + self.Emit("BTQ", jit.Imm(bitSortMapKeys), _ARG_fv) // BTQ ${SortMapKeys}, fv + self.Sjmp("JNC", "_unordered_key_{n}") // JNC _unordered_key_{n} + self.encode_string(false) // STR $false + self.Xjmp("JMP", p.vi()) // JMP ${p.vi()} + self.Link("_unordered_key_{n}") // _unordered_key_{n}: +} + +func (self *_Assembler) _asm_OP_map_value_next(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_SP_q, 8), _SP_p) // MOVQ 8(SP.q), SP.p + self.Emit("MOVQ", _SP_q, jit.Ptr(_SP, 0)) // MOVQ SP.q, (SP) + self.call_go(_F_iteratorNext) // CALL_GO iteratorNext +} + +func (self *_Assembler) _asm_OP_slice_len(_ *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _SP_x) // MOVQ 8(SP.p), SP.x + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p + self.Emit("ORQ" , jit.Imm(1 << _S_init), _SP_f) // ORQ $(1<<_S_init), SP.f +} + +func (self *_Assembler) _asm_OP_slice_next(p *_Instr) { + self.Emit("TESTQ" , _SP_x, _SP_x) // TESTQ SP.x, SP.x + self.Xjmp("JZ" , p.vi()) // JZ p.vi() + self.Emit("SUBQ" , jit.Imm(1), _SP_x) // SUBQ $1, SP.x + self.Emit("BTRQ" , jit.Imm(_S_init), _SP_f) // BTRQ $_S_init, SP.f + self.Emit("LEAQ" , jit.Ptr(_SP_p, int64(p.vlen())), _AX) // LEAQ $(p.vlen())(SP.p), AX + self.Emit("CMOVQCC", _AX, _SP_p) // CMOVQNC AX, SP.p +} + +func (self *_Assembler) _asm_OP_marshal(p *_Instr) { + self.call_marshaler(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt()) +} + +func (self *_Assembler) _asm_OP_marshal_p(p *_Instr) { + if p.vk() != reflect.Ptr { + panic("marshal_p: invalid type") + } else { + self.call_marshaler_v(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt(), false) + } +} + +func (self *_Assembler) _asm_OP_marshal_text(p *_Instr) { + self.call_marshaler(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt()) +} + +func (self *_Assembler) _asm_OP_marshal_text_p(p *_Instr) { + if p.vk() != reflect.Ptr { + panic("marshal_text_p: invalid type") + } else { + self.call_marshaler_v(_F_encodeTextMarshaler, 
_T_encoding_TextMarshaler, p.vt(), false) + } +} + +func (self *_Assembler) _asm_OP_cond_set(_ *_Instr) { + self.Emit("ORQ", jit.Imm(1 << _S_cond), _SP_f) // ORQ $(1<<_S_cond), SP.f +} + +func (self *_Assembler) _asm_OP_cond_testc(p *_Instr) { + self.Emit("BTRQ", jit.Imm(_S_cond), _SP_f) // BTRQ $_S_cond, SP.f + self.Xjmp("JC" , p.vi()) +} + +func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) { + self.Emit("MOVQ", jit.Imm(int64(p2.op())), jit.Ptr(_SP, 16))// MOVQ $(p2.op()), 16(SP) + self.Emit("MOVQ", jit.Imm(int64(p1.op())), jit.Ptr(_SP, 8)) // MOVQ $(p1.op()), 8(SP) + self.Emit("MOVQ", jit.Imm(int64(i)), jit.Ptr(_SP, 0)) // MOVQ $(i), (SP) + self.call_go(_F_println) +} + +var ( + _V_writeBarrier = jit.Imm(int64(_runtime_writeBarrier)) + + _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX) +) + +func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr) { + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _R10) + self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, _AX) + self.xsave(_DI) + self.Emit("LEAQ", rec, _DI) + self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX + self.Rjmp("CALL", _R10) + self.xload(_DI) + self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go117.go b/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go117.go new file mode 100644 index 0000000..8cd83e8 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go117.go @@ -0,0 +1,1201 @@ +//go:build go1.17 && !go1.21 +// +build go1.17,!go1.21 + +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package encoder + +import ( + `fmt` + `reflect` + `strconv` + `unsafe` + + `github.com/bytedance/sonic/internal/cpu` + `github.com/bytedance/sonic/internal/jit` + `github.com/bytedance/sonic/internal/native/types` + `github.com/twitchyliquid64/golang-asm/obj` + `github.com/twitchyliquid64/golang-asm/obj/x86` + + `github.com/bytedance/sonic/internal/native` + `github.com/bytedance/sonic/internal/rt` +) + +/** Register Allocations + * + * State Registers: + * + * %rbx : stack base + * %rdi : result pointer + * %rsi : result length + * %rdx : result capacity + * %r12 : sp->p + * %r13 : sp->q + * %r14 : sp->x + * %r15 : sp->f + * + * Error Registers: + * + * %r10 : error type register + * %r11 : error pointer register + */ + +/** Function Prototype & Stack Map + * + * func (buf *[]byte, p unsafe.Pointer, sb *_Stack, fv uint64) (err error) + * + * buf : (FP) + * p : 8(FP) + * sb : 16(FP) + * fv : 24(FP) + * err.vt : 32(FP) + * err.vp : 40(FP) + */ + +const ( + _S_cond = iota + _S_init +) + +const ( + _FP_args = 32 // 32 bytes for spill registers of arguments + _FP_fargs = 40 // 40 bytes for passing arguments to other Go functions + _FP_saves = 64 // 64 bytes for saving the registers before CALL instructions + _FP_locals = 24 // 24 bytes for local variables +) + +const ( + _FP_loffs = _FP_fargs + _FP_saves + _FP_offs = _FP_loffs + _FP_locals + // _FP_offs = _FP_loffs + _FP_locals + _FP_debug + _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer + _FP_base = _FP_size + 8 // 8 bytes for the return address +) + +const ( + _FM_exp32 = 0x7f800000 + _FM_exp64 = 0x7ff0000000000000 +) + +const ( + _IM_null = 0x6c6c756e // 'null' + _IM_true = 0x65757274 // 'true' + _IM_fals = 0x736c6166 // 'fals' ('false' without the 'e') + _IM_open = 0x00225c22 // '"\"∅' + _IM_array = 0x5d5b // '[]' + _IM_object = 0x7d7b // '{}' + _IM_mulv = -0x5555555555555555 +) + +const ( + _LB_more_space = "_more_space" + _LB_more_space_return = "_more_space_return_" +) + +const ( + _LB_error = "_error" + _LB_error_too_deep = "_error_too_deep" + _LB_error_invalid_number = "_error_invalid_number" + _LB_error_nan_or_infinite = "_error_nan_or_infinite" + _LB_panic = "_panic" +) + +var ( + _AX = jit.Reg("AX") + _BX = jit.Reg("BX") + _CX = jit.Reg("CX") + _DX = jit.Reg("DX") + _DI = jit.Reg("DI") + _SI = jit.Reg("SI") + _BP = jit.Reg("BP") + _SP = jit.Reg("SP") + _R8 = jit.Reg("R8") + _R9 = jit.Reg("R9") +) + +var ( + _X0 = jit.Reg("X0") + _Y0 = jit.Reg("Y0") +) + +var ( + _ST = jit.Reg("R15") // can't use R14 since it's always scratched by Go... 
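+ // _RP/_RL/_RC below mirror the output []byte header: buffer pointer, length and capacity. 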
+ _RP = jit.Reg("DI") + _RL = jit.Reg("SI") + _RC = jit.Reg("DX") +) + +var ( + _LR = jit.Reg("R9") + _ET = jit.Reg("AX") + _EP = jit.Reg("BX") +) + +var ( + _SP_p = jit.Reg("R10") // saved on BX when call_c + _SP_q = jit.Reg("R11") // saved on BP when call_c + _SP_x = jit.Reg("R12") + _SP_f = jit.Reg("R13") +) + +var ( + _ARG_rb = jit.Ptr(_SP, _FP_base) + _ARG_vp = jit.Ptr(_SP, _FP_base + 8) + _ARG_sb = jit.Ptr(_SP, _FP_base + 16) + _ARG_fv = jit.Ptr(_SP, _FP_base + 24) +) + +var ( + _RET_et = _ET + _RET_ep = _EP +) + +var ( + _VAR_sp = jit.Ptr(_SP, _FP_fargs + _FP_saves) + _VAR_dn = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8) + _VAR_vp = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16) +) + +var ( + _REG_ffi = []obj.Addr{ _RP, _RL, _RC} + _REG_b64 = []obj.Addr{_SP_p, _SP_q} + + _REG_all = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RP, _RL, _RC} + _REG_ms = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _LR} + _REG_enc = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RL} +) + +type _Assembler struct { + jit.BaseAssembler + p _Program + x int + name string +} + +func newAssembler(p _Program) *_Assembler { + return new(_Assembler).Init(p) +} + +/** Assembler Interface **/ + +func (self *_Assembler) Load() _Encoder { + return ptoenc(self.BaseAssembler.Load("encode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs)) +} + +func (self *_Assembler) Init(p _Program) *_Assembler { + self.p = p + self.BaseAssembler.Init(self.compile) + return self +} + +func (self *_Assembler) compile() { + self.prologue() + self.instrs() + self.epilogue() + self.builtins() +} + +/** Assembler Stages **/ + +var _OpFuncTab = [256]func(*_Assembler, *_Instr) { + _OP_null : (*_Assembler)._asm_OP_null, + _OP_empty_arr : (*_Assembler)._asm_OP_empty_arr, + _OP_empty_obj : (*_Assembler)._asm_OP_empty_obj, + _OP_bool : (*_Assembler)._asm_OP_bool, + _OP_i8 : (*_Assembler)._asm_OP_i8, + _OP_i16 : (*_Assembler)._asm_OP_i16, + _OP_i32 : (*_Assembler)._asm_OP_i32, + _OP_i64 : (*_Assembler)._asm_OP_i64, + _OP_u8 : (*_Assembler)._asm_OP_u8, + _OP_u16 : (*_Assembler)._asm_OP_u16, + _OP_u32 : (*_Assembler)._asm_OP_u32, + _OP_u64 : (*_Assembler)._asm_OP_u64, + _OP_f32 : (*_Assembler)._asm_OP_f32, + _OP_f64 : (*_Assembler)._asm_OP_f64, + _OP_str : (*_Assembler)._asm_OP_str, + _OP_bin : (*_Assembler)._asm_OP_bin, + _OP_quote : (*_Assembler)._asm_OP_quote, + _OP_number : (*_Assembler)._asm_OP_number, + _OP_eface : (*_Assembler)._asm_OP_eface, + _OP_iface : (*_Assembler)._asm_OP_iface, + _OP_byte : (*_Assembler)._asm_OP_byte, + _OP_text : (*_Assembler)._asm_OP_text, + _OP_deref : (*_Assembler)._asm_OP_deref, + _OP_index : (*_Assembler)._asm_OP_index, + _OP_load : (*_Assembler)._asm_OP_load, + _OP_save : (*_Assembler)._asm_OP_save, + _OP_drop : (*_Assembler)._asm_OP_drop, + _OP_drop_2 : (*_Assembler)._asm_OP_drop_2, + _OP_recurse : (*_Assembler)._asm_OP_recurse, + _OP_is_nil : (*_Assembler)._asm_OP_is_nil, + _OP_is_nil_p1 : (*_Assembler)._asm_OP_is_nil_p1, + _OP_is_zero_1 : (*_Assembler)._asm_OP_is_zero_1, + _OP_is_zero_2 : (*_Assembler)._asm_OP_is_zero_2, + _OP_is_zero_4 : (*_Assembler)._asm_OP_is_zero_4, + _OP_is_zero_8 : (*_Assembler)._asm_OP_is_zero_8, + _OP_is_zero_map : (*_Assembler)._asm_OP_is_zero_map, + _OP_goto : (*_Assembler)._asm_OP_goto, + _OP_map_iter : (*_Assembler)._asm_OP_map_iter, + _OP_map_stop : (*_Assembler)._asm_OP_map_stop, + _OP_map_check_key : (*_Assembler)._asm_OP_map_check_key, + _OP_map_write_key : (*_Assembler)._asm_OP_map_write_key, + _OP_map_value_next : (*_Assembler)._asm_OP_map_value_next, + _OP_slice_len : 
(*_Assembler)._asm_OP_slice_len, + _OP_slice_next : (*_Assembler)._asm_OP_slice_next, + _OP_marshal : (*_Assembler)._asm_OP_marshal, + _OP_marshal_p : (*_Assembler)._asm_OP_marshal_p, + _OP_marshal_text : (*_Assembler)._asm_OP_marshal_text, + _OP_marshal_text_p : (*_Assembler)._asm_OP_marshal_text_p, + _OP_cond_set : (*_Assembler)._asm_OP_cond_set, + _OP_cond_testc : (*_Assembler)._asm_OP_cond_testc, +} + +func (self *_Assembler) instr(v *_Instr) { + if fn := _OpFuncTab[v.op()]; fn != nil { + fn(self, v) + } else { + panic(fmt.Sprintf("invalid opcode: %d", v.op())) + } +} + +func (self *_Assembler) instrs() { + for i, v := range self.p { + self.Mark(i) + self.instr(&v) + self.debug_instr(i, &v) + } +} + +func (self *_Assembler) builtins() { + self.more_space() + self.error_too_deep() + self.error_invalid_number() + self.error_nan_or_infinite() + self.go_panic() +} + +func (self *_Assembler) epilogue() { + self.Mark(len(self.p)) + self.Emit("XORL", _ET, _ET) + self.Emit("XORL", _EP, _EP) + self.Link(_LB_error) + self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX + self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX) + self.Emit("MOVQ", jit.Imm(0), _ARG_rb) // MOVQ AX, rb<>+0(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_vp) // MOVQ BX, vp<>+8(FP) + self.Emit("MOVQ", jit.Imm(0), _ARG_sb) // MOVQ CX, sb<>+16(FP) + self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP + self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP + self.Emit("RET") // RET +} + +func (self *_Assembler) prologue() { + self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP + self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP) + self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP + self.Emit("MOVQ", _AX, _ARG_rb) // MOVQ AX, rb<>+0(FP) + self.Emit("MOVQ", _BX, _ARG_vp) // MOVQ BX, vp<>+8(FP) + self.Emit("MOVQ", _CX, _ARG_sb) // MOVQ CX, sb<>+16(FP) + self.Emit("MOVQ", _DI, _ARG_fv) // MOVQ DI, rb<>+24(FP) + self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX) , DI + self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX) , SI + self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), DX + self.Emit("MOVQ", _BX, _SP_p) // MOVQ BX, R10 + self.Emit("MOVQ", _CX, _ST) // MOVQ CX, R8 + self.Emit("XORL", _SP_x, _SP_x) // XORL R10, R12 + self.Emit("XORL", _SP_f, _SP_f) // XORL R11, R13 + self.Emit("XORL", _SP_q, _SP_q) // XORL R13, R11 +} + +/** Assembler Inline Functions **/ + +func (self *_Assembler) xsave(reg ...obj.Addr) { + for i, v := range reg { + if i > _FP_saves / 8 - 1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8)) + } + } +} + +func (self *_Assembler) xload(reg ...obj.Addr) { + for i, v := range reg { + if i > _FP_saves / 8 - 1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v) + } + } +} + +func (self *_Assembler) rbuf_di() { + if _RP.Reg != x86.REG_DI { + panic("register allocation messed up: RP != DI") + } else { + self.Emit("ADDQ", _RL, _RP) + } +} + +func (self *_Assembler) store_int(nd int, fn obj.Addr, ins string) { + self.check_size(nd) + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit(ins, jit.Ptr(_SP_p, 0), _SI) // $ins (SP.p), SI + self.call_c(fn) // CALL_C $fn + self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL +} + +func (self *_Assembler) store_str(s string) { + i := 0 + m := rt.Str2Mem(s) + + /* 8-byte stores */ + for i <= len(m) - 8 { + self.Emit("MOVQ", 
jit.Imm(rt.Get64(m[i:])), _AX) // MOVQ $s[i:], AX + self.Emit("MOVQ", _AX, jit.Sib(_RP, _RL, 1, int64(i))) // MOVQ AX, i(RP)(RL) + i += 8 + } + + /* 4-byte stores */ + if i <= len(m) - 4 { + self.Emit("MOVL", jit.Imm(int64(rt.Get32(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVL $s[i:], i(RP)(RL) + i += 4 + } + + /* 2-byte stores */ + if i <= len(m) - 2 { + self.Emit("MOVW", jit.Imm(int64(rt.Get16(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVW $s[i:], i(RP)(RL) + i += 2 + } + + /* last byte */ + if i < len(m) { + self.Emit("MOVB", jit.Imm(int64(m[i])), jit.Sib(_RP, _RL, 1, int64(i))) // MOVB $s[i:], i(RP)(RL) + } +} + +func (self *_Assembler) check_size(n int) { + self.check_size_rl(jit.Ptr(_RL, int64(n))) +} + +func (self *_Assembler) check_size_r(r obj.Addr, d int) { + self.check_size_rl(jit.Sib(_RL, r, 1, int64(d))) +} + +func (self *_Assembler) check_size_rl(v obj.Addr) { + idx := self.x + key := _LB_more_space_return + strconv.Itoa(idx) + + /* the following code relies on LR == R9 to work */ + if _LR.Reg != x86.REG_R9 { + panic("register allocation messed up: LR != R9") + } + + /* check for buffer capacity */ + self.x++ + self.Emit("LEAQ", v, _AX) // LEAQ $v, AX + self.Emit("CMPQ", _AX, _RC) // CMPQ AX, RC + self.Sjmp("JBE" , key) // JBE _more_space_return_{n} + self.slice_grow_ax(key) // GROW $key + self.Link(key) // _more_space_return_{n}: +} + +func (self *_Assembler) slice_grow_ax(ret string) { + self.Byte(0x4c, 0x8d, 0x0d) // LEAQ ?(PC), R9 + self.Sref(ret, 4) // .... &ret + self.Sjmp("JMP" , _LB_more_space) // JMP _more_space +} + +/** State Stack Helpers **/ + +const ( + _StateSize = int64(unsafe.Sizeof(_State{})) + _StackLimit = _MaxStack * _StateSize +) + +func (self *_Assembler) save_state() { + self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX + self.Emit("LEAQ", jit.Ptr(_CX, _StateSize), _R9) // LEAQ _StateSize(CX), R9 + self.Emit("CMPQ", _R9, jit.Imm(_StackLimit)) // CMPQ R9, $_StackLimit + self.Sjmp("JAE" , _LB_error_too_deep) // JA _error_too_deep + self.Emit("MOVQ", _SP_x, jit.Sib(_ST, _CX, 1, 8)) // MOVQ SP.x, 8(ST)(CX) + self.Emit("MOVQ", _SP_f, jit.Sib(_ST, _CX, 1, 16)) // MOVQ SP.f, 16(ST)(CX) + self.WriteRecNotAX(0, _SP_p, jit.Sib(_ST, _CX, 1, 24)) // MOVQ SP.p, 24(ST)(CX) + self.WriteRecNotAX(1, _SP_q, jit.Sib(_ST, _CX, 1, 32)) // MOVQ SP.q, 32(ST)(CX) + self.Emit("MOVQ", _R9, jit.Ptr(_ST, 0)) // MOVQ R9, (ST) +} + +func (self *_Assembler) drop_state(decr int64) { + self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("SUBQ" , jit.Imm(decr), _AX) // SUBQ $decr, AX + self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST) + self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _SP_x) // MOVQ 8(ST)(AX), SP.x + self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 16), _SP_f) // MOVQ 16(ST)(AX), SP.f + self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 24), _SP_p) // MOVQ 24(ST)(AX), SP.p + self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 32), _SP_q) // MOVQ 32(ST)(AX), SP.q + self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0 + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX) + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 24)) // MOVOU X0, 24(ST)(AX) +} + +/** Buffer Helpers **/ + +func (self *_Assembler) add_char(ch byte) { + self.Emit("MOVB", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVB $ch, (RP)(RL) + self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL +} + +func (self *_Assembler) add_long(ch uint32, n int64) { + self.Emit("MOVL", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVL $ch, (RP)(RL) + self.Emit("ADDQ", jit.Imm(n), _RL) // ADDQ $n, RL +} + 
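+// ensureSketch is an illustrative, Go-level sketch (added for exposition; it is
+// never called by the JIT) of the growth protocol that check_size and
+// slice_grow_ax compile down to: compare the required length against the
+// capacity cached in RC and, only on overflow, branch out to _more_space
+// (which calls growslice), then resume at the recorded return label.
+func ensureSketch(buf []byte, n int) []byte {
+    if len(buf)+n > cap(buf) { // LEAQ n(RL), AX; CMPQ AX, RC; JBE _more_space_return_{n}
+        nb := make([]byte, len(buf), len(buf)+n) // _more_space: growslice
+        copy(nb, buf)
+        buf = nb
+    }
+    return buf // _more_space_return_{n}:
+}
+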
+func (self *_Assembler) add_text(ss string) { + self.store_str(ss) // TEXT $ss + self.Emit("ADDQ", jit.Imm(int64(len(ss))), _RL) // ADDQ ${len(ss)}, RL +} + +// get *buf at AX +func (self *_Assembler) prep_buffer_AX() { + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) +} + +func (self *_Assembler) save_buffer() { + self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX + self.Emit("MOVQ", _RP, jit.Ptr(_CX, 0)) // MOVQ RP, (CX) + self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX) + self.Emit("MOVQ", _RC, jit.Ptr(_CX, 16)) // MOVQ RC, 16(CX) +} + +// get *buf at AX +func (self *_Assembler) load_buffer_AX() { + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX), RP + self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX), RL + self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), RC +} + +/** Function Interface Helpers **/ + +func (self *_Assembler) call(pc obj.Addr) { + self.Emit("MOVQ", pc, _LR) // MOVQ $pc, AX + self.Rjmp("CALL", _LR) // CALL AX +} + +func (self *_Assembler) save_c() { + self.xsave(_REG_ffi...) // SAVE $REG_ffi +} + +func (self *_Assembler) call_b64(pc obj.Addr) { + self.xsave(_REG_b64...) // SAVE $REG_all + self.call(pc) // CALL $pc + self.xload(_REG_b64...) // LOAD $REG_ffi +} + +func (self *_Assembler) call_c(pc obj.Addr) { + self.Emit("XCHGQ", _SP_p, _BX) + self.Emit("XCHGQ", _SP_q, _BP) + self.call(pc) // CALL $pc + self.xload(_REG_ffi...) // LOAD $REG_ffi + self.Emit("XCHGQ", _SP_p, _BX) + self.Emit("XCHGQ", _SP_q, _BP) +} + +func (self *_Assembler) call_go(pc obj.Addr) { + self.xsave(_REG_all...) // SAVE $REG_all + self.call(pc) // CALL $pc + self.xload(_REG_all...) // LOAD $REG_all +} + +func (self *_Assembler) call_more_space(pc obj.Addr) { + self.xsave(_REG_ms...) // SAVE $REG_all + self.call(pc) // CALL $pc + self.xload(_REG_ms...) // LOAD $REG_all +} + +func (self *_Assembler) call_encoder(pc obj.Addr) { + self.xsave(_REG_enc...) // SAVE $REG_all + self.call(pc) // CALL $pc + self.xload(_REG_enc...) 
// LOAD $REG_all +} + +func (self *_Assembler) call_marshaler(fn obj.Addr, it *rt.GoType, vt reflect.Type) { + switch vt.Kind() { + case reflect.Interface : self.call_marshaler_i(fn, it) + case reflect.Ptr, reflect.Map : self.call_marshaler_v(fn, it, vt, true) + default : self.call_marshaler_v(fn, it, vt, false) + } +} + +func (self *_Assembler) call_marshaler_i(fn obj.Addr, it *rt.GoType) { + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ" , "_null_{n}") // JZ _null_{n} + self.Emit("MOVQ" , _AX, _BX) // MOVQ AX, BX + self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _CX) // MOVQ 8(SP.p), CX + self.Emit("MOVQ" , jit.Gtype(it), _AX) // MOVQ $it, AX + self.call_go(_F_assertI2I) // CALL_GO assertI2I + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ" , "_null_{n}") // JZ _null_{n} + self.Emit("MOVQ", _BX, _CX) // MOVQ BX, CX + self.Emit("MOVQ", _AX, _BX) // MOVQ AX, BX + self.prep_buffer_AX() + self.Emit("MOVQ", _ARG_fv, _DI) // MOVQ ARG.fv, DI + self.call_go(fn) // CALL $fn + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.load_buffer_AX() + self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n} + self.Link("_null_{n}") // _null_{n}: + self.check_size(4) // SIZE $4 + self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL + self.Link("_done_{n}") // _done_{n}: +} + +func (self *_Assembler) call_marshaler_v(fn obj.Addr, it *rt.GoType, vt reflect.Type, deref bool) { + self.prep_buffer_AX() // MOVE {buf}, (SP) + self.Emit("MOVQ", jit.Itab(it, vt), _BX) // MOVQ $(itab(it, vt)), BX + + /* dereference the pointer if needed */ + if !deref { + self.Emit("MOVQ", _SP_p, _CX) // MOVQ SP.p, CX + } else { + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _CX) // MOVQ 0(SP.p), CX + } + + /* call the encoder, and perform error checks */ + self.Emit("MOVQ", _ARG_fv, _DI) // MOVQ ARG.fv, DI + self.call_go(fn) // CALL $fn + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.load_buffer_AX() +} + +/** Builtin: _more_space **/ + +var ( + _T_byte = jit.Type(byteType) + _F_growslice = jit.Func(growslice) +) + +// AX must hold n, the number of extra bytes required +func (self *_Assembler) more_space() { + self.Link(_LB_more_space) + self.Emit("MOVQ", _RP, _BX) // MOVQ DI, BX + self.Emit("MOVQ", _RL, _CX) // MOVQ SI, CX + self.Emit("MOVQ", _RC, _DI) // MOVQ DX, DI + self.Emit("MOVQ", _AX, _SI) // MOVQ AX, SI + self.Emit("MOVQ", _T_byte, _AX) // MOVQ $_T_byte, AX + self.call_more_space(_F_growslice) // CALL $pc + self.Emit("MOVQ", _AX, _RP) // MOVQ AX, DI + self.Emit("MOVQ", _BX, _RL) // MOVQ BX, SI + self.Emit("MOVQ", _CX, _RC) // MOVQ CX, DX + self.save_buffer() // SAVE {buf} + self.Rjmp("JMP" , _LR) // JMP LR +} + +/** Builtin Errors **/ + +var ( + _V_ERR_too_deep = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_too_deep)))) + _V_ERR_nan_or_infinite = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_nan_or_infinite)))) + _I_json_UnsupportedValueError = jit.Itab(rt.UnpackType(errorType), jsonUnsupportedValueType) +) + +func (self *_Assembler) error_too_deep() { + self.Link(_LB_error_too_deep) + self.Emit("MOVQ", _V_ERR_too_deep, _EP) // MOVQ $_V_ERR_too_deep, EP + self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValueError, ET + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) error_invalid_number() { + self.Link(_LB_error_invalid_number) + self.Emit("MOVQ", 
jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _BX) // MOVQ 8(SP.p), BX + self.call_go(_F_error_number) // CALL_GO error_number + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +func (self *_Assembler) error_nan_or_infinite() { + self.Link(_LB_error_nan_or_infinite) + self.Emit("MOVQ", _V_ERR_nan_or_infinite, _EP) // MOVQ $_V_ERR_nan_or_infinite, EP + self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValueError, ET + self.Sjmp("JMP" , _LB_error) // JMP _error +} + +/** String Encoding Routine **/ + +var ( + _F_quote = jit.Imm(int64(native.S_quote)) + _F_panic = jit.Func(goPanic) +) + +func (self *_Assembler) go_panic() { + self.Link(_LB_panic) + self.Emit("MOVQ", _SP_p, _BX) + self.call_go(_F_panic) +} + +func (self *_Assembler) encode_string(doubleQuote bool) { + self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JZ" , "_str_empty_{n}") // JZ _str_empty_{n} + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) + self.Sjmp("JNE" , "_str_next_{n}") + self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), _AX) + self.Sjmp("JMP", _LB_panic) + self.Link("_str_next_{n}") + + /* opening quote, check for double quote */ + if !doubleQuote { + self.check_size_r(_AX, 2) // SIZE $2 + self.add_char('"') // CHAR $'"' + } else { + self.check_size_r(_AX, 6) // SIZE $6 + self.add_long(_IM_open, 3) // TEXT $`"\"` + } + + /* quoting loop */ + self.Emit("XORL", _AX, _AX) // XORL AX, AX + self.Emit("MOVQ", _AX, _VAR_sp) // MOVQ AX, sp + self.Link("_str_loop_{n}") // _str_loop_{n}: + self.save_c() // SAVE $REG_ffi + + /* load the output buffer first, and then input buffer, + * because the parameter registers collide with RP / RL / RC */ + self.Emit("MOVQ", _RC, _CX) // MOVQ RC, CX + self.Emit("SUBQ", _RL, _CX) // SUBQ RL, CX + self.Emit("MOVQ", _CX, _VAR_dn) // MOVQ CX, dn + self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _DX) // LEAQ (RP)(RL), DX + self.Emit("LEAQ", _VAR_dn, _CX) // LEAQ dn, CX + self.Emit("MOVQ", _VAR_sp, _AX) // MOVQ sp, AX + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _DI) // MOVQ (SP.p), DI + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SI) // MOVQ 8(SP.p), SI + self.Emit("ADDQ", _AX, _DI) // ADDQ AX, DI + self.Emit("SUBQ", _AX, _SI) // SUBQ AX, SI + + /* set the flags based on `doubleQuote` */ + if !doubleQuote { + self.Emit("XORL", _R8, _R8) // XORL R8, R8 + } else { + self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8 + } + + /* call the native quoter */ + self.call_c(_F_quote) // CALL quote + self.Emit("ADDQ" , _VAR_dn, _RL) // ADDQ dn, RL + + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JS" , "_str_space_{n}") // JS _str_space_{n} + + /* close the string, check for double quote */ + if !doubleQuote { + self.check_size(1) // SIZE $1 + self.add_char('"') // CHAR $'"' + self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n} + } else { + self.check_size(3) // SIZE $3 + self.add_text("\\\"\"") // TEXT $'\""' + self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n} + } + + /* not enough space to contain the quoted string */ + self.Link("_str_space_{n}") // _str_space_{n}: + self.Emit("NOTQ", _AX) // NOTQ AX + self.Emit("ADDQ", _AX, _VAR_sp) // ADDQ AX, sp + self.Emit("LEAQ", jit.Sib(_RC, _RC, 1, 0), _AX) // LEAQ (RC)(RC), AX + self.slice_grow_ax("_str_loop_{n}") // GROW _str_loop_{n} + + /* empty string, check for double quote */ + if !doubleQuote { + self.Link("_str_empty_{n}") // _str_empty_{n}: + 
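// empty input still produces output: emit the two-byte literal "" and fall through to _str_end_{n} + 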
self.check_size(2) // SIZE $2 + self.add_text("\"\"") // TEXT $'""' + self.Link("_str_end_{n}") // _str_end_{n}: + } else { + self.Link("_str_empty_{n}") // _str_empty_{n}: + self.check_size(6) // SIZE $6 + self.add_text("\"\\\"\\\"\"") // TEXT $'"\"\""' + self.Link("_str_end_{n}") // _str_end_{n}: + } +} + +/** OpCode Assembler Functions **/ + +var ( + _T_json_Marshaler = rt.UnpackType(jsonMarshalerType) + _T_encoding_TextMarshaler = rt.UnpackType(encodingTextMarshalerType) +) + +var ( + _F_f64toa = jit.Imm(int64(native.S_f64toa)) + _F_f32toa = jit.Imm(int64(native.S_f32toa)) + _F_i64toa = jit.Imm(int64(native.S_i64toa)) + _F_u64toa = jit.Imm(int64(native.S_u64toa)) + _F_b64encode = jit.Imm(int64(_subr__b64encode)) +) + +var ( + _F_memmove = jit.Func(memmove) + _F_error_number = jit.Func(error_number) + _F_isValidNumber = jit.Func(isValidNumber) +) + +var ( + _F_iteratorStop = jit.Func(iteratorStop) + _F_iteratorNext = jit.Func(iteratorNext) + _F_iteratorStart = jit.Func(iteratorStart) +) + +var ( + _F_encodeTypedPointer obj.Addr + _F_encodeJsonMarshaler obj.Addr + _F_encodeTextMarshaler obj.Addr +) + +const ( + _MODE_AVX2 = 1 << 2 +) + +func init() { + _F_encodeTypedPointer = jit.Func(encodeTypedPointer) + _F_encodeJsonMarshaler = jit.Func(encodeJsonMarshaler) + _F_encodeTextMarshaler = jit.Func(encodeTextMarshaler) +} + +func (self *_Assembler) _asm_OP_null(_ *_Instr) { + self.check_size(4) + self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL +} + +func (self *_Assembler) _asm_OP_empty_arr(_ *_Instr) { + self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) + self.Sjmp("JC", "_empty_arr_{n}") + self._asm_OP_null(nil) + self.Sjmp("JMP", "_empty_arr_end_{n}") + self.Link("_empty_arr_{n}") + self.check_size(2) + self.Emit("MOVW", jit.Imm(_IM_array), jit.Sib(_RP, _RL, 1, 0)) + self.Emit("ADDQ", jit.Imm(2), _RL) + self.Link("_empty_arr_end_{n}") +} + +func (self *_Assembler) _asm_OP_empty_obj(_ *_Instr) { + self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv) + self.Sjmp("JC", "_empty_obj_{n}") + self._asm_OP_null(nil) + self.Sjmp("JMP", "_empty_obj_end_{n}") + self.Link("_empty_obj_{n}") + self.check_size(2) + self.Emit("MOVW", jit.Imm(_IM_object), jit.Sib(_RP, _RL, 1, 0)) + self.Emit("ADDQ", jit.Imm(2), _RL) + self.Link("_empty_obj_end_{n}") +} + +func (self *_Assembler) _asm_OP_bool(_ *_Instr) { + self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 + self.Sjmp("JE" , "_false_{n}") // JE _false_{n} + self.check_size(4) // SIZE $4 + self.Emit("MOVL", jit.Imm(_IM_true), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'true', (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL + self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n} + self.Link("_false_{n}") // _false_{n}: + self.check_size(5) // SIZE $5 + self.Emit("MOVL", jit.Imm(_IM_fals), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'fals', (RP)(RL*1) + self.Emit("MOVB", jit.Imm('e'), jit.Sib(_RP, _RL, 1, 4)) // MOVB $'e', 4(RP)(RL*1) + self.Emit("ADDQ", jit.Imm(5), _RL) // ADDQ $5, RL + self.Link("_end_{n}") // _end_{n}: +} + +func (self *_Assembler) _asm_OP_i8(_ *_Instr) { + self.store_int(4, _F_i64toa, "MOVBQSX") +} + +func (self *_Assembler) _asm_OP_i16(_ *_Instr) { + self.store_int(6, _F_i64toa, "MOVWQSX") +} + +func (self *_Assembler) _asm_OP_i32(_ *_Instr) { + self.store_int(17, _F_i64toa, "MOVLQSX") +} + +func (self *_Assembler) _asm_OP_i64(_ *_Instr) { + self.store_int(21, _F_i64toa, "MOVQ") +} + +func (self *_Assembler) 
_asm_OP_u8(_ *_Instr) { + self.store_int(3, _F_u64toa, "MOVBQZX") +} + +func (self *_Assembler) _asm_OP_u16(_ *_Instr) { + self.store_int(5, _F_u64toa, "MOVWQZX") +} + +func (self *_Assembler) _asm_OP_u32(_ *_Instr) { + self.store_int(16, _F_u64toa, "MOVLQZX") +} + +func (self *_Assembler) _asm_OP_u64(_ *_Instr) { + self.store_int(20, _F_u64toa, "MOVQ") +} + +func (self *_Assembler) _asm_OP_f32(_ *_Instr) { + self.check_size(32) + self.Emit("MOVL" , jit.Ptr(_SP_p, 0), _AX) // MOVL (SP.p), AX + self.Emit("ANDL" , jit.Imm(_FM_exp32), _AX) // ANDL $_FM_exp32, AX + self.Emit("XORL" , jit.Imm(_FM_exp32), _AX) // XORL $_FM_exp32, AX + self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit("MOVSS" , jit.Ptr(_SP_p, 0), _X0) // MOVSS (SP.p), X0 + self.call_c(_F_f32toa) // CALL_C f32toa + self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL +} + +func (self *_Assembler) _asm_OP_f64(_ *_Instr) { + self.check_size(32) + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("MOVQ" , jit.Imm(_FM_exp64), _CX) // MOVQ $_FM_exp64, CX + self.Emit("ANDQ" , _CX, _AX) // ANDQ CX, AX + self.Emit("XORQ" , _CX, _AX) // XORQ CX, AX + self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite + self.save_c() // SAVE $C_regs + self.rbuf_di() // MOVQ RP, DI + self.Emit("MOVSD" , jit.Ptr(_SP_p, 0), _X0) // MOVSD (SP.p), X0 + self.call_c(_F_f64toa) // CALL_C f64toa + self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL +} + +func (self *_Assembler) _asm_OP_str(_ *_Instr) { + self.encode_string(false) +} + +func (self *_Assembler) _asm_OP_bin(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX + self.Emit("ADDQ", jit.Imm(2), _AX) // ADDQ $2, AX + self.Emit("MOVQ", jit.Imm(_IM_mulv), _CX) // MOVQ $_IM_mulv, CX + self.Emit("MOVQ", _DX, _BX) // MOVQ DX, BX + self.From("MULQ", _CX) // MULQ CX + self.Emit("LEAQ", jit.Sib(_DX, _DX, 1, 1), _AX) // LEAQ 1(DX)(DX), AX + self.Emit("ORQ" , jit.Imm(2), _AX) // ORQ $2, AX + self.Emit("MOVQ", _BX, _DX) // MOVQ BX, DX + self.check_size_r(_AX, 0) // SIZE AX + self.add_char('"') // CHAR $'"' + self.Emit("MOVQ", _ARG_rb, _DI) // MOVQ rb<>+0(FP), DI + self.Emit("MOVQ", _RL, jit.Ptr(_DI, 8)) // MOVQ SI, 8(DI) + self.Emit("MOVQ", _SP_p, _SI) // MOVQ SP.p, SI + + /* check for AVX2 support */ + if !cpu.HasAVX2 { + self.Emit("XORL", _DX, _DX) // XORL DX, DX + } else { + self.Emit("MOVL", jit.Imm(_MODE_AVX2), _DX) // MOVL $_MODE_AVX2, DX + } + + /* call the encoder */ + self.call_b64(_F_b64encode) // CALL b64encode + self.load_buffer_AX() // LOAD {buf} + self.add_char('"') // CHAR $'"' +} + +func (self *_Assembler) _asm_OP_quote(_ *_Instr) { + self.encode_string(true) +} + +func (self *_Assembler) _asm_OP_number(_ *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _BX) // MOVQ 8(SP.p), BX + self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX + self.Sjmp("JZ" , "_empty_{n}") + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Sjmp("JNZ" , "_number_next_{n}") + self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), _AX) + self.Sjmp("JMP", _LB_panic) + self.Link("_number_next_{n}") + self.call_go(_F_isValidNumber) // CALL_GO isValidNumber + self.Emit("CMPB" , _AX, jit.Imm(0)) // CMPB AX, $0 + self.Sjmp("JE" , _LB_error_invalid_number) // JE _error_invalid_number + self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _BX) // MOVQ 8(SP.p), BX + self.check_size_r(_BX, 0) // SIZE BX + self.Emit("LEAQ" , jit.Sib(_RP, 
_RL, 1, 0), _AX) // LEAQ (RP)(RL), AX + self.Emit("ADDQ" , jit.Ptr(_SP_p, 8), _RL) // ADDQ 8(SP.p), RL + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _BX) // MOVQ (SP.p), BX + self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _CX) // MOVQ 8(SP.p), CX + self.call_go(_F_memmove) // CALL_GO memmove + self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX + self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX) + self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n} + self.Link("_empty_{n}") // _empty_{n}: + self.check_size(1) // SIZE $1 + self.add_char('0') // CHAR $'0' + self.Link("_done_{n}") // _done_{n}: +} + +func (self *_Assembler) _asm_OP_eface(_ *_Instr) { + self.prep_buffer_AX() // MOVE {buf}, AX + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _BX) // MOVQ (SP.p), BX + self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _CX) // LEAQ 8(SP.p), CX + self.Emit("MOVQ" , _ST, _DI) // MOVQ ST, DI + self.Emit("MOVQ" , _ARG_fv, _SI) // MOVQ fv, SI + self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.load_buffer_AX() +} + +func (self *_Assembler) _asm_OP_iface(_ *_Instr) { + self.prep_buffer_AX() // MOVE {buf}, AX + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _CX) // MOVQ (SP.p), CX + self.Emit("MOVQ" , jit.Ptr(_CX, 8), _BX) // MOVQ 8(CX), BX + self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _CX) // LEAQ 8(SP.p), CX + self.Emit("MOVQ" , _ST, _DI) // MOVQ ST, DI + self.Emit("MOVQ" , _ARG_fv, _SI) // MOVQ fv, SI + self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.load_buffer_AX() +} + +func (self *_Assembler) _asm_OP_byte(p *_Instr) { + self.check_size(1) + self.Emit("MOVB", jit.Imm(p.i64()), jit.Sib(_RP, _RL, 1, 0)) // MOVB p.i64(), (RP)(RL*1) + self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL +} + +func (self *_Assembler) _asm_OP_text(p *_Instr) { + self.check_size(len(p.vs())) // SIZE ${len(p.vs())} + self.add_text(p.vs()) // TEXT ${p.vs()} +} + +func (self *_Assembler) _asm_OP_deref(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p +} + +func (self *_Assembler) _asm_OP_index(p *_Instr) { + self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ $p.vi(), AX + self.Emit("ADDQ", _AX, _SP_p) // ADDQ AX, SP.p +} + +func (self *_Assembler) _asm_OP_load(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -24), _SP_x) // MOVQ -24(ST)(AX), SP.x + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -8), _SP_p) // MOVQ -8(ST)(AX), SP.p + self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _SP_q) // MOVQ (ST)(AX), SP.q +} + +func (self *_Assembler) _asm_OP_save(_ *_Instr) { + self.save_state() +} + +func (self *_Assembler) _asm_OP_drop(_ *_Instr) { + self.drop_state(_StateSize) +} + +func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) { + self.drop_state(_StateSize * 2) // DROP $(_StateSize * 2) + self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 56)) // MOVOU X0, 56(ST)(AX) +} + +func (self *_Assembler) _asm_OP_recurse(p *_Instr) { + self.prep_buffer_AX() // MOVE {buf}, AX + vt, pv := p.vp() + self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ $(type(p.vt())), BX + + /* check for indirection */ + if !rt.UnpackType(vt).Indirect() { + self.Emit("MOVQ", _SP_p, _CX) // MOVQ SP.p, CX + } else { + self.Emit("MOVQ", _SP_p, _VAR_vp) // MOVQ SP.p, VAR.vp + self.Emit("LEAQ", _VAR_vp, _CX) // LEAQ VAR.vp, CX + } + + /* call the encoder */ + self.Emit("MOVQ" , _ST, _DI) // MOVQ ST, DI + 
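// Go 1.17+ register ABI: the arguments to encodeTypedPointer (buf, type, vp, sb, fv) travel in AX/BX/CX/DI/SI rather than on the stack + 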
self.Emit("MOVQ" , _ARG_fv, _SI) // MOVQ $fv, SI + if pv { + self.Emit("BTCQ", jit.Imm(bitPointerValue), _SI) // BTCQ $1, SI + } + self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error + self.load_buffer_AX() +} + +func (self *_Assembler) _asm_OP_is_nil(p *_Instr) { + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_nil_p1(p *_Instr) { + self.Emit("CMPQ", jit.Ptr(_SP_p, 8), jit.Imm(0)) // CMPQ 8(SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_zero_1(p *_Instr) { + self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_zero_2(p *_Instr) { + self.Emit("CMPW", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPW (SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_zero_4(p *_Instr) { + self.Emit("CMPL", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPL (SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_zero_8(p *_Instr) { + self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_is_zero_map(p *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX + self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX + self.Xjmp("JZ" , p.vi()) // JZ p.vi() + self.Emit("CMPQ" , jit.Ptr(_AX, 0), jit.Imm(0)) // CMPQ (AX), $0 + self.Xjmp("JE" , p.vi()) // JE p.vi() +} + +func (self *_Assembler) _asm_OP_goto(p *_Instr) { + self.Xjmp("JMP", p.vi()) +} + +func (self *_Assembler) _asm_OP_map_iter(p *_Instr) { + self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ $p.vt(), AX + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _BX) // MOVQ (SP.p), BX + self.Emit("MOVQ" , _ARG_fv, _CX) // MOVQ fv, CX + self.call_go(_F_iteratorStart) // CALL_GO iteratorStart + self.Emit("MOVQ" , _AX, _SP_q) // MOVQ AX, SP.q + self.Emit("MOVQ" , _BX, _ET) // MOVQ 32(SP), ET + self.Emit("MOVQ" , _CX, _EP) // MOVQ 40(SP), EP + self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET + self.Sjmp("JNZ" , _LB_error) // JNZ _error +} + +func (self *_Assembler) _asm_OP_map_stop(_ *_Instr) { + self.Emit("MOVQ", _SP_q, _AX) // MOVQ SP.q, AX + self.call_go(_F_iteratorStop) // CALL_GO iteratorStop + self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q +} + +func (self *_Assembler) _asm_OP_map_check_key(p *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_SP_q, 0), _SP_p) // MOVQ (SP.q), SP.p + self.Emit("TESTQ", _SP_p, _SP_p) // TESTQ SP.p, SP.p + self.Xjmp("JZ" , p.vi()) // JNZ p.vi() +} + +func (self *_Assembler) _asm_OP_map_write_key(p *_Instr) { + self.Emit("BTQ", jit.Imm(bitSortMapKeys), _ARG_fv) // BTQ ${SortMapKeys}, fv + self.Sjmp("JNC", "_unordered_key_{n}") // JNC _unordered_key_{n} + self.encode_string(false) // STR $false + self.Xjmp("JMP", p.vi()) // JMP ${p.vi()} + self.Link("_unordered_key_{n}") // _unordered_key_{n}: +} + +func (self *_Assembler) _asm_OP_map_value_next(_ *_Instr) { + self.Emit("MOVQ", jit.Ptr(_SP_q, 8), _SP_p) // MOVQ 8(SP.q), SP.p + self.Emit("MOVQ", _SP_q, _AX) // MOVQ SP.q, AX + self.call_go(_F_iteratorNext) // CALL_GO iteratorNext +} + +func (self *_Assembler) _asm_OP_slice_len(_ *_Instr) { + self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _SP_x) // MOVQ 8(SP.p), SP.x + self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p + self.Emit("ORQ" , jit.Imm(1 << 
_S_init), _SP_f) // ORQ $(1<<_S_init), SP.f +} + +func (self *_Assembler) _asm_OP_slice_next(p *_Instr) { + self.Emit("TESTQ" , _SP_x, _SP_x) // TESTQ SP.x, SP.x + self.Xjmp("JZ" , p.vi()) // JZ p.vi() + self.Emit("SUBQ" , jit.Imm(1), _SP_x) // SUBQ $1, SP.x + self.Emit("BTRQ" , jit.Imm(_S_init), _SP_f) // BTRQ $_S_init, SP.f + self.Emit("LEAQ" , jit.Ptr(_SP_p, int64(p.vlen())), _AX) // LEAQ $(p.vlen())(SP.p), AX + self.Emit("CMOVQCC", _AX, _SP_p) // CMOVQNC AX, SP.p +} + +func (self *_Assembler) _asm_OP_marshal(p *_Instr) { + self.call_marshaler(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt()) +} + +func (self *_Assembler) _asm_OP_marshal_p(p *_Instr) { + if p.vk() != reflect.Ptr { + panic("marshal_p: invalid type") + } else { + self.call_marshaler_v(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt(), false) + } +} + +func (self *_Assembler) _asm_OP_marshal_text(p *_Instr) { + self.call_marshaler(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt()) +} + +func (self *_Assembler) _asm_OP_marshal_text_p(p *_Instr) { + if p.vk() != reflect.Ptr { + panic("marshal_text_p: invalid type") + } else { + self.call_marshaler_v(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt(), false) + } +} + +func (self *_Assembler) _asm_OP_cond_set(_ *_Instr) { + self.Emit("ORQ", jit.Imm(1 << _S_cond), _SP_f) // ORQ $(1<<_S_cond), SP.f +} + +func (self *_Assembler) _asm_OP_cond_testc(p *_Instr) { + self.Emit("BTRQ", jit.Imm(_S_cond), _SP_f) // BTRQ $_S_cond, SP.f + self.Xjmp("JC" , p.vi()) +} + +func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) { + self.Emit("MOVQ", jit.Imm(int64(p2.op())), _CX) // MOVQ $(p2.op()), CX + self.Emit("MOVQ", jit.Imm(int64(p1.op())), _BX) // MOVQ $(p1.op()), BX + self.Emit("MOVQ", jit.Imm(int64(i)), _AX) // MOVQ $(i), AX + self.call_go(_F_println) +} + +var ( + _V_writeBarrier = jit.Imm(int64(uintptr(unsafe.Pointer(&_runtime_writeBarrier)))) + + _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX) +) + +func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr) { + if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX { + panic("rec contains AX!") + } + self.Emit("MOVQ", _V_writeBarrier, _BX) + self.Emit("CMPL", jit.Ptr(_BX, 0), jit.Imm(0)) + self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.xsave(_DI) + self.Emit("MOVQ", ptr, _AX) + self.Emit("LEAQ", rec, _DI) + self.Emit("MOVQ", _F_gcWriteBarrierAX, _BX) // MOVQ ${fn}, BX + self.Rjmp("CALL", _BX) + self.xload(_DI) + self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}") + self.Emit("MOVQ", ptr, rec) + self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}") +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/encoder/compiler.go b/vendor/github.com/bytedance/sonic/encoder/compiler.go new file mode 100644 index 0000000..a949c90 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/encoder/compiler.go @@ -0,0 +1,885 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encoder + +import ( + `fmt` + `reflect` + `strconv` + `strings` + `unsafe` + + `github.com/bytedance/sonic/internal/resolver` + `github.com/bytedance/sonic/internal/rt` + `github.com/bytedance/sonic/option` +) + +type _Op uint8 + +const ( + _OP_null _Op = iota + 1 + _OP_empty_arr + _OP_empty_obj + _OP_bool + _OP_i8 + _OP_i16 + _OP_i32 + _OP_i64 + _OP_u8 + _OP_u16 + _OP_u32 + _OP_u64 + _OP_f32 + _OP_f64 + _OP_str + _OP_bin + _OP_quote + _OP_number + _OP_eface + _OP_iface + _OP_byte + _OP_text + _OP_deref + _OP_index + _OP_load + _OP_save + _OP_drop + _OP_drop_2 + _OP_recurse + _OP_is_nil + _OP_is_nil_p1 + _OP_is_zero_1 + _OP_is_zero_2 + _OP_is_zero_4 + _OP_is_zero_8 + _OP_is_zero_map + _OP_goto + _OP_map_iter + _OP_map_stop + _OP_map_check_key + _OP_map_write_key + _OP_map_value_next + _OP_slice_len + _OP_slice_next + _OP_marshal + _OP_marshal_p + _OP_marshal_text + _OP_marshal_text_p + _OP_cond_set + _OP_cond_testc +) + +const ( + _INT_SIZE = 32 << (^uint(0) >> 63) + _PTR_SIZE = 32 << (^uintptr(0) >> 63) + _PTR_BYTE = unsafe.Sizeof(uintptr(0)) +) + +const ( + _MAX_ILBUF = 100000 // cutoff at 100k of IL instructions + _MAX_FIELDS = 50 // cutoff at structs with 50 fields +) + +var _OpNames = [256]string { + _OP_null : "null", + _OP_empty_arr : "empty_arr", + _OP_empty_obj : "empty_obj", + _OP_bool : "bool", + _OP_i8 : "i8", + _OP_i16 : "i16", + _OP_i32 : "i32", + _OP_i64 : "i64", + _OP_u8 : "u8", + _OP_u16 : "u16", + _OP_u32 : "u32", + _OP_u64 : "u64", + _OP_f32 : "f32", + _OP_f64 : "f64", + _OP_str : "str", + _OP_bin : "bin", + _OP_quote : "quote", + _OP_number : "number", + _OP_eface : "eface", + _OP_iface : "iface", + _OP_byte : "byte", + _OP_text : "text", + _OP_deref : "deref", + _OP_index : "index", + _OP_load : "load", + _OP_save : "save", + _OP_drop : "drop", + _OP_drop_2 : "drop_2", + _OP_recurse : "recurse", + _OP_is_nil : "is_nil", + _OP_is_nil_p1 : "is_nil_p1", + _OP_is_zero_1 : "is_zero_1", + _OP_is_zero_2 : "is_zero_2", + _OP_is_zero_4 : "is_zero_4", + _OP_is_zero_8 : "is_zero_8", + _OP_is_zero_map : "is_zero_map", + _OP_goto : "goto", + _OP_map_iter : "map_iter", + _OP_map_stop : "map_stop", + _OP_map_check_key : "map_check_key", + _OP_map_write_key : "map_write_key", + _OP_map_value_next : "map_value_next", + _OP_slice_len : "slice_len", + _OP_slice_next : "slice_next", + _OP_marshal : "marshal", + _OP_marshal_p : "marshal_p", + _OP_marshal_text : "marshal_text", + _OP_marshal_text_p : "marshal_text_p", + _OP_cond_set : "cond_set", + _OP_cond_testc : "cond_testc", +} + +func (self _Op) String() string { + if ret := _OpNames[self]; ret != "" { + return ret + } else { + return "<invalid>" + } +} + +func _OP_int() _Op { + switch _INT_SIZE { + case 32: return _OP_i32 + case 64: return _OP_i64 + default: panic("unsupported int size") + } +} + +func _OP_uint() _Op { + switch _INT_SIZE { + case 32: return _OP_u32 + case 64: return _OP_u64 + default: panic("unsupported uint size") + } +} + +func _OP_uintptr() _Op { + switch _PTR_SIZE { + case 32: return _OP_u32 + case 64: return _OP_u64 + default: panic("unsupported pointer size") + } +} + +func _OP_is_zero_ints() _Op { + switch _INT_SIZE { + case 32: return _OP_is_zero_4 + case 64: return _OP_is_zero_8 + default: panic("unsupported integer size") + } +} + +type _Instr struct { + u uint64 // union {op: 8, _: 8, vi: 48}, vi maybe int or len(str) + p unsafe.Pointer // maybe GoString.Ptr, or *GoType +} + +func packOp(op _Op) 
uint64 { + return uint64(op) << 56 +} + +func newInsOp(op _Op) _Instr { + return _Instr{u: packOp(op)} +} + +func newInsVi(op _Op, vi int) _Instr { + return _Instr{u: packOp(op) | rt.PackInt(vi)} +} + +func newInsVs(op _Op, vs string) _Instr { + return _Instr { + u: packOp(op) | rt.PackInt(len(vs)), + p: (*rt.GoString)(unsafe.Pointer(&vs)).Ptr, + } +} + +func newInsVt(op _Op, vt reflect.Type) _Instr { + return _Instr { + u: packOp(op), + p: unsafe.Pointer(rt.UnpackType(vt)), + } +} + +func newInsVp(op _Op, vt reflect.Type, pv bool) _Instr { + i := 0 + if pv { + i = 1 + } + return _Instr { + u: packOp(op) | rt.PackInt(i), + p: unsafe.Pointer(rt.UnpackType(vt)), + } +} + +func (self _Instr) op() _Op { + return _Op(self.u >> 56) +} + +func (self _Instr) vi() int { + return rt.UnpackInt(self.u) +} + +func (self _Instr) vf() uint8 { + return (*rt.GoType)(self.p).KindFlags +} + +func (self _Instr) vs() (v string) { + (*rt.GoString)(unsafe.Pointer(&v)).Ptr = self.p + (*rt.GoString)(unsafe.Pointer(&v)).Len = self.vi() + return +} + +func (self _Instr) vk() reflect.Kind { + return (*rt.GoType)(self.p).Kind() +} + +func (self _Instr) vt() reflect.Type { + return (*rt.GoType)(self.p).Pack() +} + +func (self _Instr) vp() (vt reflect.Type, pv bool) { + return (*rt.GoType)(self.p).Pack(), rt.UnpackInt(self.u) == 1 +} + +func (self _Instr) i64() int64 { + return int64(self.vi()) +} + +func (self _Instr) vlen() int { + return int((*rt.GoType)(self.p).Size) +} + +func (self _Instr) isBranch() bool { + switch self.op() { + case _OP_goto : fallthrough + case _OP_is_nil : fallthrough + case _OP_is_nil_p1 : fallthrough + case _OP_is_zero_1 : fallthrough + case _OP_is_zero_2 : fallthrough + case _OP_is_zero_4 : fallthrough + case _OP_is_zero_8 : fallthrough + case _OP_map_check_key : fallthrough + case _OP_map_write_key : fallthrough + case _OP_slice_next : fallthrough + case _OP_cond_testc : return true + default : return false + } +} + +func (self _Instr) disassemble() string { + switch self.op() { + case _OP_byte : return fmt.Sprintf("%-18s%s", self.op().String(), strconv.QuoteRune(rune(self.vi()))) + case _OP_text : return fmt.Sprintf("%-18s%s", self.op().String(), strconv.Quote(self.vs())) + case _OP_index : return fmt.Sprintf("%-18s%d", self.op().String(), self.vi()) + case _OP_recurse : fallthrough + case _OP_map_iter : fallthrough + case _OP_marshal : fallthrough + case _OP_marshal_p : fallthrough + case _OP_marshal_text : fallthrough + case _OP_marshal_text_p : return fmt.Sprintf("%-18s%s", self.op().String(), self.vt()) + case _OP_goto : fallthrough + case _OP_is_nil : fallthrough + case _OP_is_nil_p1 : fallthrough + case _OP_is_zero_1 : fallthrough + case _OP_is_zero_2 : fallthrough + case _OP_is_zero_4 : fallthrough + case _OP_is_zero_8 : fallthrough + case _OP_is_zero_map : fallthrough + case _OP_cond_testc : fallthrough + case _OP_map_check_key : fallthrough + case _OP_map_write_key : return fmt.Sprintf("%-18sL_%d", self.op().String(), self.vi()) + case _OP_slice_next : return fmt.Sprintf("%-18sL_%d, %s", self.op().String(), self.vi(), self.vt()) + default : return self.op().String() + } +} + +type ( + _Program []_Instr +) + +func (self _Program) pc() int { + return len(self) +} + +func (self _Program) tag(n int) { + if n >= _MaxStack { + panic("type nesting too deep") + } +} + +func (self _Program) pin(i int) { + v := &self[i] + v.u &= 0xffff000000000000 + v.u |= rt.PackInt(self.pc()) +} + +func (self _Program) rel(v []int) { + for _, i := range v { + self.pin(i) + } +} + +func (self *_Program) 
add(op _Op) { + *self = append(*self, newInsOp(op)) +} + +func (self *_Program) key(op _Op) { + *self = append(*self, + newInsVi(_OP_byte, '"'), + newInsOp(op), + newInsVi(_OP_byte, '"'), + ) +} + +func (self *_Program) int(op _Op, vi int) { + *self = append(*self, newInsVi(op, vi)) +} + +func (self *_Program) str(op _Op, vs string) { + *self = append(*self, newInsVs(op, vs)) +} + +func (self *_Program) rtt(op _Op, vt reflect.Type) { + *self = append(*self, newInsVt(op, vt)) +} + +func (self *_Program) vp(op _Op, vt reflect.Type, pv bool) { + *self = append(*self, newInsVp(op, vt, pv)) +} + +func (self _Program) disassemble() string { + nb := len(self) + tab := make([]bool, nb + 1) + ret := make([]string, 0, nb + 1) + + /* prescan to get all the labels */ + for _, ins := range self { + if ins.isBranch() { + tab[ins.vi()] = true + } + } + + /* disassemble each instruction */ + for i, ins := range self { + if !tab[i] { + ret = append(ret, "\t" + ins.disassemble()) + } else { + ret = append(ret, fmt.Sprintf("L_%d:\n\t%s", i, ins.disassemble())) + } + } + + /* add the last label, if needed */ + if tab[nb] { + ret = append(ret, fmt.Sprintf("L_%d:", nb)) + } + + /* add an "end" indicator, and join all the strings */ + return strings.Join(append(ret, "\tend"), "\n") +} + +type _Compiler struct { + opts option.CompileOptions + pv bool + tab map[reflect.Type]bool + rec map[reflect.Type]uint8 +} + +func newCompiler() *_Compiler { + return &_Compiler { + opts: option.DefaultCompileOptions(), + tab: map[reflect.Type]bool{}, + rec: map[reflect.Type]uint8{}, + } +} + +func (self *_Compiler) apply(opts option.CompileOptions) *_Compiler { + self.opts = opts + if self.opts.RecursiveDepth > 0 { + self.rec = map[reflect.Type]uint8{} + } + return self +} + +func (self *_Compiler) rescue(ep *error) { + if val := recover(); val != nil { + if err, ok := val.(error); ok { + *ep = err + } else { + panic(val) + } + } +} + +func (self *_Compiler) compile(vt reflect.Type, pv bool) (ret _Program, err error) { + defer self.rescue(&err) + self.compileOne(&ret, 0, vt, pv) + return +} + +func (self *_Compiler) compileOne(p *_Program, sp int, vt reflect.Type, pv bool) { + if self.tab[vt] { + p.vp(_OP_recurse, vt, pv) + } else { + self.compileRec(p, sp, vt, pv) + } +} + +func (self *_Compiler) compileRec(p *_Program, sp int, vt reflect.Type, pv bool) { + pr := self.pv + pt := reflect.PtrTo(vt) + + /* check for addressable `json.Marshaler` with pointer receiver */ + if pv && pt.Implements(jsonMarshalerType) { + p.rtt(_OP_marshal_p, pt) + return + } + + /* check for `json.Marshaler` */ + if vt.Implements(jsonMarshalerType) { + self.compileMarshaler(p, _OP_marshal, vt, jsonMarshalerType) + return + } + + /* check for addressable `encoding.TextMarshaler` with pointer receiver */ + if pv && pt.Implements(encodingTextMarshalerType) { + p.rtt(_OP_marshal_text_p, pt) + return + } + + /* check for `encoding.TextMarshaler` */ + if vt.Implements(encodingTextMarshalerType) { + self.compileMarshaler(p, _OP_marshal_text, vt, encodingTextMarshalerType) + return + } + + /* enter the recursion, and compile the type */ + self.pv = pv + self.tab[vt] = true + self.compileOps(p, sp, vt) + + /* exit the recursion */ + self.pv = pr + delete(self.tab, vt) +} + +func (self *_Compiler) compileOps(p *_Program, sp int, vt reflect.Type) { + switch vt.Kind() { + case reflect.Bool : p.add(_OP_bool) + case reflect.Int : p.add(_OP_int()) + case reflect.Int8 : p.add(_OP_i8) + case reflect.Int16 : p.add(_OP_i16) + case reflect.Int32 : p.add(_OP_i32) + case 
reflect.Int64 : p.add(_OP_i64) + case reflect.Uint : p.add(_OP_uint()) + case reflect.Uint8 : p.add(_OP_u8) + case reflect.Uint16 : p.add(_OP_u16) + case reflect.Uint32 : p.add(_OP_u32) + case reflect.Uint64 : p.add(_OP_u64) + case reflect.Uintptr : p.add(_OP_uintptr()) + case reflect.Float32 : p.add(_OP_f32) + case reflect.Float64 : p.add(_OP_f64) + case reflect.String : self.compileString (p, vt) + case reflect.Array : self.compileArray (p, sp, vt.Elem(), vt.Len()) + case reflect.Interface : self.compileInterface (p, vt) + case reflect.Map : self.compileMap (p, sp, vt) + case reflect.Ptr : self.compilePtr (p, sp, vt.Elem()) + case reflect.Slice : self.compileSlice (p, sp, vt.Elem()) + case reflect.Struct : self.compileStruct (p, sp, vt) + default : panic (error_type(vt)) + } +} + +func (self *_Compiler) compileNil(p *_Program, sp int, vt reflect.Type, nil_op _Op, fn func(*_Program, int, reflect.Type)) { + x := p.pc() + p.add(_OP_is_nil) + fn(p, sp, vt) + e := p.pc() + p.add(_OP_goto) + p.pin(x) + p.add(nil_op) + p.pin(e) +} + +func (self *_Compiler) compilePtr(p *_Program, sp int, vt reflect.Type) { + self.compileNil(p, sp, vt, _OP_null, self.compilePtrBody) +} + +func (self *_Compiler) compilePtrBody(p *_Program, sp int, vt reflect.Type) { + p.tag(sp) + p.add(_OP_save) + p.add(_OP_deref) + self.compileOne(p, sp + 1, vt, true) + p.add(_OP_drop) +} + +func (self *_Compiler) compileMap(p *_Program, sp int, vt reflect.Type) { + self.compileNil(p, sp, vt, _OP_empty_obj, self.compileMapBody) +} + +func (self *_Compiler) compileMapBody(p *_Program, sp int, vt reflect.Type) { + p.tag(sp + 1) + p.int(_OP_byte, '{') + p.add(_OP_save) + p.rtt(_OP_map_iter, vt) + p.add(_OP_save) + i := p.pc() + p.add(_OP_map_check_key) + u := p.pc() + p.add(_OP_map_write_key) + self.compileMapBodyKey(p, vt.Key()) + p.pin(u) + p.int(_OP_byte, ':') + p.add(_OP_map_value_next) + self.compileOne(p, sp + 2, vt.Elem(), false) + j := p.pc() + p.add(_OP_map_check_key) + p.int(_OP_byte, ',') + v := p.pc() + p.add(_OP_map_write_key) + self.compileMapBodyKey(p, vt.Key()) + p.pin(v) + p.int(_OP_byte, ':') + p.add(_OP_map_value_next) + self.compileOne(p, sp + 2, vt.Elem(), false) + p.int(_OP_goto, j) + p.pin(i) + p.pin(j) + p.add(_OP_map_stop) + p.add(_OP_drop_2) + p.int(_OP_byte, '}') +} + +func (self *_Compiler) compileMapBodyKey(p *_Program, vk reflect.Type) { + if !vk.Implements(encodingTextMarshalerType) { + self.compileMapBodyTextKey(p, vk) + } else { + self.compileMapBodyUtextKey(p, vk) + } +} + +func (self *_Compiler) compileMapBodyTextKey(p *_Program, vk reflect.Type) { + switch vk.Kind() { + case reflect.Invalid : panic("map key is nil") + case reflect.Bool : p.key(_OP_bool) + case reflect.Int : p.key(_OP_int()) + case reflect.Int8 : p.key(_OP_i8) + case reflect.Int16 : p.key(_OP_i16) + case reflect.Int32 : p.key(_OP_i32) + case reflect.Int64 : p.key(_OP_i64) + case reflect.Uint : p.key(_OP_uint()) + case reflect.Uint8 : p.key(_OP_u8) + case reflect.Uint16 : p.key(_OP_u16) + case reflect.Uint32 : p.key(_OP_u32) + case reflect.Uint64 : p.key(_OP_u64) + case reflect.Uintptr : p.key(_OP_uintptr()) + case reflect.Float32 : p.key(_OP_f32) + case reflect.Float64 : p.key(_OP_f64) + case reflect.String : self.compileString(p, vk) + default : panic(error_type(vk)) + } +} + +func (self *_Compiler) compileMapBodyUtextKey(p *_Program, vk reflect.Type) { + if vk.Kind() != reflect.Ptr { + p.rtt(_OP_marshal_text, vk) + } else { + self.compileMapBodyUtextPtr(p, vk) + } +} + +func (self *_Compiler) compileMapBodyUtextPtr(p *_Program, 
vk reflect.Type) { + i := p.pc() + p.add(_OP_is_nil) + p.rtt(_OP_marshal_text, vk) + j := p.pc() + p.add(_OP_goto) + p.pin(i) + p.str(_OP_text, "\"\"") + p.pin(j) +} + +func (self *_Compiler) compileSlice(p *_Program, sp int, vt reflect.Type) { + self.compileNil(p, sp, vt, _OP_empty_arr, self.compileSliceBody) +} + +func (self *_Compiler) compileSliceBody(p *_Program, sp int, vt reflect.Type) { + if isSimpleByte(vt) { + p.add(_OP_bin) + } else { + self.compileSliceArray(p, sp, vt) + } +} + +func (self *_Compiler) compileSliceArray(p *_Program, sp int, vt reflect.Type) { + p.tag(sp) + p.int(_OP_byte, '[') + p.add(_OP_save) + p.add(_OP_slice_len) + i := p.pc() + p.rtt(_OP_slice_next, vt) + self.compileOne(p, sp + 1, vt, true) + j := p.pc() + p.rtt(_OP_slice_next, vt) + p.int(_OP_byte, ',') + self.compileOne(p, sp + 1, vt, true) + p.int(_OP_goto, j) + p.pin(i) + p.pin(j) + p.add(_OP_drop) + p.int(_OP_byte, ']') +} + +func (self *_Compiler) compileArray(p *_Program, sp int, vt reflect.Type, nb int) { + p.tag(sp) + p.int(_OP_byte, '[') + p.add(_OP_save) + + /* first item */ + if nb != 0 { + self.compileOne(p, sp + 1, vt, self.pv) + p.add(_OP_load) + } + + /* remaining items */ + for i := 1; i < nb; i++ { + p.int(_OP_byte, ',') + p.int(_OP_index, i * int(vt.Size())) + self.compileOne(p, sp + 1, vt, self.pv) + p.add(_OP_load) + } + + /* end of array */ + p.add(_OP_drop) + p.int(_OP_byte, ']') +} + +func (self *_Compiler) compileString(p *_Program, vt reflect.Type) { + if vt != jsonNumberType { + p.add(_OP_str) + } else { + p.add(_OP_number) + } +} + +func (self *_Compiler) compileStruct(p *_Program, sp int, vt reflect.Type) { + if sp >= self.opts.MaxInlineDepth || p.pc() >= _MAX_ILBUF || (sp > 0 && vt.NumField() >= _MAX_FIELDS) { + p.vp(_OP_recurse, vt, self.pv) + if self.opts.RecursiveDepth > 0 { + if self.pv { + self.rec[vt] = 1 + } else { + self.rec[vt] = 0 + } + } + } else { + self.compileStructBody(p, sp, vt) + } +} + +func (self *_Compiler) compileStructBody(p *_Program, sp int, vt reflect.Type) { + p.tag(sp) + p.int(_OP_byte, '{') + p.add(_OP_save) + p.add(_OP_cond_set) + + /* compile each field */ + for _, fv := range resolver.ResolveStruct(vt) { + var s []int + var o resolver.Offset + + /* "omitempty" for arrays */ + if fv.Type.Kind() == reflect.Array { + if fv.Type.Len() == 0 && (fv.Opts & resolver.F_omitempty) != 0 { + continue + } + } + + /* index to the field */ + for _, o = range fv.Path { + if p.int(_OP_index, int(o.Size)); o.Kind == resolver.F_deref { + s = append(s, p.pc()) + p.add(_OP_is_nil) + p.add(_OP_deref) + } + } + + /* check for "omitempty" option */ + if fv.Type.Kind() != reflect.Struct && fv.Type.Kind() != reflect.Array && (fv.Opts & resolver.F_omitempty) != 0 { + s = append(s, p.pc()) + self.compileStructFieldZero(p, fv.Type) + } + + /* add the comma if not the first element */ + i := p.pc() + p.add(_OP_cond_testc) + p.int(_OP_byte, ',') + p.pin(i) + + /* compile the key and value */ + ft := fv.Type + p.str(_OP_text, Quote(fv.Name) + ":") + + /* check for "stringnize" option */ + if (fv.Opts & resolver.F_stringize) == 0 { + self.compileOne(p, sp + 1, ft, self.pv) + } else { + self.compileStructFieldStr(p, sp + 1, ft) + } + + /* patch the skipping jumps and reload the struct pointer */ + p.rel(s) + p.add(_OP_load) + } + + /* end of object */ + p.add(_OP_drop) + p.int(_OP_byte, '}') +} + +func (self *_Compiler) compileStructFieldStr(p *_Program, sp int, vt reflect.Type) { + pc := -1 + ft := vt + sv := false + + /* dereference the pointer if needed */ + if ft.Kind() == 
reflect.Ptr { + ft = ft.Elem() + } + + /* check if it can be stringized */ + switch ft.Kind() { + case reflect.Bool : sv = true + case reflect.Int : sv = true + case reflect.Int8 : sv = true + case reflect.Int16 : sv = true + case reflect.Int32 : sv = true + case reflect.Int64 : sv = true + case reflect.Uint : sv = true + case reflect.Uint8 : sv = true + case reflect.Uint16 : sv = true + case reflect.Uint32 : sv = true + case reflect.Uint64 : sv = true + case reflect.Uintptr : sv = true + case reflect.Float32 : sv = true + case reflect.Float64 : sv = true + case reflect.String : sv = true + } + + /* if it's not, ignore the "string" and follow the regular path */ + if !sv { + self.compileOne(p, sp, vt, self.pv) + return + } + + /* dereference the pointer */ + if vt.Kind() == reflect.Ptr { + pc = p.pc() + vt = vt.Elem() + p.add(_OP_is_nil) + p.add(_OP_deref) + } + + /* special case of a double-quoted string */ + if ft != jsonNumberType && ft.Kind() == reflect.String { + p.add(_OP_quote) + } else { + self.compileStructFieldQuoted(p, sp, vt) + } + + /* the "null" case of the pointer */ + if pc != -1 { + e := p.pc() + p.add(_OP_goto) + p.pin(pc) + p.add(_OP_null) + p.pin(e) + } +} + +func (self *_Compiler) compileStructFieldZero(p *_Program, vt reflect.Type) { + switch vt.Kind() { + case reflect.Bool : p.add(_OP_is_zero_1) + case reflect.Int : p.add(_OP_is_zero_ints()) + case reflect.Int8 : p.add(_OP_is_zero_1) + case reflect.Int16 : p.add(_OP_is_zero_2) + case reflect.Int32 : p.add(_OP_is_zero_4) + case reflect.Int64 : p.add(_OP_is_zero_8) + case reflect.Uint : p.add(_OP_is_zero_ints()) + case reflect.Uint8 : p.add(_OP_is_zero_1) + case reflect.Uint16 : p.add(_OP_is_zero_2) + case reflect.Uint32 : p.add(_OP_is_zero_4) + case reflect.Uint64 : p.add(_OP_is_zero_8) + case reflect.Uintptr : p.add(_OP_is_nil) + case reflect.Float32 : p.add(_OP_is_zero_4) + case reflect.Float64 : p.add(_OP_is_zero_8) + case reflect.String : p.add(_OP_is_nil_p1) + case reflect.Interface : p.add(_OP_is_nil_p1) + case reflect.Map : p.add(_OP_is_zero_map) + case reflect.Ptr : p.add(_OP_is_nil) + case reflect.Slice : p.add(_OP_is_nil_p1) + default : panic(error_type(vt)) + } +} + +func (self *_Compiler) compileStructFieldQuoted(p *_Program, sp int, vt reflect.Type) { + p.int(_OP_byte, '"') + self.compileOne(p, sp, vt, self.pv) + p.int(_OP_byte, '"') +} + +func (self *_Compiler) compileInterface(p *_Program, vt reflect.Type) { + x := p.pc() + p.add(_OP_is_nil_p1) + + /* iface and efaces are different */ + if vt.NumMethod() == 0 { + p.add(_OP_eface) + } else { + p.add(_OP_iface) + } + + /* the "null" value */ + e := p.pc() + p.add(_OP_goto) + p.pin(x) + p.add(_OP_null) + p.pin(e) +} + +func (self *_Compiler) compileMarshaler(p *_Program, op _Op, vt reflect.Type, mt reflect.Type) { + pc := p.pc() + vk := vt.Kind() + + /* direct receiver */ + if vk != reflect.Ptr { + p.rtt(op, vt) + return + } + + /* value receiver with a pointer type, check for nil before calling the marshaler */ + p.add(_OP_is_nil) + p.rtt(op, vt) + i := p.pc() + p.add(_OP_goto) + p.pin(pc) + p.add(_OP_null) + p.pin(i) +} diff --git a/vendor/github.com/bytedance/sonic/encoder/debug_go116.go b/vendor/github.com/bytedance/sonic/encoder/debug_go116.go new file mode 100644 index 0000000..4bc9c15 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/encoder/debug_go116.go @@ -0,0 +1,66 @@ +// +build go1.15,!go1.17 + +/* + * Copyright 2021 ByteDance Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encoder + +import ( + `os` + `strings` + `runtime` + `runtime/debug` + + `github.com/bytedance/sonic/internal/jit` +) + +var ( + debugSyncGC = os.Getenv("SONIC_SYNC_GC") != "" + debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == "" +) + +var ( + _Instr_End _Instr = newInsOp(_OP_null) + + _F_gc = jit.Func(runtime.GC) + _F_force_gc = jit.Func(debug.FreeOSMemory) + _F_println = jit.Func(println_wrapper) +) + +func println_wrapper(i int, op1 int, op2 int){ + println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2]) +} + +func (self *_Assembler) force_gc() { + self.call_go(_F_gc) + self.call_go(_F_force_gc) +} + +func (self *_Assembler) debug_instr(i int, v *_Instr) { + if debugSyncGC { + if (i+1 == len(self.p)) { + self.print_gc(i, v, &_Instr_End) + } else { + next := &(self.p[i+1]) + self.print_gc(i, v, next) + name := _OpNames[next.op()] + if strings.Contains(name, "save") { + return + } + } + self.force_gc() + } +} diff --git a/vendor/github.com/bytedance/sonic/encoder/debug_go117.go b/vendor/github.com/bytedance/sonic/encoder/debug_go117.go new file mode 100644 index 0000000..e1016de --- /dev/null +++ b/vendor/github.com/bytedance/sonic/encoder/debug_go117.go @@ -0,0 +1,205 @@ +// +build go1.17,!go1.21 + +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package encoder + +import ( + `fmt` + `os` + `runtime` + `strings` + `unsafe` + + `github.com/bytedance/sonic/internal/jit` + `github.com/twitchyliquid64/golang-asm/obj` +) + +const _FP_debug = 128 + +var ( + debugSyncGC = os.Getenv("SONIC_SYNC_GC") != "" + debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == "" + debugCheckPtr = os.Getenv("SONIC_CHECK_POINTER") != "" +) + +var ( + _Instr_End = newInsOp(_OP_is_nil) + + _F_gc = jit.Func(gc) + _F_println = jit.Func(println_wrapper) + _F_print = jit.Func(print) +) + +func (self *_Assembler) dsave(r ...obj.Addr) { + for i, v := range r { + if i > _FP_debug / 8 - 1 { + panic("too many registers to save") + } else { + self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + _FP_saves + _FP_locals + int64(i) * 8)) + } + } +} + +func (self *_Assembler) dload(r ...obj.Addr) { + for i, v := range r { + if i > _FP_debug / 8 - 1 { + panic("too many registers to load") + } else { + self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + _FP_saves + _FP_locals + int64(i) * 8), v) + } + } +} + +func println_wrapper(i int, op1 int, op2 int){ + println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2]) +} + +func print(i int){ + println(i) +} + +func gc() { + if !debugSyncGC { + return + } + runtime.GC() + // debug.FreeOSMemory() +} + +func (self *_Assembler) dcall(fn obj.Addr) { + self.Emit("MOVQ", fn, _R10) // MOVQ ${fn}, R10 + self.Rjmp("CALL", _R10) // CALL R10 +} + +func (self *_Assembler) debug_gc() { + if !debugSyncGC { + return + } + self.dsave(_REG_debug...) + self.dcall(_F_gc) + self.dload(_REG_debug...) +} + +func (self *_Assembler) debug_instr(i int, v *_Instr) { + if debugSyncGC { + if i+1 == len(self.p) { + self.print_gc(i, v, &_Instr_End) + } else { + next := &(self.p[i+1]) + self.print_gc(i, v, next) + name := _OpNames[next.op()] + if strings.Contains(name, "save") { + return + } + } + // self.debug_gc() + } +} + +//go:noescape +//go:linkname checkptrBase runtime.checkptrBase +func checkptrBase(p unsafe.Pointer) uintptr + +//go:noescape +//go:linkname findObject runtime.findObject +func findObject(p, refBase, refOff uintptr) (base uintptr, s unsafe.Pointer, objIndex uintptr) + +var ( + _F_checkptr = jit.Func(checkptr) + _F_printptr = jit.Func(printptr) +) + +var ( + _R10 = jit.Reg("R10") +) +var _REG_debug = []obj.Addr { + jit.Reg("AX"), + jit.Reg("BX"), + jit.Reg("CX"), + jit.Reg("DX"), + jit.Reg("DI"), + jit.Reg("SI"), + jit.Reg("BP"), + jit.Reg("SP"), + jit.Reg("R8"), + jit.Reg("R9"), + jit.Reg("R10"), + jit.Reg("R11"), + jit.Reg("R12"), + jit.Reg("R13"), + jit.Reg("R14"), + jit.Reg("R15"), +} + +func checkptr(ptr uintptr) { + if ptr == 0 { + return + } + fmt.Printf("pointer: %x\n", ptr) + f := checkptrBase(unsafe.Pointer(uintptr(ptr))) + if f == 0 { + fmt.Printf("! unknown-based pointer: %x\n", ptr) + } else if f == 1 { + fmt.Printf("! stack pointer: %x\n", ptr) + } else { + fmt.Printf("base: %x\n", f) + } + findobj(ptr) +} + +func findobj(ptr uintptr) { + base, s, objIndex := findObject(ptr, 0, 0) + if s != nil && base == 0 { + fmt.Printf("! invalid pointer: %x\n", ptr) + } + fmt.Printf("objIndex: %d\n", objIndex) +} + +func (self *_Assembler) check_ptr(ptr obj.Addr, lea bool) { + if !debugCheckPtr { + return + } + + self.dsave(_REG_debug...) + if lea { + self.Emit("LEAQ", ptr, _R10) + } else { + self.Emit("MOVQ", ptr, _R10) + } + self.Emit("MOVQ", _R10, jit.Ptr(_SP, 0)) + self.dcall(_F_checkptr) + self.dload(_REG_debug...) 
+} + +func printptr(i int, ptr uintptr) { + fmt.Printf("[%d] ptr: %x\n", i, ptr) +} + +func (self *_Assembler) print_ptr(i int, ptr obj.Addr, lea bool) { + self.dsave(_REG_debug...) + if lea { + self.Emit("LEAQ", ptr, _R10) + } else { + self.Emit("MOVQ", ptr, _R10) + } + + self.Emit("MOVQ", jit.Imm(int64(i)), _AX) + self.Emit("MOVQ", _R10, _BX) + self.dcall(_F_printptr) + self.dload(_REG_debug...) +} \ No newline at end of file diff --git a/vendor/github.com/bytedance/sonic/encoder/encoder.go b/vendor/github.com/bytedance/sonic/encoder/encoder.go new file mode 100644 index 0000000..7a13301 --- /dev/null +++ b/vendor/github.com/bytedance/sonic/encoder/encoder.go @@ -0,0 +1,311 @@ +/* + * Copyright 2021 ByteDance Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encoder + +import ( + `bytes` + `encoding/json` + `reflect` + `runtime` + + `github.com/bytedance/sonic/internal/native` + `github.com/bytedance/sonic/internal/native/types` + `github.com/bytedance/sonic/internal/rt` + `github.com/bytedance/sonic/utf8` + `github.com/bytedance/sonic/option` +) + +// Options is a set of encoding options. +type Options uint64 + +const ( + bitSortMapKeys = iota + bitEscapeHTML + bitCompactMarshaler + bitNoQuoteTextMarshaler + bitNoNullSliceOrMap + bitValidateString + + // used for recursive compile + bitPointerValue = 63 +) + +const ( + // SortMapKeys indicates that the keys of a map needs to be sorted + // before serializing into JSON. + // WARNING: This hurts performance A LOT, USE WITH CARE. + SortMapKeys Options = 1 << bitSortMapKeys + + // EscapeHTML indicates encoder to escape all HTML characters + // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape). + // WARNING: This hurts performance A LOT, USE WITH CARE. + EscapeHTML Options = 1 << bitEscapeHTML + + // CompactMarshaler indicates that the output JSON from json.Marshaler + // is always compact and needs no validation + CompactMarshaler Options = 1 << bitCompactMarshaler + + // NoQuoteTextMarshaler indicates that the output text from encoding.TextMarshaler + // is always escaped string and needs no quoting + NoQuoteTextMarshaler Options = 1 << bitNoQuoteTextMarshaler + + // NoNullSliceOrMap indicates all empty Array or Object are encoded as '[]' or '{}', + // instead of 'null' + NoNullSliceOrMap Options = 1 << bitNoNullSliceOrMap + + // ValidateString indicates that encoder should validate the input string + // before encoding it into JSON. + ValidateString Options = 1 << bitValidateString + + // CompatibleWithStd is used to be compatible with std encoder. + CompatibleWithStd Options = SortMapKeys | EscapeHTML | CompactMarshaler +) + +// Encoder represents a specific set of encoder configurations. +type Encoder struct { + Opts Options + prefix string + indent string +} + +// Encode returns the JSON encoding of v. 
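+//
+// A brief usage sketch (an illustrative addition, not upstream code), assuming
+// the Options declared above:
+//
+//	enc := encoder.Encoder{Opts: encoder.SortMapKeys}
+//	out, err := enc.Encode(map[string]int{"b": 2, "a": 1})
+//	// out == []byte(`{"a":1,"b":2}`), err == nil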
+func (self *Encoder) Encode(v interface{}) ([]byte, error) {
+    if self.indent != "" || self.prefix != "" {
+        return EncodeIndented(v, self.prefix, self.indent, self.Opts)
+    }
+    return Encode(v, self.Opts)
+}
+
+// SortKeys enables the SortMapKeys option.
+func (self *Encoder) SortKeys() *Encoder {
+    self.Opts |= SortMapKeys
+    return self
+}
+
+// SetEscapeHTML specifies whether the EscapeHTML option is enabled.
+func (self *Encoder) SetEscapeHTML(f bool) {
+    if f {
+        self.Opts |= EscapeHTML
+    } else {
+        self.Opts &= ^EscapeHTML
+    }
+}
+
+// SetValidateString specifies whether the ValidateString option is enabled.
+func (self *Encoder) SetValidateString(f bool) {
+    if f {
+        self.Opts |= ValidateString
+    } else {
+        self.Opts &= ^ValidateString
+    }
+}
+
+// SetCompactMarshaler specifies whether the CompactMarshaler option is enabled.
+func (self *Encoder) SetCompactMarshaler(f bool) {
+    if f {
+        self.Opts |= CompactMarshaler
+    } else {
+        self.Opts &= ^CompactMarshaler
+    }
+}
+
+// SetNoQuoteTextMarshaler specifies whether the NoQuoteTextMarshaler option is enabled.
+func (self *Encoder) SetNoQuoteTextMarshaler(f bool) {
+    if f {
+        self.Opts |= NoQuoteTextMarshaler
+    } else {
+        self.Opts &= ^NoQuoteTextMarshaler
+    }
+}
+
+// SetIndent instructs the encoder to format each subsequent encoded
+// value as if indented by the package-level function EncodeIndent().
+// Calling SetIndent("", "") disables indentation.
+func (enc *Encoder) SetIndent(prefix, indent string) {
+    enc.prefix = prefix
+    enc.indent = indent
+}
+
+// Quote returns the JSON-quoted version of s.
+func Quote(s string) string {
+    var n int
+    var p []byte
+
+    /* check for empty string */
+    if s == "" {
+        return `""`
+    }
+
+    /* allocate space for the result */
+    n = len(s) + 2
+    p = make([]byte, 0, n)
+
+    /* call the encoder */
+    _ = encodeString(&p, s)
+    return rt.Mem2Str(p)
+}
+
+// Encode returns the JSON encoding of val, encoded with opts.
+func Encode(val interface{}, opts Options) ([]byte, error) {
+    buf := newBytes()
+    err := EncodeInto(&buf, val, opts)
+
+    /* check for errors */
+    if err != nil {
+        freeBytes(buf)
+        return nil, err
+    }
+
+    if opts & EscapeHTML != 0 || opts & ValidateString != 0 {
+        return buf, nil
+    }
+
+    /* make a copy of the result */
+    ret := make([]byte, len(buf))
+    copy(ret, buf)
+
+    /* return the buffer to the pool */
+    freeBytes(buf)
+    return ret, nil
+}
+
+// EncodeInto is like Encode but uses a user-supplied buffer instead of allocating
+// a new one.
+func EncodeInto(buf *[]byte, val interface{}, opts Options) error {
+    stk := newStack()
+    efv := rt.UnpackEface(val)
+    err := encodeTypedPointer(buf, efv.Type, &efv.Value, stk, uint64(opts))
+
+    /* return the stack to the pool */
+    if err != nil {
+        resetStack(stk)
+    }
+    freeStack(stk)
+
+    /* EscapeHTML needs to allocate a new buffer */
+    if opts & EscapeHTML != 0 {
+        dest := HTMLEscape(nil, *buf)
+        freeBytes(*buf) // free the originally used buffer
+        *buf = dest
+    }
+
+    if opts & ValidateString != 0 && !utf8.Validate(*buf) {
+        dest := utf8.CorrectWith(nil, *buf, `\ufffd`)
+        freeBytes(*buf) // free the originally used buffer
+        *buf = dest
+    }
+
+    /* avoid GC ahead of time */
+    runtime.KeepAlive(buf)
+    runtime.KeepAlive(efv)
+    return err
+}
+
+var typeByte = rt.UnpackType(reflect.TypeOf(byte(0)))
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
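+//
+// An illustrative sketch of its use (the function body itself is not shown in
+// this hunk); assuming the signature HTMLEscape(dst []byte, src []byte) []byte,
+// as used by EncodeInto above:
+//
+//	safe := HTMLEscape(nil, []byte(`{"tag":"<b>"}`))
+//	// safe == []byte(`{"tag":"\u003cb\u003e"}`)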
diff --git a/vendor/github.com/gin-gonic/gin/README.md b/vendor/github.com/gin-gonic/gin/README.md
-### HTTP2 server push
-
-http.Pusher is supported only go1.8+. See the [golang blog](https://blog.golang.org/h2push) for detailed information.
-
-```go
-package main
-
-import (
-    "html/template"
-    "log"
-    "net/http"
-
-    "github.com/gin-gonic/gin"
-)
-
-var html = template.Must(template.New("https").Parse(`
-<html>
-<head>
-  <title>Https Test</title>
-  <script src="/assets/app.js"></script>
-</head>
-<body>
-  <h1 style="color:red;">Welcome, Ginner!</h1>
-</body>
-</html>
-`))
-
-func main() {
-    r := gin.Default()
-    r.Static("/assets", "./assets")
-    r.SetHTMLTemplate(html)
-
-    r.GET("/", func(c *gin.Context) {
-        if pusher := c.Writer.Pusher(); pusher != nil {
-            // use pusher.Push() to do server push
-            if err := pusher.Push("/assets/app.js", nil); err != nil {
-                log.Printf("Failed to push: %v", err)
-            }
-        }
-        c.HTML(http.StatusOK, "https", gin.H{
-            "status": "success",
-        })
-    })
-
-    // Listen and serve on https://127.0.0.1:8080
-    r.RunTLS(":8080", "./testdata/server.pem", "./testdata/server.key")
-}
-```
-
-### Define format for the log of routes
-
-The default log of routes is:
-```
-[GIN-debug] POST   /foo     --> main.main.func1 (3 handlers)
-[GIN-debug] GET    /bar     --> main.main.func2 (3 handlers)
-[GIN-debug] GET    /status  --> main.main.func3 (3 handlers)
-```
-
-If you want to log this information in a given format (e.g. JSON, key-value pairs or something else), you can define that format with `gin.DebugPrintRouteFunc`.
-In the example below, we log all routes with the standard log package, but you can use any other logging tool that suits your needs.
-```go
-import (
-    "log"
-    "net/http"
-
-    "github.com/gin-gonic/gin"
-)
-
-func main() {
-    r := gin.Default()
-    gin.DebugPrintRouteFunc = func(httpMethod, absolutePath, handlerName string, nuHandlers int) {
-        log.Printf("endpoint %v %v %v %v\n", httpMethod, absolutePath, handlerName, nuHandlers)
-    }
-
-    r.POST("/foo", func(c *gin.Context) {
-        c.JSON(http.StatusOK, "foo")
-    })
-
-    r.GET("/bar", func(c *gin.Context) {
-        c.JSON(http.StatusOK, "bar")
-    })
-
-    r.GET("/status", func(c *gin.Context) {
-        c.JSON(http.StatusOK, "ok")
-    })
-
-    // Listen and serve on http://0.0.0.0:8080
-    r.Run()
-}
-```
-
-### Set and get a cookie
-
-```go
-import (
-    "fmt"
-
-    "github.com/gin-gonic/gin"
-)
-
-func main() {
-    router := gin.Default()
-
-    router.GET("/cookie", func(c *gin.Context) {
-        cookie, err := c.Cookie("gin_cookie")
-        if err != nil {
-            cookie = "NotSet"
-            c.SetCookie("gin_cookie", "test", 3600, "/", "localhost", false, true)
-        }
-
-        fmt.Printf("Cookie value: %s \n", cookie)
-    })
-
-    router.Run()
-}
-```
-
-## Don't trust all proxies
-
-Gin lets you specify which headers hold the real client IP (if any),
-as well as which proxies (or direct clients) you trust to set one of
-those headers.
-
-Use `SetTrustedProxies()` on your `gin.Engine` to specify the network
-addresses or network CIDRs from which request headers related to the
-client IP can be trusted. They can be IPv4 addresses, IPv4 CIDRs,
-IPv6 addresses or IPv6 CIDRs.
-
-**Attention:** Gin trusts all proxies by default if you don't specify
-a trusted proxy using the function above, and **this is NOT safe**.
-At the same time, if you don't use any proxy at all, you can disable
-this feature with `Engine.SetTrustedProxies(nil)`; `Context.ClientIP()`
-will then return the remote address directly, avoiding unnecessary
-computation.
-
-```go
-import (
-    "fmt"
-
-    "github.com/gin-gonic/gin"
-)
-
-func main() {
-    router := gin.Default()
-    router.SetTrustedProxies([]string{"192.168.1.2"})
-
-    router.GET("/", func(c *gin.Context) {
-        // If the client is 192.168.1.2, use the X-Forwarded-For
-        // header to deduce the original client IP from the
-        // trustworthy parts of that header.
-        // Otherwise, simply return the direct client IP.
-        fmt.Printf("ClientIP: %s\n", c.ClientIP())
-    })
-    router.Run()
-}
-```
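If no proxy sits in front of the service at all, the paragraph above points to `Engine.SetTrustedProxies(nil)` as the way to opt out of header parsing entirely. A minimal sketch of that setup (illustrative only; it assumes the same `gin.Default()` bootstrap as the surrounding examples):

```go
package main

import (
    "fmt"

    "github.com/gin-gonic/gin"
)

func main() {
    router := gin.Default()

    // No proxies are trusted: ClientIP() falls back to the
    // request's RemoteAddr and skips forwarding headers.
    if err := router.SetTrustedProxies(nil); err != nil {
        panic(err)
    }

    router.GET("/", func(c *gin.Context) {
        fmt.Printf("ClientIP: %s\n", c.ClientIP())
    })
    router.Run()
}
```

With `nil` trusted proxies, `c.ClientIP()` ignores `X-Forwarded-For` and `X-Real-Ip` and reports `Request.RemoteAddr` directly.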
-
-**Notice:** If you are using a CDN service, you can set `Engine.TrustedPlatform`
-to skip the TrustedProxies check; it has higher priority than TrustedProxies.
-Look at the example below:
-```go
-import (
-    "fmt"
-
-    "github.com/gin-gonic/gin"
-)
-
-func main() {
-    router := gin.Default()
-    // Use a predefined header, gin.PlatformXXX
-    router.TrustedPlatform = gin.PlatformGoogleAppEngine
-    // Or set your own trusted request header for another trusted proxy service.
-    // Don't set it to any suspect request header; that's unsafe.
-    router.TrustedPlatform = "X-CDN-IP"
-
-    router.GET("/", func(c *gin.Context) {
-        // If you set TrustedPlatform, ClientIP() will resolve the
-        // corresponding header and return the IP directly.
-        fmt.Printf("ClientIP: %s\n", c.ClientIP())
-    })
-    router.Run()
-}
-```
-
-## Testing
-
-The `net/http/httptest` package is the preferred way to test HTTP handlers.
-
-```go
-package main
-
-import (
-    "net/http"
-
-    "github.com/gin-gonic/gin"
-)
-
-func setupRouter() *gin.Engine {
-    r := gin.Default()
-    r.GET("/ping", func(c *gin.Context) {
-        c.String(http.StatusOK, "pong")
-    })
-    return r
-}
-
-func main() {
-    r := setupRouter()
-    r.Run(":8080")
-}
-```
-
-A test for the code example above:
-
-```go
-package main
-
-import (
-    "net/http"
-    "net/http/httptest"
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-)
-
-func TestPingRoute(t *testing.T) {
-    router := setupRouter()
-
-    w := httptest.NewRecorder()
-    req, _ := http.NewRequest(http.MethodGet, "/ping", nil)
-    router.ServeHTTP(w, req)
-
-    assert.Equal(t, http.StatusOK, w.Code)
-    assert.Equal(t, "pong", w.Body.String())
-}
-```
-
-## Users
-
-Awesome projects using the [Gin](https://github.com/gin-gonic/gin) web framework:
-
-* [gorush](https://github.com/appleboy/gorush): A push notification server written in Go.
-* [fnproject](https://github.com/fnproject/fn): The container native, cloud agnostic serverless platform.
-* [photoprism](https://github.com/photoprism/photoprism): Personal photo management powered by Go and Google TensorFlow.
-* [krakend](https://github.com/devopsfaith/krakend): Ultra performant API Gateway with middlewares.
-* [picfit](https://github.com/thoas/picfit): An image resizing server written in Go.
-* [brigade](https://github.com/brigadecore/brigade): Event-based Scripting for Kubernetes.
-* [dkron](https://github.com/distribworks/dkron): Distributed, fault tolerant job scheduling system.
+Gin is the work of hundreds of contributors. We appreciate your help!
+Please see [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
\ No newline at end of file
diff --git a/vendor/github.com/gin-gonic/gin/binding/default_validator.go b/vendor/github.com/gin-gonic/gin/binding/default_validator.go
index c03afe7..e216b85 100644
--- a/vendor/github.com/gin-gonic/gin/binding/default_validator.go
+++ b/vendor/github.com/gin-gonic/gin/binding/default_validator.go
@@ -43,7 +43,7 @@ func (err SliceValidationError) Error() string {
 	}
 }
 
-var _ StructValidator = &defaultValidator{}
+var _ StructValidator = (*defaultValidator)(nil)
 
 // ValidateStruct receives any kind of type, but it only validates structs or pointers to structs.
func (v *defaultValidator) ValidateStruct(obj any) error { diff --git a/vendor/github.com/gin-gonic/gin/binding/form_mapping.go b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go index 98cebfe..540bbbb 100644 --- a/vendor/github.com/gin-gonic/gin/binding/form_mapping.go +++ b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go @@ -19,7 +19,7 @@ import ( var ( errUnknownType = errors.New("unknown type") - // ErrConvertMapStringSlice can not covert to map[string][]string + // ErrConvertMapStringSlice can not convert to map[string][]string ErrConvertMapStringSlice = errors.New("can not convert to map slices of strings") // ErrConvertToMapString can not convert to map[string]string diff --git a/vendor/github.com/gin-gonic/gin/binding/protobuf.go b/vendor/github.com/gin-gonic/gin/binding/protobuf.go index 44f2fdb..57721fc 100644 --- a/vendor/github.com/gin-gonic/gin/binding/protobuf.go +++ b/vendor/github.com/gin-gonic/gin/binding/protobuf.go @@ -6,7 +6,7 @@ package binding import ( "errors" - "io/ioutil" + "io" "net/http" "google.golang.org/protobuf/proto" @@ -19,7 +19,7 @@ func (protobufBinding) Name() string { } func (b protobufBinding) Bind(req *http.Request, obj any) error { - buf, err := ioutil.ReadAll(req.Body) + buf, err := io.ReadAll(req.Body) if err != nil { return err } diff --git a/vendor/github.com/gin-gonic/gin/binding/toml.go b/vendor/github.com/gin-gonic/gin/binding/toml.go index a6b8a90..a66b93a 100644 --- a/vendor/github.com/gin-gonic/gin/binding/toml.go +++ b/vendor/github.com/gin-gonic/gin/binding/toml.go @@ -18,14 +18,6 @@ func (tomlBinding) Name() string { return "toml" } -func decodeToml(r io.Reader, obj any) error { - decoder := toml.NewDecoder(r) - if err := decoder.Decode(obj); err != nil { - return err - } - return decoder.Decode(obj) -} - func (tomlBinding) Bind(req *http.Request, obj any) error { return decodeToml(req.Body, obj) } @@ -33,3 +25,11 @@ func (tomlBinding) Bind(req *http.Request, obj any) error { func (tomlBinding) BindBody(body []byte, obj any) error { return decodeToml(bytes.NewReader(body), obj) } + +func decodeToml(r io.Reader, obj any) error { + decoder := toml.NewDecoder(r) + if err := decoder.Decode(obj); err != nil { + return err + } + return decoder.Decode(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/yaml.go b/vendor/github.com/gin-gonic/gin/binding/yaml.go index b0d36a3..2535f8c 100644 --- a/vendor/github.com/gin-gonic/gin/binding/yaml.go +++ b/vendor/github.com/gin-gonic/gin/binding/yaml.go @@ -9,7 +9,7 @@ import ( "io" "net/http" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) type yamlBinding struct{} diff --git a/vendor/github.com/gin-gonic/gin/context.go b/vendor/github.com/gin-gonic/gin/context.go index b1ad95e..556f8ac 100644 --- a/vendor/github.com/gin-gonic/gin/context.go +++ b/vendor/github.com/gin-gonic/gin/context.go @@ -7,7 +7,6 @@ package gin import ( "errors" "io" - "io/ioutil" "log" "math" "mime/multipart" @@ -15,6 +14,7 @@ import ( "net/http" "net/url" "os" + "path/filepath" "strings" "sync" "time" @@ -153,9 +153,10 @@ func (c *Context) Handler() HandlerFunc { // FullPath returns a matched route full path. For not found routes // returns an empty string. 
-// router.GET("/user/:id", func(c *gin.Context) { -// c.FullPath() == "/user/:id" // true -// }) +// +// router.GET("/user/:id", func(c *gin.Context) { +// c.FullPath() == "/user/:id" // true +// }) func (c *Context) FullPath() string { return c.fullPath } @@ -247,20 +248,20 @@ func (c *Context) Error(err error) *Error { // It also lazy initializes c.Keys if it was not used previously. func (c *Context) Set(key string, value any) { c.mu.Lock() + defer c.mu.Unlock() if c.Keys == nil { c.Keys = make(map[string]any) } c.Keys[key] = value - c.mu.Unlock() } // Get returns the value for the given key, ie: (value, true). // If the value does not exist it returns (nil, false) func (c *Context) Get(key string) (value any, exists bool) { c.mu.RLock() + defer c.mu.RUnlock() value, exists = c.Keys[key] - c.mu.RUnlock() return } @@ -382,10 +383,13 @@ func (c *Context) GetStringMapStringSlice(key string) (smss map[string][]string) // Param returns the value of the URL param. // It is a shortcut for c.Params.ByName(key) -// router.GET("/user/:id", func(c *gin.Context) { -// // a GET request to /user/john -// id := c.Param("id") // id == "john" -// }) +// +// router.GET("/user/:id", func(c *gin.Context) { +// // a GET request to /user/john +// id := c.Param("id") // id == "/john" +// // a GET request to /user/john/ +// id := c.Param("id") // id == "/john/" +// }) func (c *Context) Param(key string) string { return c.Params.ByName(key) } @@ -402,11 +406,12 @@ func (c *Context) AddParam(key, value string) { // Query returns the keyed url query value if it exists, // otherwise it returns an empty string `("")`. // It is shortcut for `c.Request.URL.Query().Get(key)` -// GET /path?id=1234&name=Manu&value= -// c.Query("id") == "1234" -// c.Query("name") == "Manu" -// c.Query("value") == "" -// c.Query("wtf") == "" +// +// GET /path?id=1234&name=Manu&value= +// c.Query("id") == "1234" +// c.Query("name") == "Manu" +// c.Query("value") == "" +// c.Query("wtf") == "" func (c *Context) Query(key string) (value string) { value, _ = c.GetQuery(key) return @@ -415,10 +420,11 @@ func (c *Context) Query(key string) (value string) { // DefaultQuery returns the keyed url query value if it exists, // otherwise it returns the specified defaultValue string. // See: Query() and GetQuery() for further information. -// GET /?name=Manu&lastname= -// c.DefaultQuery("name", "unknown") == "Manu" -// c.DefaultQuery("id", "none") == "none" -// c.DefaultQuery("lastname", "none") == "" +// +// GET /?name=Manu&lastname= +// c.DefaultQuery("name", "unknown") == "Manu" +// c.DefaultQuery("id", "none") == "none" +// c.DefaultQuery("lastname", "none") == "" func (c *Context) DefaultQuery(key, defaultValue string) string { if value, ok := c.GetQuery(key); ok { return value @@ -430,10 +436,11 @@ func (c *Context) DefaultQuery(key, defaultValue string) string { // if it exists `(value, true)` (even when the value is an empty string), // otherwise it returns `("", false)`. 
// It is shortcut for `c.Request.URL.Query().Get(key)` -// GET /?name=Manu&lastname= -// ("Manu", true) == c.GetQuery("name") -// ("", false) == c.GetQuery("id") -// ("", true) == c.GetQuery("lastname") +// +// GET /?name=Manu&lastname= +// ("Manu", true) == c.GetQuery("name") +// ("", false) == c.GetQuery("id") +// ("", true) == c.GetQuery("lastname") func (c *Context) GetQuery(key string) (string, bool) { if values, ok := c.GetQueryArray(key); ok { return values[0], ok @@ -500,9 +507,10 @@ func (c *Context) DefaultPostForm(key, defaultValue string) string { // form or multipart form when it exists `(value, true)` (even when the value is an empty string), // otherwise it returns ("", false). // For example, during a PATCH request to update the user's email: -// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com" -// email= --> ("", true) := GetPostForm("email") // set email to "" -// --> ("", false) := GetPostForm("email") // do nothing with email +// +// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com" +// email= --> ("", true) := GetPostForm("email") // set email to "" +// --> ("", false) := GetPostForm("email") // do nothing with email func (c *Context) GetPostForm(key string) (string, bool) { if values, ok := c.GetPostFormArray(key); ok { return values[0], ok @@ -551,7 +559,7 @@ func (c *Context) GetPostFormMap(key string) (map[string]string, bool) { return c.get(c.formCache, key) } -// get is an internal method and returns a map which satisfy conditions. +// get is an internal method and returns a map which satisfies conditions. func (c *Context) get(m map[string][]string, key string) (map[string]string, bool) { dicts := make(map[string]string) exist := false @@ -595,6 +603,10 @@ func (c *Context) SaveUploadedFile(file *multipart.FileHeader, dst string) error } defer src.Close() + if err = os.MkdirAll(filepath.Dir(dst), 0750); err != nil { + return err + } + out, err := os.Create(dst) if err != nil { return err @@ -607,8 +619,10 @@ func (c *Context) SaveUploadedFile(file *multipart.FileHeader, dst string) error // Bind checks the Method and Content-Type to select a binding engine automatically, // Depending on the "Content-Type" header different bindings are used, for example: -// "application/json" --> JSON binding -// "application/xml" --> XML binding +// +// "application/json" --> JSON binding +// "application/xml" --> XML binding +// // It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. // It decodes the json payload into the struct specified as a pointer. // It writes a 400 error and sets Content-Type header "text/plain" in the response if input is not valid. @@ -651,7 +665,7 @@ func (c *Context) BindHeader(obj any) error { // It will abort the request with HTTP 400 if any error occurs. func (c *Context) BindUri(obj any) error { if err := c.ShouldBindUri(obj); err != nil { - c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) // nolint: errcheck + c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) //nolint: errcheck return err } return nil @@ -662,7 +676,7 @@ func (c *Context) BindUri(obj any) error { // See the binding package. 
func (c *Context) MustBindWith(obj any, b binding.Binding) error { if err := c.ShouldBindWith(obj, b); err != nil { - c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) // nolint: errcheck + c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) //nolint: errcheck return err } return nil @@ -670,8 +684,10 @@ func (c *Context) MustBindWith(obj any, b binding.Binding) error { // ShouldBind checks the Method and Content-Type to select a binding engine automatically, // Depending on the "Content-Type" header different bindings are used, for example: -// "application/json" --> JSON binding -// "application/xml" --> XML binding +// +// "application/json" --> JSON binding +// "application/xml" --> XML binding +// // It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. // It decodes the json payload into the struct specified as a pointer. // Like c.Bind() but this method does not set the response status code to 400 or abort if input is not valid. @@ -738,7 +754,7 @@ func (c *Context) ShouldBindBodyWith(obj any, bb binding.BindingBody) (err error } } if body == nil { - body, err = ioutil.ReadAll(c.Request.Body) + body, err = io.ReadAll(c.Request.Body) if err != nil { return err } @@ -748,7 +764,7 @@ func (c *Context) ShouldBindBodyWith(obj any, bb binding.BindingBody) (err error } // ClientIP implements one best effort algorithm to return the real client IP. -// It called c.RemoteIP() under the hood, to check if the remote IP is a trusted proxy or not. +// It calls c.RemoteIP() under the hood, to check if the remote IP is a trusted proxy or not. // If it is it will then try to parse the headers defined in Engine.RemoteIPHeaders (defaulting to [X-Forwarded-For, X-Real-Ip]). // If the headers are not syntactically valid OR the remote IP does not correspond to a trusted proxy, // the remote IP (coming from Request.RemoteAddr) is returned. @@ -857,7 +873,7 @@ func (c *Context) GetHeader(key string) string { // GetRawData returns stream data. 
func (c *Context) GetRawData() ([]byte, error) { - return ioutil.ReadAll(c.Request.Body) + return io.ReadAll(c.Request.Body) } // SetSameSite with cookie @@ -908,7 +924,9 @@ func (c *Context) Render(code int, r render.Render) { } if err := r.Render(c.Writer); err != nil { - panic(err) + // Pushing error to c.Errors + _ = c.Error(err) + c.Abort() } } @@ -1112,7 +1130,7 @@ func (c *Context) Negotiate(code int, config Negotiate) { c.TOML(code, data) default: - c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server")) // nolint: errcheck + c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server")) //nolint: errcheck } } @@ -1131,7 +1149,7 @@ func (c *Context) NegotiateFormat(offered ...string) string { // According to RFC 2616 and RFC 2396, non-ASCII characters are not allowed in headers, // therefore we can just iterate over the string without casting it into []rune i := 0 - for ; i < len(accepted); i++ { + for ; i < len(accepted) && i < len(offer); i++ { if accepted[i] == '*' || offer[i] == '*' { return offer } diff --git a/vendor/github.com/gin-gonic/gin/debug.go b/vendor/github.com/gin-gonic/gin/debug.go index 25fd7c8..cbcedbc 100644 --- a/vendor/github.com/gin-gonic/gin/debug.go +++ b/vendor/github.com/gin-gonic/gin/debug.go @@ -12,7 +12,7 @@ import ( "strings" ) -const ginSupportMinGoVer = 14 +const ginSupportMinGoVer = 16 // IsDebugging returns true if the framework is running in debug mode. // Use SetMode(gin.ReleaseMode) to disable debug mode. @@ -66,8 +66,8 @@ func getMinVer(v string) (uint64, error) { } func debugPrintWARNINGDefault() { - if v, e := getMinVer(runtime.Version()); e == nil && v <= ginSupportMinGoVer { - debugPrint(`[WARNING] Now Gin requires Go 1.14+. + if v, e := getMinVer(runtime.Version()); e == nil && v < ginSupportMinGoVer { + debugPrint(`[WARNING] Now Gin requires Go 1.16+. `) } diff --git a/vendor/github.com/gin-gonic/gin/errors.go b/vendor/github.com/gin-gonic/gin/errors.go index 2853ce8..06b53c2 100644 --- a/vendor/github.com/gin-gonic/gin/errors.go +++ b/vendor/github.com/gin-gonic/gin/errors.go @@ -39,7 +39,7 @@ type Error struct { type errorMsgs []*Error -var _ error = &Error{} +var _ error = (*Error)(nil) // SetType sets the error's type. func (msg *Error) SetType(flags ErrorType) *Error { @@ -124,10 +124,11 @@ func (a errorMsgs) Last() *Error { // Errors returns an array with all the error messages. // Example: -// c.Error(errors.New("first")) -// c.Error(errors.New("second")) -// c.Error(errors.New("third")) -// c.Errors.Errors() // == []string{"first", "second", "third"} +// +// c.Error(errors.New("first")) +// c.Error(errors.New("second")) +// c.Error(errors.New("third")) +// c.Errors.Errors() // == []string{"first", "second", "third"} func (a errorMsgs) Errors() []string { if len(a) == 0 { return nil diff --git a/vendor/github.com/gin-gonic/gin/gin.go b/vendor/github.com/gin-gonic/gin/gin.go index 5513569..f95e5dd 100644 --- a/vendor/github.com/gin-gonic/gin/gin.go +++ b/vendor/github.com/gin-gonic/gin/gin.go @@ -11,6 +11,7 @@ import ( "net/http" "os" "path" + "regexp" "strings" "sync" @@ -40,6 +41,9 @@ var defaultTrustedCIDRs = []*net.IPNet{ }, } +var regSafePrefix = regexp.MustCompile("[^a-zA-Z0-9/-]+") +var regRemoveRepeatedChar = regexp.MustCompile("/{2,}") + // HandlerFunc defines the handler used by gin middleware as return value. 
type HandlerFunc func(*Context) @@ -166,7 +170,7 @@ type Engine struct { trustedCIDRs []*net.IPNet } -var _ IRouter = &Engine{} +var _ IRouter = (*Engine)(nil) // New returns a new blank Engine instance without any middleware attached. // By default, the configuration is: @@ -668,6 +672,9 @@ func redirectTrailingSlash(c *Context) { req := c.Request p := req.URL.Path if prefix := path.Clean(c.Request.Header.Get("X-Forwarded-Prefix")); prefix != "." { + prefix = regSafePrefix.ReplaceAllString(prefix, "") + prefix = regRemoveRepeatedChar.ReplaceAllString(prefix, "/") + p = prefix + "/" + req.URL.Path } req.URL.Path = p + "/" diff --git a/vendor/github.com/gin-gonic/gin/internal/json/json.go b/vendor/github.com/gin-gonic/gin/internal/json/json.go index a26d7db..c5f3efc 100644 --- a/vendor/github.com/gin-gonic/gin/internal/json/json.go +++ b/vendor/github.com/gin-gonic/gin/internal/json/json.go @@ -2,8 +2,10 @@ // Use of this source code is governed by a MIT style // license that can be found in the LICENSE file. -//go:build !jsoniter && !go_json -// +build !jsoniter,!go_json +//go:build !jsoniter && !go_json && !(sonic && avx && (linux || windows || darwin) && amd64) +// +build !jsoniter +// +build !go_json +// +build !sonic !avx !linux,!windows,!darwin !amd64 package json diff --git a/vendor/github.com/gin-gonic/gin/internal/json/sonic.go b/vendor/github.com/gin-gonic/gin/internal/json/sonic.go new file mode 100644 index 0000000..5a9ca4b --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/internal/json/sonic.go @@ -0,0 +1,27 @@ +// Copyright 2022 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +//go:build sonic && avx && (linux || windows || darwin) && amd64 +// +build sonic +// +build avx +// +build linux windows darwin +// +build amd64 + +package json + +import "github.com/bytedance/sonic" + +var ( + json = sonic.ConfigStd + // Marshal is exported by gin/json package. + Marshal = json.Marshal + // Unmarshal is exported by gin/json package. + Unmarshal = json.Unmarshal + // MarshalIndent is exported by gin/json package. + MarshalIndent = json.MarshalIndent + // NewDecoder is exported by gin/json package. + NewDecoder = json.NewDecoder + // NewEncoder is exported by gin/json package. + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/gin-gonic/gin/mode.go b/vendor/github.com/gin-gonic/gin/mode.go index 545fdaa..fd26d90 100644 --- a/vendor/github.com/gin-gonic/gin/mode.go +++ b/vendor/github.com/gin-gonic/gin/mode.go @@ -35,8 +35,9 @@ const ( // Note that both Logger and Recovery provides custom ways to configure their // output io.Writer. // To support coloring in Windows use: -// import "github.com/mattn/go-colorable" -// gin.DefaultWriter = colorable.NewColorableStdout() +// +// import "github.com/mattn/go-colorable" +// gin.DefaultWriter = colorable.NewColorableStdout() var DefaultWriter io.Writer = os.Stdout // DefaultErrorWriter is the default io.Writer used by Gin to debug errors diff --git a/vendor/github.com/gin-gonic/gin/path.go b/vendor/github.com/gin-gonic/gin/path.go index d42d6b9..82438c1 100644 --- a/vendor/github.com/gin-gonic/gin/path.go +++ b/vendor/github.com/gin-gonic/gin/path.go @@ -10,12 +10,12 @@ package gin // // The following rules are applied iteratively until no further processing can // be done: -// 1. Replace multiple slashes with a single slash. -// 2. Eliminate each . path name element (the current directory). -// 3. Eliminate each inner .. 
path name element (the parent directory) -// along with the non-.. element that precedes it. -// 4. Eliminate .. elements that begin a rooted path: -// that is, replace "/.." by "/" at the beginning of a path. +// 1. Replace multiple slashes with a single slash. +// 2. Eliminate each . path name element (the current directory). +// 3. Eliminate each inner .. path name element (the parent directory) +// along with the non-.. element that precedes it. +// 4. Eliminate .. elements that begin a rooted path: +// that is, replace "/.." by "/" at the beginning of a path. // // If the result of this process is an empty string, "/" is returned. func cleanPath(p string) string { diff --git a/vendor/github.com/gin-gonic/gin/recovery.go b/vendor/github.com/gin-gonic/gin/recovery.go index abb6451..2955c03 100644 --- a/vendor/github.com/gin-gonic/gin/recovery.go +++ b/vendor/github.com/gin-gonic/gin/recovery.go @@ -9,7 +9,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "net" "net/http" @@ -63,7 +62,9 @@ func CustomRecoveryWithWriter(out io.Writer, handle RecoveryFunc) HandlerFunc { if ne, ok := err.(*net.OpError); ok { var se *os.SyscallError if errors.As(ne, &se) { - if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { + seStr := strings.ToLower(se.Error()) + if strings.Contains(seStr, "broken pipe") || + strings.Contains(seStr, "connection reset by peer") { brokenPipe = true } } @@ -91,7 +92,7 @@ func CustomRecoveryWithWriter(out io.Writer, handle RecoveryFunc) HandlerFunc { } if brokenPipe { // If the connection is dead, we can't write a status to it. - c.Error(err.(error)) // nolint: errcheck + c.Error(err.(error)) //nolint: errcheck c.Abort() } else { handle(c, err) @@ -121,7 +122,7 @@ func stack(skip int) []byte { // Print this much at least. If we can't find the source, it won't show. fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc) if file != lastFile { - data, err := ioutil.ReadFile(file) + data, err := os.ReadFile(file) if err != nil { continue } diff --git a/vendor/github.com/gin-gonic/gin/render/json.go b/vendor/github.com/gin-gonic/gin/render/json.go index af678e8..fc8dea4 100644 --- a/vendor/github.com/gin-gonic/gin/render/json.go +++ b/vendor/github.com/gin-gonic/gin/render/json.go @@ -53,11 +53,8 @@ var ( ) // Render (JSON) writes data with custom ContentType. -func (r JSON) Render(w http.ResponseWriter) (err error) { - if err = WriteJSON(w, r.Data); err != nil { - panic(err) - } - return +func (r JSON) Render(w http.ResponseWriter) error { + return WriteJSON(w, r.Data) } // WriteContentType (JSON) writes JSON ContentType. diff --git a/vendor/github.com/gin-gonic/gin/render/yaml.go b/vendor/github.com/gin-gonic/gin/render/yaml.go index 4f0ac01..fc927c1 100644 --- a/vendor/github.com/gin-gonic/gin/render/yaml.go +++ b/vendor/github.com/gin-gonic/gin/render/yaml.go @@ -7,7 +7,7 @@ package render import ( "net/http" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) // YAML contains the given interface object. 
diff --git a/vendor/github.com/gin-gonic/gin/response_writer.go b/vendor/github.com/gin-gonic/gin/response_writer.go index 77c7ed8..753a0b0 100644 --- a/vendor/github.com/gin-gonic/gin/response_writer.go +++ b/vendor/github.com/gin-gonic/gin/response_writer.go @@ -49,7 +49,11 @@ type responseWriter struct { status int } -var _ ResponseWriter = &responseWriter{} +var _ ResponseWriter = (*responseWriter)(nil) + +func (w *responseWriter) Unwrap() http.ResponseWriter { + return w.ResponseWriter +} func (w *responseWriter) reset(writer http.ResponseWriter) { w.ResponseWriter = writer @@ -61,6 +65,7 @@ func (w *responseWriter) WriteHeader(code int) { if code > 0 && w.status != code { if w.Written() { debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", w.status, code) + return } w.status = code } diff --git a/vendor/github.com/gin-gonic/gin/routergroup.go b/vendor/github.com/gin-gonic/gin/routergroup.go index 3c082d9..c833fe8 100644 --- a/vendor/github.com/gin-gonic/gin/routergroup.go +++ b/vendor/github.com/gin-gonic/gin/routergroup.go @@ -42,6 +42,7 @@ type IRoutes interface { PUT(string, ...HandlerFunc) IRoutes OPTIONS(string, ...HandlerFunc) IRoutes HEAD(string, ...HandlerFunc) IRoutes + Match([]string, string, ...HandlerFunc) IRoutes StaticFile(string, string) IRoutes StaticFileFS(string, string, http.FileSystem) IRoutes @@ -58,7 +59,7 @@ type RouterGroup struct { root bool } -var _ IRouter = &RouterGroup{} +var _ IRouter = (*RouterGroup)(nil) // Use adds middleware to the group, see example code in GitHub. func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes { @@ -106,37 +107,37 @@ func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...Ha return group.handle(httpMethod, relativePath, handlers) } -// POST is a shortcut for router.Handle("POST", path, handle). +// POST is a shortcut for router.Handle("POST", path, handlers). func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes { return group.handle(http.MethodPost, relativePath, handlers) } -// GET is a shortcut for router.Handle("GET", path, handle). +// GET is a shortcut for router.Handle("GET", path, handlers). func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes { return group.handle(http.MethodGet, relativePath, handlers) } -// DELETE is a shortcut for router.Handle("DELETE", path, handle). +// DELETE is a shortcut for router.Handle("DELETE", path, handlers). func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes { return group.handle(http.MethodDelete, relativePath, handlers) } -// PATCH is a shortcut for router.Handle("PATCH", path, handle). +// PATCH is a shortcut for router.Handle("PATCH", path, handlers). func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes { return group.handle(http.MethodPatch, relativePath, handlers) } -// PUT is a shortcut for router.Handle("PUT", path, handle). +// PUT is a shortcut for router.Handle("PUT", path, handlers). func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes { return group.handle(http.MethodPut, relativePath, handlers) } -// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle). +// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handlers). 
func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes { return group.handle(http.MethodOptions, relativePath, handlers) } -// HEAD is a shortcut for router.Handle("HEAD", path, handle). +// HEAD is a shortcut for router.Handle("HEAD", path, handlers). func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes { return group.handle(http.MethodHead, relativePath, handlers) } @@ -151,6 +152,15 @@ func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRou return group.returnObj() } +// Match registers a route that matches the specified methods that you declared. +func (group *RouterGroup) Match(methods []string, relativePath string, handlers ...HandlerFunc) IRoutes { + for _, method := range methods { + group.handle(method, relativePath, handlers) + } + + return group.returnObj() +} + // StaticFile registers a single route in order to serve a single file of the local filesystem. // router.StaticFile("favicon.ico", "./resources/favicon.ico") func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes { @@ -161,7 +171,7 @@ func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes { // StaticFileFS works just like `StaticFile` but a custom `http.FileSystem` can be used instead.. // router.StaticFileFS("favicon.ico", "./resources/favicon.ico", Dir{".", false}) -// Gin by default user: gin.Dir() +// Gin by default uses: gin.Dir() func (group *RouterGroup) StaticFileFS(relativePath, filepath string, fs http.FileSystem) IRoutes { return group.staticFileHandler(relativePath, func(c *Context) { c.FileFromFS(filepath, fs) @@ -182,13 +192,14 @@ func (group *RouterGroup) staticFileHandler(relativePath string, handler Handler // of the Router's NotFound handler. // To use the operating system's file system implementation, // use : -// router.Static("/static", "/var/www") +// +// router.Static("/static", "/var/www") func (group *RouterGroup) Static(relativePath, root string) IRoutes { return group.StaticFS(relativePath, Dir(root, false)) } // StaticFS works just like `Static()` but a custom `http.FileSystem` can be used instead. 
-// Gin by default user: gin.Dir() +// Gin by default uses: gin.Dir() func (group *RouterGroup) StaticFS(relativePath string, fs http.FileSystem) IRoutes { if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") { panic("URL parameters can not be used when serving a static folder") diff --git a/vendor/github.com/gin-gonic/gin/tree.go b/vendor/github.com/gin-gonic/gin/tree.go index 0179aa0..dda8f4f 100644 --- a/vendor/github.com/gin-gonic/gin/tree.go +++ b/vendor/github.com/gin-gonic/gin/tree.go @@ -457,11 +457,11 @@ walk: // Outer loop for walking the tree if !n.wildChild { // If the path at the end of the loop is not equal to '/' and the current node has no child nodes - // the current node needs to roll back to last vaild skippedNode + // the current node needs to roll back to last valid skippedNode if path != "/" { - for l := len(*skippedNodes); l > 0; { - skippedNode := (*skippedNodes)[l-1] - *skippedNodes = (*skippedNodes)[:l-1] + for length := len(*skippedNodes); length > 0; length-- { + skippedNode := (*skippedNodes)[length-1] + *skippedNodes = (*skippedNodes)[:length-1] if strings.HasSuffix(skippedNode.path, path) { path = skippedNode.path n = skippedNode.node @@ -574,11 +574,11 @@ walk: // Outer loop for walking the tree if path == prefix { // If the current path does not equal '/' and the node does not have a registered handle and the most recently matched node has a child node - // the current node needs to roll back to last vaild skippedNode + // the current node needs to roll back to last valid skippedNode if n.handlers == nil && path != "/" { - for l := len(*skippedNodes); l > 0; { - skippedNode := (*skippedNodes)[l-1] - *skippedNodes = (*skippedNodes)[:l-1] + for length := len(*skippedNodes); length > 0; length-- { + skippedNode := (*skippedNodes)[length-1] + *skippedNodes = (*skippedNodes)[:length-1] if strings.HasSuffix(skippedNode.path, path) { path = skippedNode.path n = skippedNode.node @@ -633,9 +633,9 @@ walk: // Outer loop for walking the tree // roll back to last valid skippedNode if !value.tsr && path != "/" { - for l := len(*skippedNodes); l > 0; { - skippedNode := (*skippedNodes)[l-1] - *skippedNodes = (*skippedNodes)[:l-1] + for length := len(*skippedNodes); length > 0; length-- { + skippedNode := (*skippedNodes)[length-1] + *skippedNodes = (*skippedNodes)[:length-1] if strings.HasSuffix(skippedNode.path, path) { path = skippedNode.path n = skippedNode.node diff --git a/vendor/github.com/gin-gonic/gin/version.go b/vendor/github.com/gin-gonic/gin/version.go index 37e27f2..390da4f 100644 --- a/vendor/github.com/gin-gonic/gin/version.go +++ b/vendor/github.com/gin-gonic/gin/version.go @@ -5,4 +5,4 @@ package gin // Version is the current gin framework's version. 
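The three rollback loops rewritten in the tree.go hunks above share one shape: pop entries off the skippedNodes stack until one whose path is a suffix of the remaining path is found, with the explicit length-- keeping the loop index and the shrinking slice in step so the loop terminates cleanly when nothing matches. Distilled into a standalone sketch (the skipped type and paths are illustrative, not gin's actual types):

package main

import (
	"fmt"
	"strings"
)

// skipped stands in for gin's skippedNode; only the path matters here.
type skipped struct{ path string }

// rollBack pops entries until one whose path ends with want is found,
// returning it along with the shrunken stack. The length-- post
// statement mirrors the fix carried by the hunks above.
func rollBack(stack []skipped, want string) (*skipped, []skipped) {
	for length := len(stack); length > 0; length-- {
		top := stack[length-1]
		stack = stack[:length-1]
		if strings.HasSuffix(top.path, want) {
			return &top, stack
		}
	}
	return nil, stack
}

func main() {
	stack := []skipped{{path: "/static"}, {path: "/api/:version/users"}}
	node, rest := rollBack(stack, "/users")
	fmt.Println(node.path, len(rest)) // /api/:version/users 1
}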
-const Version = "v1.8.2" +const Version = "v1.9.0" diff --git a/vendor/github.com/go-playground/locales/README.md b/vendor/github.com/go-playground/locales/README.md index 5b0694f..7b6be2c 100644 --- a/vendor/github.com/go-playground/locales/README.md +++ b/vendor/github.com/go-playground/locales/README.md @@ -1,10 +1,8 @@ ## locales -![Project status](https://img.shields.io/badge/version-0.14.0-green.svg) +![Project status](https://img.shields.io/badge/version-0.14.1-green.svg) [![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales) [![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales) ![License](https://img.shields.io/dub/l/vibe-d.svg) -[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator). diff --git a/vendor/github.com/go-playground/universal-translator/README.md b/vendor/github.com/go-playground/universal-translator/README.md index 46dec6d..d9b6654 100644 --- a/vendor/github.com/go-playground/universal-translator/README.md +++ b/vendor/github.com/go-playground/universal-translator/README.md @@ -1,11 +1,9 @@ ## universal-translator -![Project status](https://img.shields.io/badge/version-0.18.0-green.svg) -[![Build Status](https://travis-ci.org/go-playground/universal-translator.svg?branch=master)](https://travis-ci.org/go-playground/universal-translator) +![Project status](https://img.shields.io/badge/version-0.18.1-green.svg) [![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator) [![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator) ![License](https://img.shields.io/dub/l/vibe-d.svg) -[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go index 1216f19..87a1b46 100644 --- a/vendor/github.com/go-playground/universal-translator/import_export.go +++ b/vendor/github.com/go-playground/universal-translator/import_export.go @@ -3,7 +3,6 @@ package ut import ( "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" @@ -41,7 +40,6 @@ const ( func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error { _, err := os.Stat(dirname) - fmt.Println(dirname, err, os.IsNotExist(err)) if err != nil { if !os.IsNotExist(err) { @@ -138,7 +136,7 @@ func 
(t *UniversalTranslator) Export(format ImportExportFormat, dirname string) return err } - err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644) + err = os.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644) if err != nil { return err } @@ -200,7 +198,7 @@ func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilenam // NOTE: generally used when assets have been embedded into the binary and are already in memory. func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error { - b, err := ioutil.ReadAll(reader) + b, err := io.ReadAll(reader) if err != nil { return err } diff --git a/vendor/github.com/go-playground/validator/v10/.gitignore b/vendor/github.com/go-playground/validator/v10/.gitignore index 6e43fac..2410a91 100644 --- a/vendor/github.com/go-playground/validator/v10/.gitignore +++ b/vendor/github.com/go-playground/validator/v10/.gitignore @@ -28,3 +28,4 @@ _testmain.go *.txt cover.html README.html +.idea diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index 9d0a79e..5f8878d 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.11.1-green.svg) +![Project status](https://img.shields.io/badge/version-10.11.2-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) diff --git a/vendor/github.com/goccy/go-json/Makefile b/vendor/github.com/goccy/go-json/Makefile index 363563a..5bbfc4c 100644 --- a/vendor/github.com/goccy/go-json/Makefile +++ b/vendor/github.com/goccy/go-json/Makefile @@ -22,7 +22,7 @@ cover-html: cover .PHONY: lint lint: golangci-lint - golangci-lint run + $(BIN_DIR)/golangci-lint run golangci-lint: | $(BIN_DIR) @{ \ @@ -30,7 +30,7 @@ golangci-lint: | $(BIN_DIR) GOLANGCI_LINT_TMP_DIR=$$(mktemp -d); \ cd $$GOLANGCI_LINT_TMP_DIR; \ go mod init tmp; \ - GOBIN=$(BIN_DIR) go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.36.0; \ + GOBIN=$(BIN_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.48.0; \ rm -rf $$GOLANGCI_LINT_TMP_DIR; \ } diff --git a/vendor/github.com/goccy/go-json/README.md b/vendor/github.com/goccy/go-json/README.md index 5686237..7bacc54 100644 --- a/vendor/github.com/goccy/go-json/README.md +++ b/vendor/github.com/goccy/go-json/README.md @@ -184,7 +184,7 @@ func Marshal(v interface{}) ([]byte, error) { `json.Marshal` and `json.Unmarshal` receive `interface{}` value and they perform type determination dynamically to process. 
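The import_export.go hunks above are a routine io/ioutil retirement: since Go 1.16 the helpers live in os and io, and the stray debugging Println is dropped along the way. The mapping as a standalone sketch (the file name is illustrative):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.WriteFile -> os.WriteFile (same signature, same 0644 perm bits)
	if err := os.WriteFile("en.json", []byte(`{"hello":"world"}`), 0644); err != nil {
		panic(err)
	}
	// ioutil.ReadAll -> io.ReadAll (identical behavior for any io.Reader)
	b, err := io.ReadAll(strings.NewReader(`{"hello":"world"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}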
In normal case, you need to use the `reflect` library to determine the type dynamically, but since `reflect.Type` is defined as `interface`, when you call the method of `reflect.Type`, The reflect's argument is escaped. -Therefore, the arguments for `Marshal` and `Unmarshal` are always escape to the heap. +Therefore, the arguments for `Marshal` and `Unmarshal` are always escaped to the heap. However, `go-json` can use the feature of `reflect.Type` while avoiding escaping. `reflect.Type` is defined as `interface`, but in reality `reflect.Type` is implemented only by the structure `rtype` defined in the `reflect` package. diff --git a/vendor/github.com/goccy/go-json/decode.go b/vendor/github.com/goccy/go-json/decode.go index d99749d..74c6ac3 100644 --- a/vendor/github.com/goccy/go-json/decode.go +++ b/vendor/github.com/goccy/go-json/decode.go @@ -83,6 +83,37 @@ func unmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs return validateEndBuf(src, cursor) } +var ( + pathDecoder = decoder.NewPathDecoder() +) + +func extractFromPath(path *Path, data []byte, optFuncs ...DecodeOptionFunc) ([][]byte, error) { + if path.path.RootSelectorOnly { + return [][]byte{data}, nil + } + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + ctx.Option.Flags |= decoder.PathOption + ctx.Option.Path = path.path + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + paths, cursor, err := pathDecoder.DecodePath(ctx, 0, 0) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return nil, err + } + decoder.ReleaseRuntimeContext(ctx) + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return paths, nil +} + func unmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { src := make([]byte, len(data)+1) // append nul byte to the end copy(src, data) diff --git a/vendor/github.com/goccy/go-json/error.go b/vendor/github.com/goccy/go-json/error.go index 94c1339..5b2dcee 100644 --- a/vendor/github.com/goccy/go-json/error.go +++ b/vendor/github.com/goccy/go-json/error.go @@ -37,3 +37,5 @@ type UnmarshalTypeError = errors.UnmarshalTypeError type UnsupportedTypeError = errors.UnsupportedTypeError type UnsupportedValueError = errors.UnsupportedValueError + +type PathError = errors.PathError diff --git a/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go index 030cb7a..b6876cf 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go @@ -35,3 +35,7 @@ func (d *anonymousFieldDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p = *(*unsafe.Pointer)(p) return d.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+d.offset)) } + +func (d *anonymousFieldDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return d.dec.DecodePath(ctx, cursor, depth) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/array.go b/vendor/github.com/goccy/go-json/internal/decoder/array.go index 21f1fd5..8ef91cf 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/array.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/array.go @@ -1,6 +1,7 @@ package decoder import ( + "fmt" "unsafe" "github.com/goccy/go-json/internal/errors" @@ -167,3 +168,7 @@ func (d *arrayDecoder) Decode(ctx *RuntimeContext, cursor, 
depth int64, p unsafe } } } + +func (d *arrayDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: array decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/assign.go b/vendor/github.com/goccy/go-json/internal/decoder/assign.go new file mode 100644 index 0000000..c53e6ad --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/assign.go @@ -0,0 +1,438 @@ +package decoder + +import ( + "fmt" + "reflect" + "strconv" +) + +var ( + nilValue = reflect.ValueOf(nil) +) + +func AssignValue(src, dst reflect.Value) error { + if dst.Type().Kind() != reflect.Ptr { + return fmt.Errorf("invalid dst type. required pointer type: %T", dst.Type()) + } + casted, err := castValue(dst.Elem().Type(), src) + if err != nil { + return err + } + dst.Elem().Set(casted) + return nil +} + +func castValue(t reflect.Type, v reflect.Value) (reflect.Value, error) { + switch t.Kind() { + case reflect.Int: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int(vv.Int())), nil + case reflect.Int8: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int8(vv.Int())), nil + case reflect.Int16: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int16(vv.Int())), nil + case reflect.Int32: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int32(vv.Int())), nil + case reflect.Int64: + return castInt(v) + case reflect.Uint: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint(vv.Uint())), nil + case reflect.Uint8: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint8(vv.Uint())), nil + case reflect.Uint16: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint16(vv.Uint())), nil + case reflect.Uint32: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint32(vv.Uint())), nil + case reflect.Uint64: + return castUint(v) + case reflect.Uintptr: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uintptr(vv.Uint())), nil + case reflect.String: + return castString(v) + case reflect.Bool: + return castBool(v) + case reflect.Float32: + vv, err := castFloat(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(float32(vv.Float())), nil + case reflect.Float64: + return castFloat(v) + case reflect.Array: + return castArray(t, v) + case reflect.Slice: + return castSlice(t, v) + case reflect.Map: + return castMap(t, v) + case reflect.Struct: + return castStruct(t, v) + } + return v, nil +} + +func castInt(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(int64(v.Uint())), nil + case reflect.String: + i64, err := strconv.ParseInt(v.String(), 10, 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(i64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(int64(1)), nil + } + return reflect.ValueOf(int64(0)), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(int64(v.Float())), nil + case reflect.Array: + if v.Len() > 
0 { + return castInt(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to int64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castInt(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to int64 from empty slice") + case reflect.Interface: + return castInt(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to int64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to int64 from struct") + case reflect.Ptr: + return castInt(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to int64 from %s", v.Type().Kind()) +} + +func castUint(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(uint64(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v, nil + case reflect.String: + u64, err := strconv.ParseUint(v.String(), 10, 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(u64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(uint64(1)), nil + } + return reflect.ValueOf(uint64(0)), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(uint64(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castUint(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castUint(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from empty slice") + case reflect.Interface: + return castUint(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to uint64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to uint64 from struct") + case reflect.Ptr: + return castUint(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from %s", v.Type().Kind()) +} + +func castString(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(fmt.Sprint(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(fmt.Sprint(v.Uint())), nil + case reflect.String: + return v, nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf("true"), nil + } + return reflect.ValueOf("false"), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(fmt.Sprint(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castString(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castString(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty slice") + case reflect.Interface: + return castString(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to string from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to string from struct") + case reflect.Ptr: + return castString(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to string from %s", v.Type().Kind()) +} + +func castBool(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch v.Int() { + case 
0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch v.Uint() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Uint()) + case reflect.String: + b, err := strconv.ParseBool(v.String()) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(b), nil + case reflect.Bool: + return v, nil + case reflect.Float32, reflect.Float64: + switch v.Float() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %f", v.Float()) + case reflect.Array: + if v.Len() > 0 { + return castBool(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castBool(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty slice") + case reflect.Interface: + return castBool(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to string from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to string from struct") + case reflect.Ptr: + return castBool(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to bool from %s", v.Type().Kind()) +} + +func castFloat(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(float64(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(float64(v.Uint())), nil + case reflect.String: + f64, err := strconv.ParseFloat(v.String(), 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(f64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(float64(1)), nil + } + return reflect.ValueOf(float64(0)), nil + case reflect.Float32, reflect.Float64: + return v, nil + case reflect.Array: + if v.Len() > 0 { + return castFloat(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to float64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castFloat(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to float64 from empty slice") + case reflect.Interface: + return castFloat(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to float64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to float64 from struct") + case reflect.Ptr: + return castFloat(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to float64 from %s", v.Type().Kind()) +} + +func castArray(t reflect.Type, v reflect.Value) (reflect.Value, error) { + kind := v.Type().Kind() + if kind == reflect.Interface { + return castArray(t, reflect.ValueOf(v.Interface())) + } + if kind != reflect.Slice && kind != reflect.Array { + return nilValue, fmt.Errorf("failed to cast to array from %s", kind) + } + if t.Elem() == v.Type().Elem() { + return v, nil + } + if t.Len() != v.Len() { + return nilValue, fmt.Errorf("failed to cast [%d]array from slice of %d length", t.Len(), v.Len()) + } + ret := reflect.New(t).Elem() + for i := 0; i < v.Len(); i++ { + vv, err := 
castValue(t.Elem(), v.Index(i)) + if err != nil { + return nilValue, err + } + ret.Index(i).Set(vv) + } + return ret, nil +} + +func castSlice(t reflect.Type, v reflect.Value) (reflect.Value, error) { + kind := v.Type().Kind() + if kind == reflect.Interface { + return castSlice(t, reflect.ValueOf(v.Interface())) + } + if kind != reflect.Slice && kind != reflect.Array { + return nilValue, fmt.Errorf("failed to cast to slice from %s", kind) + } + if t.Elem() == v.Type().Elem() { + return v, nil + } + ret := reflect.MakeSlice(t, v.Len(), v.Len()) + for i := 0; i < v.Len(); i++ { + vv, err := castValue(t.Elem(), v.Index(i)) + if err != nil { + return nilValue, err + } + ret.Index(i).Set(vv) + } + return ret, nil +} + +func castMap(t reflect.Type, v reflect.Value) (reflect.Value, error) { + ret := reflect.MakeMap(t) + switch v.Type().Kind() { + case reflect.Map: + iter := v.MapRange() + for iter.Next() { + key, err := castValue(t.Key(), iter.Key()) + if err != nil { + return nilValue, err + } + value, err := castValue(t.Elem(), iter.Value()) + if err != nil { + return nilValue, err + } + ret.SetMapIndex(key, value) + } + return ret, nil + case reflect.Interface: + return castMap(t, reflect.ValueOf(v.Interface())) + case reflect.Slice: + if v.Len() > 0 { + return castMap(t, v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to map from empty slice") + } + return nilValue, fmt.Errorf("failed to cast to map from %s", v.Type().Kind()) +} + +func castStruct(t reflect.Type, v reflect.Value) (reflect.Value, error) { + ret := reflect.New(t).Elem() + switch v.Type().Kind() { + case reflect.Map: + iter := v.MapRange() + for iter.Next() { + key := iter.Key() + k, err := castString(key) + if err != nil { + return nilValue, err + } + fieldName := k.String() + field, ok := t.FieldByName(fieldName) + if ok { + value, err := castValue(field.Type, iter.Value()) + if err != nil { + return nilValue, err + } + ret.FieldByName(fieldName).Set(value) + } + } + return ret, nil + case reflect.Struct: + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + ret.FieldByName(name).Set(v.FieldByName(name)) + } + return ret, nil + case reflect.Interface: + return castStruct(t, reflect.ValueOf(v.Interface())) + case reflect.Slice: + if v.Len() > 0 { + return castStruct(t, v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to struct from empty slice") + default: + return nilValue, fmt.Errorf("failed to cast to struct from %s", v.Type().Kind()) + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bool.go b/vendor/github.com/goccy/go-json/internal/decoder/bool.go index 455042a..ba6cf5b 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/bool.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/bool.go @@ -1,6 +1,7 @@ package decoder import ( + "fmt" "unsafe" "github.com/goccy/go-json/internal/errors" @@ -76,3 +77,7 @@ func (d *boolDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe. 
} return 0, errors.ErrUnexpectedEndOfJSON("bool", cursor) } + +func (d *boolDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: bool decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bytes.go b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go index 92c7dcf..939bf43 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/bytes.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go @@ -2,6 +2,7 @@ package decoder import ( "encoding/base64" + "fmt" "unsafe" "github.com/goccy/go-json/internal/errors" @@ -78,6 +79,10 @@ func (d *bytesDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe return cursor, nil } +func (d *bytesDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: []byte decoder does not support decode path") +} + func (d *bytesDecoder) decodeStreamBinary(s *Stream, depth int64, p unsafe.Pointer) ([]byte, error) { c := s.skipWhiteSpace() if c == '[' { diff --git a/vendor/github.com/goccy/go-json/internal/decoder/float.go b/vendor/github.com/goccy/go-json/internal/decoder/float.go index dfb7168..9b2eb8b 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/float.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/float.go @@ -156,3 +156,15 @@ func (d *floatDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe d.op(p, f64) return cursor, nil } + +func (d *floatDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + bytes, c, err := d.decodeByte(buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/func.go b/vendor/github.com/goccy/go-json/internal/decoder/func.go index ee35637..4cc12ca 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/func.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/func.go @@ -2,6 +2,7 @@ package decoder import ( "bytes" + "fmt" "unsafe" "github.com/goccy/go-json/internal/errors" @@ -139,3 +140,7 @@ func (d *funcDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe. 
} return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) } + +func (d *funcDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: func decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/int.go b/vendor/github.com/goccy/go-json/internal/decoder/int.go index 509b753..1a7f081 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/int.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/int.go @@ -240,3 +240,7 @@ func (d *intDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.P d.op(p, i64) return cursor, nil } + +func (d *intDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: int decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/interface.go b/vendor/github.com/goccy/go-json/internal/decoder/interface.go index 4dbb4be..45c69ab 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/interface.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/interface.go @@ -94,6 +94,7 @@ func (d *interfaceDecoder) numDecoder(s *Stream) Decoder { var ( emptyInterfaceType = runtime.Type2RType(reflect.TypeOf((*interface{})(nil)).Elem()) + EmptyInterfaceType = emptyInterfaceType interfaceMapType = runtime.Type2RType( reflect.TypeOf((*map[string]interface{})(nil)).Elem(), ) @@ -456,3 +457,72 @@ func (d *interfaceDecoder) decodeEmptyInterface(ctx *RuntimeContext, cursor, dep } return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) } + +func NewPathDecoder() Decoder { + ifaceDecoder := &interfaceDecoder{ + typ: emptyInterfaceType, + structName: "", + fieldName: "", + floatDecoder: newFloatDecoder("", "", func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder("", "", func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: newStringDecoder("", ""), + } + ifaceDecoder.sliceDecoder = newSliceDecoder( + ifaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + "", "", + ) + ifaceDecoder.mapDecoder = newMapDecoder( + interfaceMapType, + stringType, + ifaceDecoder.stringDecoder, + interfaceMapType.Elem(), + ifaceDecoder, + "", "", + ) + return ifaceDecoder +} + +var ( + truebytes = []byte("true") + falsebytes = []byte("false") +) + +func (d *interfaceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case '{': + return d.mapDecoder.DecodePath(ctx, cursor, depth) + case '[': + return d.sliceDecoder.DecodePath(ctx, cursor, depth) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.floatDecoder.DecodePath(ctx, cursor, depth) + case '"': + return d.stringDecoder.DecodePath(ctx, cursor, depth) + case 't': + if err := validateTrue(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{truebytes}, cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 5 + return [][]byte{falsebytes}, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + } + return nil, cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/invalid.go 
b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go index 1ef50a7..4c9721b 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/invalid.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go @@ -43,3 +43,13 @@ func (d *invalidDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsa Field: d.fieldName, } } + +func (d *invalidDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/map.go b/vendor/github.com/goccy/go-json/internal/decoder/map.go index cb55ef0..7a6eea3 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/map.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/map.go @@ -185,3 +185,96 @@ func (d *mapDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.P cursor++ } } + +func (d *mapDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + cursor = skipWhiteSpace(buf, cursor) + buflen := int64(len(buf)) + if buflen < 2 { + return nil, 0, errors.ErrExpected("{} for map", cursor) + } + switch buf[cursor] { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + case '{': + default: + return nil, 0, errors.ErrExpected("{ character for map value", cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return nil, cursor, nil + } + keyDecoder, ok := d.keyDecoder.(*stringDecoder) + if !ok { + return nil, 0, &errors.UnmarshalTypeError{ + Value: "string", + Type: reflect.TypeOf(""), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } + } + ret := [][]byte{} + for { + key, keyCursor, err := keyDecoder.decodeByte(buf, cursor) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(buf, keyCursor) + if buf[cursor] != ':' { + return nil, 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + child, found, err := ctx.Option.Path.Field(string(key)) + if err != nil { + return nil, 0, err + } + if found { + if child != nil { + oldPath := ctx.Option.Path.node + ctx.Option.Path.node = child + paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth) + if err != nil { + return nil, 0, err + } + ctx.Option.Path.node = oldPath + ret = append(ret, paths...) 
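+ // advance to the cursor position returned by the nested DecodePath call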
+ cursor = c + } else { + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + ret = append(ret, buf[start:end]) + cursor = end + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return ret, cursor, nil + } + if buf[cursor] != ',' { + return nil, 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/number.go b/vendor/github.com/goccy/go-json/internal/decoder/number.go index bf63773..10e5435 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/number.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/number.go @@ -51,6 +51,17 @@ func (d *numberDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsaf return cursor, nil } +func (d *numberDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} + func (d *numberDecoder) decodeStreamByte(s *Stream) ([]byte, error) { start := s.cursor for { diff --git a/vendor/github.com/goccy/go-json/internal/decoder/option.go b/vendor/github.com/goccy/go-json/internal/decoder/option.go index e41f876..502f772 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/option.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/option.go @@ -7,9 +7,11 @@ type OptionFlags uint8 const ( FirstWinOption OptionFlags = 1 << iota ContextOption + PathOption ) type Option struct { Flags OptionFlags Context context.Context + Path *Path } diff --git a/vendor/github.com/goccy/go-json/internal/decoder/path.go b/vendor/github.com/goccy/go-json/internal/decoder/path.go new file mode 100644 index 0000000..a15ff69 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/path.go @@ -0,0 +1,670 @@ +package decoder + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type PathString string + +func (s PathString) Build() (*Path, error) { + builder := new(PathBuilder) + return builder.Build([]rune(s)) +} + +type PathBuilder struct { + root PathNode + node PathNode + singleQuotePathSelector bool + doubleQuotePathSelector bool +} + +func (b *PathBuilder) Build(buf []rune) (*Path, error) { + node, err := b.build(buf) + if err != nil { + return nil, err + } + return &Path{ + node: node, + RootSelectorOnly: node == nil, + SingleQuotePathSelector: b.singleQuotePathSelector, + DoubleQuotePathSelector: b.doubleQuotePathSelector, + }, nil +} + +func (b *PathBuilder) build(buf []rune) (PathNode, error) { + if len(buf) == 0 { + return nil, errors.ErrEmptyPath() + } + if buf[0] != '$' { + return nil, errors.ErrInvalidPath("JSON Path must start with a $ character") + } + if len(buf) == 1 { + return nil, nil + } + buf = buf[1:] + offset, err := b.buildNext(buf) + if err != nil { + return nil, err + } + if len(buf) > offset { + return nil, errors.ErrInvalidPath("remain invalid path %q", buf[offset:]) + } + return b.root, nil +} + +func (b *PathBuilder) buildNextCharIfExists(buf []rune, cursor int) (int, error) { + if len(buf) > cursor { + offset, err := b.buildNext(buf[cursor:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + return cursor, 
nil +} + +func (b *PathBuilder) buildNext(buf []rune) (int, error) { + switch buf[0] { + case '.': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + offset, err := b.buildSelector(buf[1:]) + if err != nil { + return 0, err + } + return offset + 1, nil + case '[': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + offset, err := b.buildIndex(buf[1:]) + if err != nil { + return 0, err + } + return offset + 1, nil + default: + return 0, errors.ErrInvalidPath("expect dot or left bracket character. but found %c character", buf[0]) + } +} + +func (b *PathBuilder) buildSelector(buf []rune) (int, error) { + switch buf[0] { + case '.': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with double dot character") + } + offset, err := b.buildPathRecursive(buf[1:]) + if err != nil { + return 0, err + } + return 1 + offset, nil + case '[', ']', '$', '*': + return 0, errors.ErrInvalidPath("found invalid path character %c after dot", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '$', '*', ']': + return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor]) + case '.': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + offset, err := b.buildSelector(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '[': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + offset, err := b.buildIndex(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '"': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with double quote character") + } + offset, err := b.buildQuoteSelector(buf[cursor+1:], DoubleQuotePathSelector) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + } + b.addSelectorNode(string(buf)) + return len(buf), nil +} + +func (b *PathBuilder) buildQuoteSelector(buf []rune, sel QuotePathSelector) (int, error) { + switch buf[0] { + case '[', ']', '$', '.', '*', '\'', '"': + return 0, errors.ErrInvalidPath("found invalid path character %c after quote", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '\'': + if sel != SingleQuotePathSelector { + return 0, errors.ErrInvalidPath("found double quote character in field selector with single quote context") + } + if len(buf) <= cursor+1 { + return 0, errors.ErrInvalidPath("JSON Path ends with single quote character in field selector context") + } + if buf[cursor+1] != ']' { + return 0, errors.ErrInvalidPath("expect right bracket for field selector with single quote but found %c", buf[cursor+1]) + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + b.singleQuotePathSelector = true + return b.buildNextCharIfExists(buf, cursor+2) + case '"': + if sel != DoubleQuotePathSelector { + return 0, errors.ErrInvalidPath("found single quote character in field selector with double quote context") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + b.doubleQuotePathSelector = true + return b.buildNextCharIfExists(buf, cursor+1) + } + } + return 0, errors.ErrInvalidPath("couldn't find quote character in selector quote path context") 
+} + +func (b *PathBuilder) buildPathRecursive(buf []rune) (int, error) { + switch buf[0] { + case '.', '[', ']', '$', '*': + return 0, errors.ErrInvalidPath("found invalid path character %c after double dot", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '$', '*', ']': + return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor]) + case '.': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + selector := buf[:cursor] + b.addRecursiveNode(string(selector)) + offset, err := b.buildSelector(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '[': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + selector := buf[:cursor] + b.addRecursiveNode(string(selector)) + offset, err := b.buildIndex(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + } + b.addRecursiveNode(string(buf)) + return len(buf), nil +} + +func (b *PathBuilder) buildIndex(buf []rune) (int, error) { + switch buf[0] { + case '.', '[', ']', '$': + return 0, errors.ErrInvalidPath("found invalid path character %c after left bracket", buf[0]) + case '\'': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with single quote character") + } + offset, err := b.buildQuoteSelector(buf[1:], SingleQuotePathSelector) + if err != nil { + return 0, err + } + return 1 + offset, nil + case '*': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with star character") + } + if buf[1] != ']' { + return 0, errors.ErrInvalidPath("expect right bracket character for index all path but found %c character", buf[1]) + } + b.addIndexAllNode() + offset := len("*]") + if len(buf) > 2 { + buildOffset, err := b.buildNext(buf[2:]) + if err != nil { + return 0, err + } + return offset + buildOffset, nil + } + return offset, nil + } + + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case ']': + index, err := strconv.ParseInt(string(buf[:cursor]), 10, 64) + if err != nil { + return 0, errors.ErrInvalidPath("%q is unexpected index path", buf[:cursor]) + } + b.addIndexNode(int(index)) + return b.buildNextCharIfExists(buf, cursor+1) + } + } + return 0, errors.ErrInvalidPath("couldn't find right bracket character in index path context") +} + +func (b *PathBuilder) addIndexAllNode() { + node := newPathIndexAllNode() + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addRecursiveNode(selector string) { + node := newPathRecursiveNode(selector) + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addSelectorNode(name string) { + node := newPathSelectorNode(name) + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addIndexNode(idx int) { + node := newPathIndexNode(idx) + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +type QuotePathSelector int + +const ( + SingleQuotePathSelector QuotePathSelector = 1 + DoubleQuotePathSelector QuotePathSelector = 2 +) + +type Path struct { + node PathNode + RootSelectorOnly bool + SingleQuotePathSelector bool + DoubleQuotePathSelector bool +} + +func (p *Path) Field(sel string) (PathNode, bool, error) { + if p.node == nil { + 
return nil, false, nil + } + return p.node.Field(sel) +} + +func (p *Path) Get(src, dst reflect.Value) error { + if p.node == nil { + return nil + } + return p.node.Get(src, dst) +} + +func (p *Path) String() string { + if p.node == nil { + return "$" + } + return p.node.String() +} + +type PathNode interface { + fmt.Stringer + Index(idx int) (PathNode, bool, error) + Field(fieldName string) (PathNode, bool, error) + Get(src, dst reflect.Value) error + chain(PathNode) PathNode + target() bool + single() bool +} + +type BasePathNode struct { + child PathNode +} + +func (n *BasePathNode) chain(node PathNode) PathNode { + n.child = node + return node +} + +func (n *BasePathNode) target() bool { + return n.child == nil +} + +func (n *BasePathNode) single() bool { + return true +} + +type PathSelectorNode struct { + *BasePathNode + selector string +} + +func newPathSelectorNode(selector string) *PathSelectorNode { + return &PathSelectorNode{ + BasePathNode: &BasePathNode{}, + selector: selector, + } +} + +func (n *PathSelectorNode) Index(idx int) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathSelectorNode) Field(fieldName string) (PathNode, bool, error) { + if n.selector == fieldName { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathSelectorNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Map: + iter := src.MapRange() + for iter.Next() { + key, ok := iter.Key().Interface().(string) + if !ok { + return fmt.Errorf("invalid map key type %T", src.Type().Key()) + } + child, found, err := n.Field(key) + if err != nil { + return err + } + if found { + if child != nil { + return child.Get(iter.Value(), dst) + } + return AssignValue(iter.Value(), dst) + } + } + case reflect.Struct: + typ := src.Type() + for i := 0; i < typ.Len(); i++ { + tag := runtime.StructTagFromField(typ.Field(i)) + child, found, err := n.Field(tag.Key) + if err != nil { + return err + } + if found { + if child != nil { + return child.Get(src.Field(i), dst) + } + return AssignValue(src.Field(i), dst) + } + } + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + case reflect.Float64, reflect.String, reflect.Bool: + return AssignValue(src, dst) + } + return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type()) +} + +func (n *PathSelectorNode) String() string { + s := fmt.Sprintf(".%s", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathIndexNode struct { + *BasePathNode + selector int +} + +func newPathIndexNode(selector int) *PathIndexNode { + return &PathIndexNode{ + BasePathNode: &BasePathNode{}, + selector: selector, + } +} + +func (n *PathIndexNode) Index(idx int) (PathNode, bool, error) { + if n.selector == idx { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathIndexNode) Field(fieldName string) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathIndexNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Array, reflect.Slice: + if src.Len() > n.selector { + if n.child != nil { + return n.child.Get(src.Index(n.selector), dst) + } + return AssignValue(src.Index(n.selector), dst) + } + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get [%d] value from %s", n.selector, src.Type()) +} + +func (n 
*PathIndexNode) String() string { + s := fmt.Sprintf("[%d]", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathIndexAllNode struct { + *BasePathNode +} + +func newPathIndexAllNode() *PathIndexAllNode { + return &PathIndexAllNode{ + BasePathNode: &BasePathNode{}, + } +} + +func (n *PathIndexAllNode) Index(idx int) (PathNode, bool, error) { + return n.child, true, nil +} + +func (n *PathIndexAllNode) Field(fieldName string) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathIndexAllNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Array, reflect.Slice: + var arr []interface{} + for i := 0; i < src.Len(); i++ { + var v interface{} + rv := reflect.ValueOf(&v) + if n.child != nil { + if err := n.child.Get(src.Index(i), rv); err != nil { + return err + } + } else { + if err := AssignValue(src.Index(i), rv); err != nil { + return err + } + } + arr = append(arr, v) + } + if err := AssignValue(reflect.ValueOf(arr), dst); err != nil { + return err + } + return nil + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get all value from %s", src.Type()) +} + +func (n *PathIndexAllNode) String() string { + s := "[*]" + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathRecursiveNode struct { + *BasePathNode + selector string +} + +func newPathRecursiveNode(selector string) *PathRecursiveNode { + node := newPathSelectorNode(selector) + return &PathRecursiveNode{ + BasePathNode: &BasePathNode{ + child: node, + }, + selector: selector, + } +} + +func (n *PathRecursiveNode) Field(fieldName string) (PathNode, bool, error) { + if n.selector == fieldName { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathRecursiveNode) Index(_ int) (PathNode, bool, error) { + return n, true, nil +} + +func valueToSliceValue(v interface{}) []interface{} { + rv := reflect.ValueOf(v) + ret := []interface{}{} + if rv.Type().Kind() == reflect.Slice || rv.Type().Kind() == reflect.Array { + for i := 0; i < rv.Len(); i++ { + ret = append(ret, rv.Index(i).Interface()) + } + return ret + } + return []interface{}{v} +} + +func (n *PathRecursiveNode) Get(src, dst reflect.Value) error { + if n.child == nil { + return fmt.Errorf("failed to get by recursive path ..%s", n.selector) + } + var arr []interface{} + switch src.Type().Kind() { + case reflect.Map: + iter := src.MapRange() + for iter.Next() { + key, ok := iter.Key().Interface().(string) + if !ok { + return fmt.Errorf("invalid map key type %T", src.Type().Key()) + } + child, found, err := n.Field(key) + if err != nil { + return err + } + if found { + var v interface{} + rv := reflect.ValueOf(&v) + _ = child.Get(iter.Value(), rv) + arr = append(arr, valueToSliceValue(v)...) + } else { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(iter.Value(), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) + } + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Struct: + typ := src.Type() + for i := 0; i < typ.Len(); i++ { + tag := runtime.StructTagFromField(typ.Field(i)) + child, found, err := n.Field(tag.Key) + if err != nil { + return err + } + if found { + var v interface{} + rv := reflect.ValueOf(&v) + _ = child.Get(src.Field(i), rv) + arr = append(arr, valueToSliceValue(v)...) 
+ } else { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(src.Field(i), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) + } + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Array, reflect.Slice: + for i := 0; i < src.Len(); i++ { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(src.Index(i), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type()) +} + +func (n *PathRecursiveNode) String() string { + s := fmt.Sprintf("..%s", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/ptr.go b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go index 2c83b9c..de12e10 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/ptr.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go @@ -1,6 +1,7 @@ package decoder import ( + "fmt" "unsafe" "github.com/goccy/go-json/internal/runtime" @@ -34,6 +35,10 @@ func (d *ptrDecoder) contentDecoder() Decoder { //go:linkname unsafe_New reflect.unsafe_New func unsafe_New(*runtime.Type) unsafe.Pointer +func UnsafeNew(t *runtime.Type) unsafe.Pointer { + return unsafe_New(t) +} + func (d *ptrDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { if s.skipWhiteSpace() == nul { s.read() @@ -85,3 +90,7 @@ func (d *ptrDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.P cursor = c return cursor, nil } + +func (d *ptrDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: ptr decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/slice.go b/vendor/github.com/goccy/go-json/internal/decoder/slice.go index 85b6e11..30a23e4 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/slice.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/slice.go @@ -299,3 +299,82 @@ func (d *sliceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe } } } + +func (d *sliceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + ret := [][]byte{} + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + case '[': + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + cursor++ + return ret, cursor, nil + } + idx := 0 + for { + child, found, err := ctx.Option.Path.node.Index(idx) + if err != nil { + return nil, 0, err + } + if found { + if child != nil { + oldPath := ctx.Option.Path.node + ctx.Option.Path.node = child + paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth) + if err != nil { + return nil, 0, err + } + ctx.Option.Path.node = oldPath + ret = append(ret, paths...) 
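+ // resume scanning from where the nested DecodePath left off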
+ cursor = c + } else { + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + ret = append(ret, buf[start:end]) + cursor = end + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + cursor++ + return ret, cursor, nil + case ',': + idx++ + default: + return nil, 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor) + } + cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, 0, d.errNumber(cursor) + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("slice", cursor) + } + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/string.go b/vendor/github.com/goccy/go-json/internal/decoder/string.go index d07ad71..32602c9 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/string.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/string.go @@ -60,6 +60,17 @@ func (d *stringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsaf return cursor, nil } +func (d *stringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} + var ( hexToInt = [256]int{ '0': 0, diff --git a/vendor/github.com/goccy/go-json/internal/decoder/struct.go b/vendor/github.com/goccy/go-json/internal/decoder/struct.go index 2c64680..6d32654 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/struct.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/struct.go @@ -817,3 +817,7 @@ func (d *structDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsaf cursor++ } } + +func (d *structDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: struct decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/type.go b/vendor/github.com/goccy/go-json/internal/decoder/type.go index 70e9907..beaf3ab 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/type.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/type.go @@ -10,6 +10,7 @@ import ( type Decoder interface { Decode(*RuntimeContext, int64, int64, unsafe.Pointer) (int64, error) + DecodePath(*RuntimeContext, int64, int64) ([][]byte, int64, error) DecodeStream(*Stream, int64, unsafe.Pointer) error } diff --git a/vendor/github.com/goccy/go-json/internal/decoder/uint.go b/vendor/github.com/goccy/go-json/internal/decoder/uint.go index a62c514..4131731 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/uint.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/uint.go @@ -188,3 +188,7 @@ func (d *uintDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe. 
d.op(p, u64) return cursor, nil } + +func (d *uintDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: uint decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go index e9b25c6..4cd6dbd 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go @@ -3,6 +3,7 @@ package decoder import ( "context" "encoding/json" + "fmt" "unsafe" "github.com/goccy/go-json/internal/errors" @@ -97,3 +98,7 @@ func (d *unmarshalJSONDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, } return end, nil } + +func (d *unmarshalJSONDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: unmarshal json decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go index 1ef2877..6d37993 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go @@ -3,6 +3,7 @@ package decoder import ( "bytes" "encoding" + "fmt" "unicode" "unicode/utf16" "unicode/utf8" @@ -142,6 +143,10 @@ func (d *unmarshalTextDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, return end, nil } +func (d *unmarshalTextDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: unmarshal text decoder does not support decode path") +} + func unquoteBytes(s []byte) (t []byte, ok bool) { length := len(s) if length < 2 || s[0] != '"' || s[length-1] != '"' { diff --git a/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go index 66227ae..0c4e2e6 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go @@ -1,6 +1,7 @@ package decoder import ( + "fmt" "reflect" "unsafe" @@ -66,3 +67,7 @@ func (d *wrappedStringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, ctx.Buf = oldBuf return c, nil } + +func (d *wrappedStringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: wrapped string decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go index bf5e0f9..3b3ff3f 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -506,8 +506,6 @@ func (c *Compiler) listElemCode(typ *runtime.Type) (Code, error) { func (c *Compiler) mapKeyCode(typ *runtime.Type) (Code, error) { switch { - case c.implementsMarshalJSON(typ): - return c.marshalJSONCode(typ) case c.implementsMarshalText(typ): return c.marshalTextCode(typ) } diff --git a/vendor/github.com/goccy/go-json/internal/errors/error.go b/vendor/github.com/goccy/go-json/internal/errors/error.go index d58e39f..9207d0f 100644 --- a/vendor/github.com/goccy/go-json/internal/errors/error.go +++ b/vendor/github.com/goccy/go-json/internal/errors/error.go @@ -162,3 +162,22 @@ func ErrInvalidBeginningOfValue(c byte, 
cursor int64) *SyntaxError {
		Offset: cursor,
	}
}
+
+type PathError struct {
+	msg string
+}
+
+func (e *PathError) Error() string {
+	return fmt.Sprintf("json: invalid path format: %s", e.msg)
+}
+
+func ErrInvalidPath(msg string, args ...interface{}) *PathError {
+	if len(args) != 0 {
+		return &PathError{msg: fmt.Sprintf(msg, args...)}
+	}
+	return &PathError{msg: msg}
+}
+
+func ErrEmptyPath() *PathError {
+	return &PathError{msg: "path is empty"}
+}
diff --git a/vendor/github.com/goccy/go-json/path.go b/vendor/github.com/goccy/go-json/path.go
new file mode 100644
index 0000000..38abce7
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/path.go
@@ -0,0 +1,84 @@
+package json
+
+import (
+	"reflect"
+
+	"github.com/goccy/go-json/internal/decoder"
+)
+
+// CreatePath creates a JSON Path.
+//
+// JSON Path rules:
+//	$   : root object or element. The JSON Path format must start with this operator, which refers to the outermost level of the JSON-formatted string.
+//	.   : child operator. You can identify child values using dot-notation.
+//	..  : recursive descent.
+//	[]  : subscript operator. If the JSON object is an array, you can use brackets to specify the array index.
+//	[*] : all objects/elements of the array.
+//
+// Reserved words must be properly escaped when included in a Path.
+//
+// Escape rules:
+//	single quote style escape: e.g. `$['a.b'].c`
+//	double quote style escape: e.g. `$."a.b".c`
+func CreatePath(p string) (*Path, error) {
+	path, err := decoder.PathString(p).Build()
+	if err != nil {
+		return nil, err
+	}
+	return &Path{path: path}, nil
+}
+
+// Path represents a JSON Path.
+type Path struct {
+	path *decoder.Path
+}
+
+// RootSelectorOnly reports whether only the root selector ($) is used.
+func (p *Path) RootSelectorOnly() bool {
+	return p.path.RootSelectorOnly
+}
+
+// UsedSingleQuotePathSelector reports whether single quote-based escaping was used when building the JSON Path.
+func (p *Path) UsedSingleQuotePathSelector() bool {
+	return p.path.SingleQuotePathSelector
+}
+
+// UsedDoubleQuotePathSelector reports whether double quote-based escaping was used when building the JSON Path.
+func (p *Path) UsedDoubleQuotePathSelector() bool {
+	return p.path.DoubleQuotePathSelector
+}
+
+// Extract extracts the raw JSON fragments matching the path.
+func (p *Path) Extract(data []byte, optFuncs ...DecodeOptionFunc) ([][]byte, error) {
+	return extractFromPath(p, data, optFuncs...)
+}
+
+// PathString returns the original JSON Path string.
+func (p *Path) PathString() string {
+	return p.path.String()
+}
+
+// Unmarshal extracts and decodes the value of the part corresponding to the JSON Path from the input data.
+func (p *Path) Unmarshal(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
+	contents, err := extractFromPath(p, data, optFuncs...)
+	if err != nil {
+		return err
+	}
+	results := make([]interface{}, 0, len(contents))
+	for _, content := range contents {
+		var result interface{}
+		if err := Unmarshal(content, &result); err != nil {
+			return err
+		}
+		results = append(results, result)
+	}
+	if err := decoder.AssignValue(reflect.ValueOf(results), reflect.ValueOf(v)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Get extracts and substitutes the value of the part corresponding to the JSON Path from the input value.
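+//
+// A minimal usage sketch (the value and path below are illustrative):
+//
+//	src := map[string]interface{}{"a": map[string]interface{}{"b": 10}}
+//	path, _ := CreatePath("$.a.b")
+//	var v int
+//	_ = path.Get(src, &v) // v == 10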
+func (p *Path) Get(src, dst interface{}) error { + return p.path.Get(reflect.ValueOf(src), reflect.ValueOf(dst)) +} diff --git a/vendor/github.com/klauspost/cpuid/v2/.gitignore b/vendor/github.com/klauspost/cpuid/v2/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml new file mode 100644 index 0000000..944cc00 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml @@ -0,0 +1,74 @@ +# This is an example goreleaser.yaml file with some sane defaults. +# Make sure to check the documentation at http://goreleaser.com + +builds: + - + id: "cpuid" + binary: cpuid + main: ./cmd/cpuid/main.go + env: + - CGO_ENABLED=0 + flags: + - -ldflags=-s -w + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm64 + goarm: + - 7 + +archives: + - + id: cpuid + name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - LICENSE +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/cpuid + maintainer: Klaus Post + description: CPUID Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt new file mode 100644 index 0000000..2ef4714 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt @@ -0,0 +1,35 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2015- Klaus Post & Contributors. +Email: klauspost@gmail.com + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
diff --git a/vendor/github.com/klauspost/cpuid/v2/LICENSE b/vendor/github.com/klauspost/cpuid/v2/LICENSE
new file mode 100644
index 0000000..5cec7ee
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md
new file mode 100644
index 0000000..465f4b7
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/README.md
@@ -0,0 +1,137 @@
+# cpuid
+Package cpuid provides information about the CPU running the current program.
+
+CPU features are detected on startup, and kept for fast access through the life of the application.
+Currently x86 / x64 (AMD64/i386) and ARM (ARM64) are supported, and no external C (cgo) code is used, which should make the library very easy to use.
+
+You can access the CPU information by accessing the shared CPU variable of the cpuid library.
+
+Package home: https://github.com/klauspost/cpuid
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2)
+[![Build Status][3]][4]
+
+[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master
+[4]: https://travis-ci.org/klauspost/cpuid
+
+## installing
+
+`go get -u github.com/klauspost/cpuid/v2` using modules.
+
+Drop `v2` for earlier versions.
+
+## example
+
+```Go
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	. "github.com/klauspost/cpuid/v2"
+)
"github.com/klauspost/cpuid/v2" +) + +func main() { + // Print basic CPU information: + fmt.Println("Name:", CPU.BrandName) + fmt.Println("PhysicalCores:", CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", CPU.LogicalCores) + fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID) + fmt.Println("Features:", fmt.Sprintf(strings.Join(CPU.FeatureSet(), ","))) + fmt.Println("Cacheline bytes:", CPU.CacheLine) + fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes") + fmt.Println("L1 Instruction Cache:", CPU.Cache.L1D, "bytes") + fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes") + fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes") + fmt.Println("Frequency", CPU.Hz, "hz") + + // Test if we have these specific features: + if CPU.Supports(SSE, SSE2) { + fmt.Println("We have Streaming SIMD 2 Extensions") + } +} +``` + +Sample output: +``` +>go run main.go +Name: AMD Ryzen 9 3950X 16-Core Processor +PhysicalCores: 16 +ThreadsPerCore: 2 +LogicalCores: 32 +Family 23 Model: 113 Vendor ID: AMD +Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CMOV,CX16,F16C,FMA3,HTT,HYPERVISOR,LZCNT,MMX,MMXEXT,NX,POPCNT,RDRAND,RDSEED,RDTSCP,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3 +Cacheline bytes: 64 +L1 Data Cache: 32768 bytes +L1 Instruction Cache: 32768 bytes +L2 Cache: 524288 bytes +L3 Cache: 16777216 bytes +Frequency 0 hz +We have Streaming SIMD 2 Extensions +``` + +# usage + +The `cpuid.CPU` provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features. +A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler. + +Note that for some cpu/os combinations some features will not be detected. +`amd64` has rather good support and should work reliably on all platforms. + +Note that hypervisors may not pass through all CPU features. + +## arm64 feature detection + +Not all operating systems provide ARM features directly +and there is no safe way to do so for the rest. + +Currently `arm64/linux` and `arm64/freebsd` should be quite reliable. +`arm64/darwin` adds features expected from the M1 processor, but a lot remains undetected. + +A `DetectARM()` can be used if you are able to control your deployment, +it will detect CPU features, but may crash if the OS doesn't intercept the calls. +A `-cpu.arm` flag for detecting unsafe ARM features can be added. See below. + +Note that currently only features are detected on ARM, +no additional information is currently available. + +## flags + +It is possible to add flags that affects cpu detection. + +For this the `Flags()` command is provided. + +This must be called *before* `flag.Parse()` AND after the flags have been parsed `Detect()` must be called. + +This means that any detection used in `init()` functions will not contain these flags. + +Example: + +```Go +package main + +import ( + "flag" + "fmt" + "strings" + + "github.com/klauspost/cpuid/v2" +) + +func main() { + cpuid.Flags() + flag.Parse() + cpuid.Detect() + + // Test if we have these specific features: + if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) { + fmt.Println("We have Streaming SIMD 2 Extensions") + } +} +``` + +# license + +This code is published under an MIT license. See LICENSE file for more information. 
diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
new file mode 100644
index 0000000..1d88736
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go
@@ -0,0 +1,1070 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// Package cpuid provides information about the CPU running the current program.
+//
+// CPU features are detected on startup, and kept for fast access through the life of the application.
+// Currently x86 / x64 (AMD64) as well as arm64 are supported.
+//
+// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
+//
+// Package home: https://github.com/klauspost/cpuid
+package cpuid

+import (
+	"flag"
+	"fmt"
+	"math"
+	"os"
+	"runtime"
+	"strings"
+)
+
+// AMD reference: https://www.amd.com/system/files/TechDocs/25481.pdf
+// and Processor Programming Reference (PPR)
+
+// Vendor is a representation of a CPU vendor.
+type Vendor int
+
+const (
+	VendorUnknown Vendor = iota
+	Intel
+	AMD
+	VIA
+	Transmeta
+	NSC
+	KVM  // Kernel-based Virtual Machine
+	MSVM // Microsoft Hyper-V or Windows Virtual PC
+	VMware
+	XenHVM
+	Bhyve
+	Hygon
+	SiS
+	RDC
+
+	Ampere
+	ARM
+	Broadcom
+	Cavium
+	DEC
+	Fujitsu
+	Infineon
+	Motorola
+	NVIDIA
+	AMCC
+	Qualcomm
+	Marvell
+
+	lastVendor
+)
+
+//go:generate stringer -type=FeatureID,Vendor
+
+// FeatureID is the ID of a specific cpu feature.
+type FeatureID int
+
+const (
+	// Keep index -1 as unknown
+	UNKNOWN = -1
+
+	// Add features
+	ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+	AESNI // Advanced Encryption Standard New Instructions
+	AMD3DNOW // AMD 3DNOW
+	AMD3DNOWEXT // AMD 3DNowExt
+	AMXBF16 // Tile computational operations on BFLOAT16 numbers
+	AMXINT8 // Tile computational operations on 8-bit integers
+	AMXTILE // Tile architecture
+	AVX // AVX functions
+	AVX2 // AVX2 functions
+	AVX512BF16 // AVX-512 BFLOAT16 Instructions
+	AVX512BITALG // AVX-512 Bit Algorithms
+	AVX512BW // AVX-512 Byte and Word Instructions
+	AVX512CD // AVX-512 Conflict Detection Instructions
+	AVX512DQ // AVX-512 Doubleword and Quadword Instructions
+	AVX512ER // AVX-512 Exponential and Reciprocal Instructions
+	AVX512F // AVX-512 Foundation
+	AVX512FP16 // AVX-512 FP16 Instructions
+	AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
+	AVX512PF // AVX-512 Prefetch Instructions
+	AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions
+	AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2
+	AVX512VL // AVX-512 Vector Length Extensions
+	AVX512VNNI // AVX-512 Vector Neural Network Instructions
+	AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
+	AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
+	AVXSLOW // Indicates the CPU performs two 128-bit operations instead of one 256-bit operation.
+	BMI1 // Bit Manipulation Instruction Set 1
+	BMI2 // Bit Manipulation Instruction Set 2
+	CLDEMOTE // Cache Line Demote
+	CLMUL // Carry-less Multiplication
+	CLZERO // CLZERO instruction supported
+	CMOV // i686 CMOV
+	CPBOOST // Core Performance Boost
+	CX16 // CMPXCHG16B Instruction
+	ENQCMD // Enqueue Command
+	ERMS // Enhanced REP MOVSB/STOSB
+	F16C // Half-precision floating-point conversion
+	FMA3 // Intel FMA 3. Does not imply AVX.
+	FMA4 // Bulldozer FMA4 functions
+	GFNI // Galois Field New Instructions
+	HLE // Hardware Lock Elision
+	HTT // Hyperthreading (enabled)
+	HWA // Hardware assert supported. Indicates support for MSRC001_10
+	HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors
+	IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB)
+	IBS // Instruction Based Sampling (AMD)
+	IBSBRNTRGT // Instruction Based Sampling Feature (AMD)
+	IBSFETCHSAM // Instruction Based Sampling Feature (AMD)
+	IBSFFV // Instruction Based Sampling Feature (AMD)
+	IBSOPCNT // Instruction Based Sampling Feature (AMD)
+	IBSOPCNTEXT // Instruction Based Sampling Feature (AMD)
+	IBSOPSAM // Instruction Based Sampling Feature (AMD)
+	IBSRDWROPCNT // Instruction Based Sampling Feature (AMD)
+	IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD)
+	INT_WBINVD // WBINVD/WBNOINVD are interruptible.
+	INVLPGB // INVLPGB and TLBSYNC instructions supported
+	LZCNT // LZCNT instruction
+	MCAOVERFLOW // MCA overflow recovery support.
+	MCOMMIT // MCOMMIT instruction supported
+	MMX // standard MMX
+	MMXEXT // SSE integer functions or AMD MMX ext
+	MOVDIR64B // Move 64 Bytes as Direct Store
+	MOVDIRI // Move Doubleword as Direct Store
+	MPX // Intel MPX (Memory Protection Extensions)
+	MSRIRC // Instruction Retired Counter MSR available
+	NX // NX (No-Execute) bit
+	POPCNT // POPCNT instruction
+	RDPRU // RDPRU instruction supported
+	RDRAND // RDRAND instruction is available
+	RDSEED // RDSEED instruction is available
+	RDTSCP // RDTSCP Instruction
+	RTM // Restricted Transactional Memory
+	RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort.
+	SERIALIZE // Serialize Instruction Execution
+	SGX // Software Guard Extensions
+	SGXLC // Software Guard Extensions Launch Control
+	SHA // Intel SHA Extensions
+	SSE // SSE functions
+	SSE2 // P4 SSE functions
+	SSE3 // Prescott SSE3 functions
+	SSE4 // Penryn SSE4.1 functions
+	SSE42 // Nehalem SSE4.2 functions
+	SSE4A // AMD Barcelona microarchitecture SSE4a instructions
+	SSSE3 // Conroe SSSE3 functions
+	STIBP // Single Thread Indirect Branch Predictors
+	SUCCOR // Software uncorrectable error containment and recovery capability.
+	TBM // AMD Trailing Bit Manipulation
+	TSXLDTRK // Intel TSX Suspend Load Address Tracking
+	VAES // Vector AES
+	VMX // Virtual Machine Extensions
+	VPCLMULQDQ // Carry-Less Multiplication Quadword
+	WAITPKG // TPAUSE, UMONITOR, UMWAIT
+	WBNOINVD // Write Back and Do Not Invalidate Cache
+	XOP // Bulldozer XOP functions
+
+	// ARM features:
+	AESARM // AES instructions
+	ARMCPUID // Some CPU ID registers readable at user-level
+	ASIMD // Advanced SIMD
+	ASIMDDP // SIMD Dot Product
+	ASIMDHP // Advanced SIMD half-precision floating point
+	ASIMDRDM // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH)
+	ATOMICS // Large System Extensions (LSE)
+	CRC32 // CRC32/CRC32C instructions
+	DCPOP // Data cache clean to Point of Persistence (DC CVAP)
+	EVTSTRM // Generic timer
+	FCMA // Floating point complex number addition and multiplication
+	FP // Single-precision and double-precision floating point
+	FPHP // Half-precision floating point
+	GPA // Generic Pointer Authentication
+	JSCVT // Javascript-style double->int convert (FJCVTZS)
+	LRCPC // Weaker release consistency (LDAPR, etc)
+	PMULL // Polynomial Multiply instructions (PMULL/PMULL2)
+	SHA1 // SHA-1 instructions (SHA1C, etc)
+	SHA2 // SHA-2 instructions (SHA256H, etc)
+	SHA3 // SHA-3 instructions (EOR3, RAX1, XAR, BCAX)
+	SHA512 // SHA512 instructions
+	SM3 // SM3 instructions
+	SM4 // SM4 instructions
+	SVE // Scalable Vector Extension
+
+	// Keep it last. It automatically defines the size of []flagSet
+	lastID
+
+	firstID FeatureID = UNKNOWN + 1
+)
+
+// CPUInfo contains information about the detected system CPU.
+type CPUInfo struct {
+	BrandName      string  // Brand name reported by the CPU
+	VendorID       Vendor  // Comparable CPU vendor ID
+	VendorString   string  // Raw vendor string.
+	featureSet     flagSet // Features of the CPU
+	PhysicalCores  int     // Number of physical processor cores in your CPU. Will be 0 if undetectable.
+	ThreadsPerCore int     // Number of threads per physical core. Will be 1 if undetectable.
+	LogicalCores   int     // Total number of logical threads: physical cores times threads per core. Will be 0 if undetectable.
+	Family         int     // CPU family number
+	Model          int     // CPU model number
+	CacheLine      int     // Cache line size in bytes. Will be 0 if undetectable.
+	Hz             int64   // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed.
+	BoostFreq      int64   // Max clock speed, if known, 0 otherwise
+	Cache          struct {
+		L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
+		L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
+		L2  int // L2 Cache (per core or shared). Will be -1 if undetected
+		L3  int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected
+	}
+	SGX       SGXSupport
+	maxFunc   uint32
+	maxExFunc uint32
+}
+
+var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
+var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+var xgetbv func(index uint32) (eax, edx uint32)
+var rdtscpAsm func() (eax, ebx, ecx, edx uint32)
+var darwinHasAVX512 = func() bool { return false }
+
+// CPU contains information about the CPU as detected on startup,
+// or when Detect last was called.
+//
+// Use this as the primary entry point to your data.
+var CPU CPUInfo
+
+func init() {
+	initCPU()
+	Detect()
+}
+
+// Detect will re-detect current CPU info.
+// This will replace the content of the exported CPU variable.
+//
+// Unless you expect the CPU to change while your program is running,
+// you should not need to call this function.
+// If you call this, you must ensure that no other goroutine is accessing the
+// exported CPU variable.
+func Detect() {
+	// Set defaults
+	CPU.ThreadsPerCore = 1
+	CPU.Cache.L1I = -1
+	CPU.Cache.L1D = -1
+	CPU.Cache.L2 = -1
+	CPU.Cache.L3 = -1
+	safe := true
+	if detectArmFlag != nil {
+		safe = !*detectArmFlag
+	}
+	addInfo(&CPU, safe)
+	if displayFeats != nil && *displayFeats {
+		fmt.Println("cpu features:", strings.Join(CPU.FeatureSet(), ","))
+		// Exit with non-zero so tests will print value.
+		os.Exit(1)
+	}
+	if disableFlag != nil {
+		s := strings.Split(*disableFlag, ",")
+		for _, feat := range s {
+			feat := ParseFeature(strings.TrimSpace(feat))
+			if feat != UNKNOWN {
+				CPU.featureSet.unset(feat)
+			}
+		}
+	}
+}
+
+// DetectARM will detect ARM64 features.
+// This is NOT done automatically since it can potentially crash
+// if the OS does not handle the command.
+// If in the future this can be done safely, this function may not
+// do anything.
+func DetectARM() {
+	addInfo(&CPU, false)
+}
+
+var detectArmFlag *bool
+var displayFeats *bool
+var disableFlag *string
+
+// Flags adds command-line flags that affect CPU detection.
+// This must be called *before* flag.Parse, and
+// Detect must be called after the flags have been parsed.
+// Note that this means that any detection used in init() functions
+// will not contain these flags.
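+//
+// A typical call sequence (sketch):
+//
+//	cpuid.Flags()
+//	flag.Parse()
+//	cpuid.Detect()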
+func Flags() {
+	disableFlag = flag.String("cpu.disable", "", "disable cpu features; comma separated list")
+	displayFeats = flag.Bool("cpu.features", false, "lists cpu features and exits")
+	detectArmFlag = flag.Bool("cpu.arm", false, "allow ARM features to be detected; can potentially crash")
+}
+
+// Supports returns whether the CPU supports all of the requested features.
+func (c CPUInfo) Supports(ids ...FeatureID) bool {
+	for _, id := range ids {
+		if !c.featureSet.inSet(id) {
+			return false
+		}
+	}
+	return true
+}
+
+// Has allows for checking a single feature.
+// Should be inlined by the compiler.
+func (c CPUInfo) Has(id FeatureID) bool {
+	return c.featureSet.inSet(id)
+}
+
+// Disable will disable one or several features.
+func (c *CPUInfo) Disable(ids ...FeatureID) bool {
+	for _, id := range ids {
+		c.featureSet.unset(id)
+	}
+	return true
+}
+
+// Enable will enable one or several features even if they were undetected.
+// This is of course not recommended for obvious reasons.
+func (c *CPUInfo) Enable(ids ...FeatureID) bool {
+	for _, id := range ids {
+		c.featureSet.set(id)
+	}
+	return true
+}
+
+// IsVendor returns true if the CPU vendor ID matches v.
+func (c CPUInfo) IsVendor(v Vendor) bool {
+	return c.VendorID == v
+}
+
+func (c CPUInfo) FeatureSet() []string {
+	s := make([]string, 0)
+	for _, f := range c.featureSet.Strings() {
+		s = append(s, f)
+	}
+	return s
+}
+
+// RTCounter returns the 64-bit time-stamp counter.
+// Uses the RDTSCP instruction. The value 0 is returned
+// if the CPU does not support the instruction.
+func (c CPUInfo) RTCounter() uint64 {
+	if !c.Supports(RDTSCP) {
+		return 0
+	}
+	a, _, _, d := rdtscpAsm()
+	return uint64(a) | (uint64(d) << 32)
+}
+
+// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
+// This variable is OS dependent, but on Linux contains information
+// about the current cpu/core the code is running on.
+// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
+func (c CPUInfo) Ia32TscAux() uint32 {
+	if !c.Supports(RDTSCP) {
+		return 0
+	}
+	_, _, ecx, _ := rdtscpAsm()
+	return ecx
+}
+
+// LogicalCPU will return the Logical CPU the code is currently executing on.
+// This is likely to change when the OS re-schedules the running thread
+// to another CPU.
+// If the current core cannot be detected, -1 will be returned.
+func (c CPUInfo) LogicalCPU() int {
+	if c.maxFunc < 1 {
+		return -1
+	}
+	_, ebx, _, _ := cpuid(1)
+	return int(ebx >> 24)
+}
+
+// frequencies tries to compute the clock speed of the CPU. If leaf 15 is
+// supported, use it, otherwise parse the brand string. Yes, really.
+func (c *CPUInfo) frequencies() {
+	c.Hz, c.BoostFreq = 0, 0
+	mfi := maxFunctionID()
+	if mfi >= 0x15 {
+		eax, ebx, ecx, _ := cpuid(0x15)
+		if eax != 0 && ebx != 0 && ecx != 0 {
+			c.Hz = (int64(ecx) * int64(ebx)) / int64(eax)
+		}
+	}
+	if mfi >= 0x16 {
+		a, b, _, _ := cpuid(0x16)
+		// Base...
+		if a&0xffff > 0 {
+			c.Hz = int64(a&0xffff) * 1_000_000
+		}
+		// Boost...
+		if b&0xffff > 0 {
+			c.BoostFreq = int64(b&0xffff) * 1_000_000
+		}
+	}
+	if c.Hz > 0 {
+		return
+	}
+
+	// computeHz determines the official rated speed of a CPU from its brand
+	// string. This insanity is *actually the official documented way to do
+	// this according to Intel*, prior to leaf 0x15 existing. The official
+	// documentation only shows this working for exactly `x.xx` or `xxxx`
+	// cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other
+	// sizes.
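+	// Worked example (illustrative): for a brand string ending in "2.50GHz",
+	// multiplier becomes 1e9, the backwards digit scan yields freq=250 with
+	// divisor=100, so c.Hz = 250 * 1e9 / 100 = 2_500_000_000.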
+	model := c.BrandName
+	hz := strings.LastIndex(model, "Hz")
+	if hz < 3 {
+		return
+	}
+	var multiplier int64
+	switch model[hz-1] {
+	case 'M':
+		multiplier = 1000 * 1000
+	case 'G':
+		multiplier = 1000 * 1000 * 1000
+	case 'T':
+		multiplier = 1000 * 1000 * 1000 * 1000
+	}
+	if multiplier == 0 {
+		return
+	}
+	freq := int64(0)
+	divisor := int64(0)
+	decimalShift := int64(1)
+	var i int
+	for i = hz - 2; i >= 0 && model[i] != ' '; i-- {
+		if model[i] >= '0' && model[i] <= '9' {
+			freq += int64(model[i]-'0') * decimalShift
+			decimalShift *= 10
+		} else if model[i] == '.' {
+			if divisor != 0 {
+				return
+			}
+			divisor = decimalShift
+		} else {
+			return
+		}
+	}
+	// we didn't find a space
+	if i < 0 {
+		return
+	}
+	if divisor != 0 {
+		c.Hz = (freq * multiplier) / divisor
+		return
+	}
+	c.Hz = freq * multiplier
+}
+
+// VM returns true if the CPU ID indicates we are in
+// a virtual machine.
+func (c CPUInfo) VM() bool {
+	return c.featureSet.inSet(HYPERVISOR)
+}
+
+// flags contains detected cpu features and characteristics
+type flags uint64
+
+// log2(bits_in_uint64)
+const flagBitsLog2 = 6
+const flagBits = 1 << flagBitsLog2
+const flagMask = flagBits - 1
+
+// flagSet contains detected cpu features and characteristics in an array of flags
+type flagSet [(lastID + flagMask) / flagBits]flags
+
+func (s flagSet) inSet(feat FeatureID) bool {
+	return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
+}
+
+func (s *flagSet) set(feat FeatureID) {
+	s[feat>>flagBitsLog2] |= 1 << (feat & flagMask)
+}
+
+// setIf will set a feature if boolean is true.
+func (s *flagSet) setIf(cond bool, features ...FeatureID) {
+	if cond {
+		for _, offset := range features {
+			s[offset>>flagBitsLog2] |= 1 << (offset & flagMask)
+		}
+	}
+}
+
+func (s *flagSet) unset(offset FeatureID) {
+	bit := flags(1 << (offset & flagMask))
+	s[offset>>flagBitsLog2] = s[offset>>flagBitsLog2] & ^bit
+}
+
+// or with another flagSet.
+func (s *flagSet) or(other flagSet) {
+	for i, v := range other[:] {
+		s[i] |= v
+	}
+}
+
+// ParseFeature will parse the string and return the ID of the matching feature.
+// Will return UNKNOWN if not found.
+func ParseFeature(s string) FeatureID {
+	s = strings.ToUpper(s)
+	for i := firstID; i < lastID; i++ {
+		if i.String() == s {
+			return i
+		}
+	}
+	return UNKNOWN
+}
+
+// Strings returns an array of the detected features for the flagSet.
+func (s flagSet) Strings() []string { + if len(s) == 0 { + return []string{""} + } + r := make([]string, 0) + for i := firstID; i < lastID; i++ { + if s.inSet(i) { + r = append(r, i.String()) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + vend, _ := vendorID() + + if mfi < 0x4 || (vend != Intel && vend != AMD) { + return 1 + } + + if mfi < 0xb { + if vend != Intel { + return 1 + } + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + if vend == AMD { + // Workaround for AMD returning 0, assume 2 if >= Zen 2 + // It will be more correct than not. + fam, _ := familyModel() + _, _, _, d := cpuid(1) + if (d&(1<<28)) != 0 && fam >= 23 { + return 2 + } + } + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + v, _ := vendorID() + switch v { + case Intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. + // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. 
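+			// For example (illustrative value), ebx = 0x00100800 gives
+			// (ebx>>16)&0xff = 16 addressable IDs.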
+			_, ebx, _, _ := cpuid(1)
+			logical := (ebx >> 16) & 0xff
+			return int(logical)
+		}
+		_, b, _, _ := cpuidex(0xb, 1)
+		return int(b & 0xffff)
+	case AMD, Hygon:
+		_, b, _, _ := cpuid(1)
+		return int((b >> 16) & 0xff)
+	default:
+		return 0
+	}
+}
+
+func familyModel() (int, int) {
+	if maxFunctionID() < 0x1 {
+		return 0, 0
+	}
+	eax, _, _, _ := cpuid(1)
+	family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
+	model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
+	return int(family), int(model)
+}
+
+func physicalCores() int {
+	v, _ := vendorID()
+	switch v {
+	case Intel:
+		return logicalCores() / threadsPerCore()
+	case AMD, Hygon:
+		lc := logicalCores()
+		tpc := threadsPerCore()
+		if lc > 0 && tpc > 0 {
+			return lc / tpc
+		}
+
+		// The following is inaccurate on AMD EPYC 7742 64-Core Processor
+		if maxExtendedFunction() >= 0x80000008 {
+			_, _, c, _ := cpuid(0x80000008)
+			if c&0xff > 0 {
+				return int(c&0xff) + 1
+			}
+		}
+	}
+	return 0
+}
+
+// Excerpt from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
+var vendorMapping = map[string]Vendor{
+	"AMDisbetter!": AMD,
+	"AuthenticAMD": AMD,
+	"CentaurHauls": VIA,
+	"GenuineIntel": Intel,
+	"TransmetaCPU": Transmeta,
+	"GenuineTMx86": Transmeta,
+	"Geode by NSC": NSC,
+	"VIA VIA VIA ": VIA,
+	"KVMKVMKVMKVM": KVM,
+	"Microsoft Hv": MSVM,
+	"VMwareVMware": VMware,
+	"XenVMMXenVMM": XenHVM,
+	"bhyve bhyve ": Bhyve,
+	"HygonGenuine": Hygon,
+	"Vortex86 SoC": SiS,
+	"SiS SiS SiS ": SiS,
+	"RiseRiseRise": SiS,
+	"Genuine RDC": RDC,
+}
+
+func vendorID() (Vendor, string) {
+	_, b, c, d := cpuid(0)
+	v := string(valAsString(b, d, c))
+	vend, ok := vendorMapping[v]
+	if !ok {
+		return VendorUnknown, v
+	}
+	return vend, v
+}
+
+func cacheLine() int {
+	if maxFunctionID() < 0x1 {
+		return 0
+	}
+
+	_, ebx, _, _ := cpuid(1)
+	cache := (ebx & 0xff00) >> 5 // clflush size
+	if cache == 0 && maxExtendedFunction() >= 0x80000006 {
+		_, _, ecx, _ := cpuid(0x80000006)
+		cache = ecx & 0xff // cacheline size
+	}
+	// TODO: Read from Cache and TLB Information
+	return int(cache)
+}
+
+func (c *CPUInfo) cacheSize() {
+	c.Cache.L1D = -1
+	c.Cache.L1I = -1
+	c.Cache.L2 = -1
+	c.Cache.L3 = -1
+	vendor, _ := vendorID()
+	switch vendor {
+	case Intel:
+		if maxFunctionID() < 4 {
+			return
+		}
+		for i := uint32(0); ; i++ {
+			eax, ebx, ecx, _ := cpuidex(4, i)
+			cacheType := eax & 15
+			if cacheType == 0 {
+				break
+			}
+			cacheLevel := (eax >> 5) & 7
+			coherency := int(ebx&0xfff) + 1
+			partitions := int((ebx>>12)&0x3ff) + 1
+			associativity := int((ebx>>22)&0x3ff) + 1
+			sets := int(ecx) + 1
+			size := associativity * partitions * coherency * sets
+			switch cacheLevel {
+			case 1:
+				if cacheType == 1 {
+					// 1 = Data Cache
+					c.Cache.L1D = size
+				} else if cacheType == 2 {
+					// 2 = Instruction Cache
+					c.Cache.L1I = size
+				} else {
+					if c.Cache.L1D < 0 {
+						c.Cache.L1D = size
+					}
+					if c.Cache.L1I < 0 {
+						c.Cache.L1I = size
+					}
+				}
+			case 2:
+				c.Cache.L2 = size
+			case 3:
+				c.Cache.L3 = size
+			}
+		}
+	case AMD, Hygon:
+		// Untested.
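+		// Sketch of the layout assumed by the code below: leaf 0x80000005
+		// reports the L1 sizes in KB in the top byte of ECX/EDX, leaf
+		// 0x80000006 reports the L2 size in KB in ECX[31:16], and leaf
+		// 0x8000001D enumerates cache properties much like Intel's leaf 4.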
+		if maxExtendedFunction() < 0x80000005 {
+			return
+		}
+		_, _, ecx, edx := cpuid(0x80000005)
+		c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024)
+		c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024)
+
+		if maxExtendedFunction() < 0x80000006 {
+			return
+		}
+		_, _, ecx, _ = cpuid(0x80000006)
+		c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
+
+		// CPUID Fn8000_001D_EAX_x[N:0] Cache Properties
+		if maxExtendedFunction() < 0x8000001D {
+			return
+		}
+		for i := uint32(0); i < math.MaxUint32; i++ {
+			eax, ebx, ecx, _ := cpuidex(0x8000001D, i)
+
+			level := (eax >> 5) & 7
+			cacheNumSets := ecx + 1
+			cacheLineSize := 1 + (ebx & 2047)
+			cachePhysPartitions := 1 + ((ebx >> 12) & 511)
+			cacheNumWays := 1 + ((ebx >> 22) & 511)
+
+			typ := eax & 15
+			size := int(cacheNumSets * cacheLineSize * cachePhysPartitions * cacheNumWays)
+			if typ == 0 {
+				return
+			}
+
+			switch level {
+			case 1:
+				switch typ {
+				case 1:
+					// Data cache
+					c.Cache.L1D = size
+				case 2:
+					// Inst cache
+					c.Cache.L1I = size
+				default:
+					if c.Cache.L1D < 0 {
+						c.Cache.L1D = size
+					}
+					if c.Cache.L1I < 0 {
+						c.Cache.L1I = size
+					}
+				}
+			case 2:
+				c.Cache.L2 = size
+			case 3:
+				c.Cache.L3 = size
+			}
+		}
+	}
+
+	return
+}
+
+type SGXEPCSection struct {
+	BaseAddress uint64
+	EPCSize     uint64
+}
+
+type SGXSupport struct {
+	Available           bool
+	LaunchControl       bool
+	SGX1Supported       bool
+	SGX2Supported       bool
+	MaxEnclaveSizeNot64 int64
+	MaxEnclaveSize64    int64
+	EPCSections         []SGXEPCSection
+}
+
+func hasSGX(available, lc bool) (rval SGXSupport) {
+	rval.Available = available
+
+	if !available {
+		return
+	}
+
+	rval.LaunchControl = lc
+
+	a, _, _, d := cpuidex(0x12, 0)
+	rval.SGX1Supported = a&0x01 != 0
+	rval.SGX2Supported = a&0x02 != 0
+	rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF)     // pow 2
+	rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2
+	rval.EPCSections = make([]SGXEPCSection, 0)
+
+	for subleaf := uint32(2); subleaf < 2+8; subleaf++ {
+		eax, ebx, ecx, edx := cpuidex(0x12, subleaf)
+		leafType := eax & 0xf
+
+		if leafType == 0 {
+			// Invalid subleaf, stop iterating
+			break
+		} else if leafType == 1 {
+			// EPC Section subleaf
+			baseAddress := uint64(eax&0xfffff000) + (uint64(ebx&0x000fffff) << 32)
+			size := uint64(ecx&0xfffff000) + (uint64(edx&0x000fffff) << 32)
+
+			section := SGXEPCSection{BaseAddress: baseAddress, EPCSize: size}
+			rval.EPCSections = append(rval.EPCSections, section)
+		}
+	}
+
+	return
+}
+
+func support() flagSet {
+	var fs flagSet
+	mfi := maxFunctionID()
+	vend, _ := vendorID()
+	if mfi < 0x1 {
+		return fs
+	}
+	family, model := familyModel()
+
+	_, _, c, d := cpuid(1)
+	fs.setIf((d&(1<<15)) != 0, CMOV)
+	fs.setIf((d&(1<<23)) != 0, MMX)
+	fs.setIf((d&(1<<25)) != 0, MMXEXT)
+	fs.setIf((d&(1<<25)) != 0, SSE)
+	fs.setIf((d&(1<<26)) != 0, SSE2)
+	fs.setIf((c&1) != 0, SSE3)
+	fs.setIf((c&(1<<5)) != 0, VMX)
+	fs.setIf((c&0x00000200) != 0, SSSE3)
+	fs.setIf((c&0x00080000) != 0, SSE4)
+	fs.setIf((c&0x00100000) != 0, SSE42)
+	fs.setIf((c&(1<<25)) != 0, AESNI)
+	fs.setIf((c&(1<<1)) != 0, CLMUL)
+	fs.setIf(c&(1<<23) != 0, POPCNT)
+	fs.setIf(c&(1<<30) != 0, RDRAND)
+
+	// This bit has been reserved by Intel & AMD for use by hypervisors,
+	// and indicates the presence of a hypervisor.
+ fs.setIf(c&(1<<31) != 0, HYPERVISOR) + fs.setIf(c&(1<<29) != 0, F16C) + fs.setIf(c&(1<<13) != 0, CX16) + + if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { + fs.setIf(threadsPerCore() > 1, HTT) + } + if vend == AMD && (d&(1<<28)) != 0 && mfi >= 4 { + fs.setIf(threadsPerCore() > 1, HTT) + } + // Check XGETBV/XSAVE (26), OXSAVE (27) and AVX (28) bits + const avxCheck = 1<<26 | 1<<27 | 1<<28 + if c&avxCheck == avxCheck { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + fs.set(AVX) + switch vend { + case Intel: + // Older than Haswell. + fs.setIf(family == 6 && model < 60, AVXSLOW) + case AMD: + // Older than Zen 2 + fs.setIf(family < 23 || (family == 23 && model < 49), AVXSLOW) + } + } + } + // FMA3 can be used with SSE registers, so no OS support is strictly needed. + // fma3 and OSXSAVE needed. + const fma3Check = 1<<12 | 1<<27 + fs.setIf(c&fma3Check == fma3Check, FMA3) + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. + if mfi >= 7 { + _, ebx, ecx, edx := cpuidex(7, 0) + eax1, _, _, _ := cpuidex(7, 1) + if fs.inSet(AVX) && (ebx&0x00000020) != 0 { + fs.set(AVX2) + } + // CPUID.(EAX=7, ECX=0).EBX + if (ebx & 0x00000008) != 0 { + fs.set(BMI1) + fs.setIf((ebx&0x00000100) != 0, BMI2) + } + fs.setIf(ebx&(1<<2) != 0, SGX) + fs.setIf(ebx&(1<<4) != 0, HLE) + fs.setIf(ebx&(1<<9) != 0, ERMS) + fs.setIf(ebx&(1<<11) != 0, RTM) + fs.setIf(ebx&(1<<14) != 0, MPX) + fs.setIf(ebx&(1<<18) != 0, RDSEED) + fs.setIf(ebx&(1<<19) != 0, ADX) + fs.setIf(ebx&(1<<29) != 0, SHA) + // CPUID.(EAX=7, ECX=0).ECX + fs.setIf(ecx&(1<<5) != 0, WAITPKG) + fs.setIf(ecx&(1<<25) != 0, CLDEMOTE) + fs.setIf(ecx&(1<<27) != 0, MOVDIRI) + fs.setIf(ecx&(1<<28) != 0, MOVDIR64B) + fs.setIf(ecx&(1<<29) != 0, ENQCMD) + fs.setIf(ecx&(1<<30) != 0, SGXLC) + // CPUID.(EAX=7, ECX=0).EDX + fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT) + fs.setIf(edx&(1<<14) != 0, SERIALIZE) + fs.setIf(edx&(1<<16) != 0, TSXLDTRK) + fs.setIf(edx&(1<<26) != 0, IBPB) + fs.setIf(edx&(1<<27) != 0, STIBP) + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). 
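+			// For example, eax = 0xE7 (binary 1110_0111) satisfies both
+			// checks: (0xE7>>5)&7 == 7 and (0xE7>>1)&3 == 3 (illustrative value).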
+ hasAVX512 := (eax>>5)&7 == 7 && (eax>>1)&3 == 3 + if runtime.GOOS == "darwin" { + hasAVX512 = fs.inSet(AVX) && darwinHasAVX512() + } + if hasAVX512 { + fs.setIf(ebx&(1<<16) != 0, AVX512F) + fs.setIf(ebx&(1<<17) != 0, AVX512DQ) + fs.setIf(ebx&(1<<21) != 0, AVX512IFMA) + fs.setIf(ebx&(1<<26) != 0, AVX512PF) + fs.setIf(ebx&(1<<27) != 0, AVX512ER) + fs.setIf(ebx&(1<<28) != 0, AVX512CD) + fs.setIf(ebx&(1<<30) != 0, AVX512BW) + fs.setIf(ebx&(1<<31) != 0, AVX512VL) + // ecx + fs.setIf(ecx&(1<<1) != 0, AVX512VBMI) + fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2) + fs.setIf(ecx&(1<<8) != 0, GFNI) + fs.setIf(ecx&(1<<9) != 0, VAES) + fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ) + fs.setIf(ecx&(1<<11) != 0, AVX512VNNI) + fs.setIf(ecx&(1<<12) != 0, AVX512BITALG) + fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ) + // edx + fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT) + fs.setIf(edx&(1<<22) != 0, AMXBF16) + fs.setIf(edx&(1<<23) != 0, AVX512FP16) + fs.setIf(edx&(1<<24) != 0, AMXTILE) + fs.setIf(edx&(1<<25) != 0, AMXINT8) + // eax1 = CPUID.(EAX=7, ECX=1).EAX + fs.setIf(eax1&(1<<5) != 0, AVX512BF16) + } + } + } + + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + fs.set(LZCNT) + fs.set(POPCNT) + } + fs.setIf((c&(1<<10)) != 0, IBS) + fs.setIf((d&(1<<31)) != 0, AMD3DNOW) + fs.setIf((d&(1<<30)) != 0, AMD3DNOWEXT) + fs.setIf((d&(1<<23)) != 0, MMX) + fs.setIf((d&(1<<22)) != 0, MMXEXT) + fs.setIf((c&(1<<6)) != 0, SSE4A) + fs.setIf(d&(1<<20) != 0, NX) + fs.setIf(d&(1<<27) != 0, RDTSCP) + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if fs.inSet(AVX) { + fs.setIf((c&0x00000800) != 0, XOP) + fs.setIf((c&0x00010000) != 0, FMA4) + } + + } + if maxExtendedFunction() >= 0x80000007 { + _, b, _, d := cpuid(0x80000007) + fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW) + fs.setIf((b&(1<<1)) != 0, SUCCOR) + fs.setIf((b&(1<<2)) != 0, HWA) + fs.setIf((d&(1<<9)) != 0, CPBOOST) + } + + if maxExtendedFunction() >= 0x80000008 { + _, b, _, _ := cpuid(0x80000008) + fs.setIf((b&(1<<9)) != 0, WBNOINVD) + fs.setIf((b&(1<<8)) != 0, MCOMMIT) + fs.setIf((b&(1<<13)) != 0, INT_WBINVD) + fs.setIf((b&(1<<4)) != 0, RDPRU) + fs.setIf((b&(1<<3)) != 0, INVLPGB) + fs.setIf((b&(1<<1)) != 0, MSRIRC) + fs.setIf((b&(1<<0)) != 0, CLZERO) + } + + if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) { + eax, _, _, _ := cpuid(0x8000001b) + fs.setIf((eax>>0)&1 == 1, IBSFFV) + fs.setIf((eax>>1)&1 == 1, IBSFETCHSAM) + fs.setIf((eax>>2)&1 == 1, IBSOPSAM) + fs.setIf((eax>>3)&1 == 1, IBSRDWROPCNT) + fs.setIf((eax>>4)&1 == 1, IBSOPCNT) + fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT) + fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT) + fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK) + } + + return fs +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s new file mode 100644 index 0000000..8587c3a --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s @@ -0,0 +1,47 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +//+build 386,!gccgo,!noasm,!appengine + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmDarwinHasAVX512() bool +TEXT ·asmDarwinHasAVX512(SB), 7, $0 + MOVL $0, eax+0(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s new file mode 100644 index 0000000..bc11f89 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s @@ -0,0 +1,72 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo,!noasm,!appengine + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET + +// From https://go-review.googlesource.com/c/sys/+/285572/ +// func asmDarwinHasAVX512() bool +TEXT ·asmDarwinHasAVX512(SB), 7, $0-1 + MOVB $0, ret+0(FP) // default to false + +#ifdef GOOS_darwin // return if not darwin +#ifdef GOARCH_amd64 // return if not amd64 +// These values from: +// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h +#define commpage64_base_address 0x00007fffffe00000 +#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010) +#define commpage64_version (commpage64_base_address+0x01E) +#define hasAVX512F 0x0000004000000000 + MOVQ $commpage64_version, BX + MOVW (BX), AX + CMPW AX, $13 // versions < 13 do not support AVX512 + JL no_avx512 + MOVQ $commpage64_cpu_capabilities64, BX + MOVQ (BX), AX + MOVQ $hasAVX512F, CX + ANDQ CX, AX + JZ no_avx512 + MOVB $1, ret+0(FP) + +no_avx512: +#endif +#endif + RET + diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s new file mode 100644 index 0000000..b31d6ae --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s @@ -0,0 +1,26 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +//+build arm64,!gccgo,!noasm,!appengine + +// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt + +// func getMidr +TEXT ·getMidr(SB), 7, $0 + WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */ + MOVD R0, midr+0(FP) + RET + +// func getProcFeatures +TEXT ·getProcFeatures(SB), 7, $0 + WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */ + MOVD R0, procFeatures+0(FP) + RET + +// func getInstAttributes +TEXT ·getInstAttributes(SB), 7, $0 + WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */ + WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */ + MOVD R0, instAttrReg0+0(FP) + MOVD R1, instAttrReg1+8(FP) + RET + diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go new file mode 100644 index 0000000..9bf9f77 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go @@ -0,0 +1,246 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build arm64,!gccgo,!noasm,!appengine + +package cpuid + +import "runtime" + +func getMidr() (midr uint64) +func getProcFeatures() (procFeatures uint64) +func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) + +func initCPU() { + cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + xgetbv = func(uint32) (a, b uint32) { return 0, 0 } + rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } +} + +func addInfo(c *CPUInfo, safe bool) { + // Seems to be safe to assume on ARM64 + c.CacheLine = 64 + detectOS(c) + + // ARM64 disabled since it may crash if interrupt is not intercepted by OS. 
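+	// When the -cpu.arm flag is set, Detect passes safe=false and the
+	// register reads below are attempted regardless of ARMCPUID support.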
+ if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" { + return + } + midr := getMidr() + + // MIDR_EL1 - Main ID Register + // https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | Implementer | [31-24] | y | + // |--------------------------------------------------| + // | Variant | [23-20] | y | + // |--------------------------------------------------| + // | Architecture | [19-16] | y | + // |--------------------------------------------------| + // | PartNum | [15-4] | y | + // |--------------------------------------------------| + // | Revision | [3-0] | y | + // x--------------------------------------------------x + + switch (midr >> 24) & 0xff { + case 0xC0: + c.VendorString = "Ampere Computing" + c.VendorID = Ampere + case 0x41: + c.VendorString = "Arm Limited" + c.VendorID = ARM + case 0x42: + c.VendorString = "Broadcom Corporation" + c.VendorID = Broadcom + case 0x43: + c.VendorString = "Cavium Inc" + c.VendorID = Cavium + case 0x44: + c.VendorString = "Digital Equipment Corporation" + c.VendorID = DEC + case 0x46: + c.VendorString = "Fujitsu Ltd" + c.VendorID = Fujitsu + case 0x49: + c.VendorString = "Infineon Technologies AG" + c.VendorID = Infineon + case 0x4D: + c.VendorString = "Motorola or Freescale Semiconductor Inc" + c.VendorID = Motorola + case 0x4E: + c.VendorString = "NVIDIA Corporation" + c.VendorID = NVIDIA + case 0x50: + c.VendorString = "Applied Micro Circuits Corporation" + c.VendorID = AMCC + case 0x51: + c.VendorString = "Qualcomm Inc" + c.VendorID = Qualcomm + case 0x56: + c.VendorString = "Marvell International Ltd" + c.VendorID = Marvell + case 0x69: + c.VendorString = "Intel Corporation" + c.VendorID = Intel + } + + // Lower 4 bits: Architecture + // Architecture Meaning + // 0b0001 Armv4. + // 0b0010 Armv4T. + // 0b0011 Armv5 (obsolete). + // 0b0100 Armv5T. + // 0b0101 Armv5TE. + // 0b0110 Armv5TEJ. + // 0b0111 Armv6. + // 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'. + // Upper 4 bit: Variant + // An IMPLEMENTATION DEFINED variant number. + // Typically, this field is used to distinguish between different product variants, or major revisions of a product. + c.Family = int(midr>>16) & 0xff + + // PartNum, bits [15:4] + // An IMPLEMENTATION DEFINED primary part number for the device. + // On processors implemented by Arm, if the top four bits of the primary + // part number are 0x0 or 0x7, the variant and architecture are encoded differently. + // Revision, bits [3:0] + // An IMPLEMENTATION DEFINED revision number for the device. 
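+	// Worked example (illustrative): midr = 0x413FD0C1 decodes to
+	// implementer 0x41 (Arm Limited), Family 0x3F, and Model 0xD0C1.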
+ c.Model = int(midr) & 0xffff + + procFeatures := getProcFeatures() + + // ID_AA64PFR0_EL1 - Processor Feature Register 0 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | DIT | [51-48] | y | + // |--------------------------------------------------| + // | SVE | [35-32] | y | + // |--------------------------------------------------| + // | GIC | [27-24] | n | + // |--------------------------------------------------| + // | AdvSIMD | [23-20] | y | + // |--------------------------------------------------| + // | FP | [19-16] | y | + // |--------------------------------------------------| + // | EL3 | [15-12] | n | + // |--------------------------------------------------| + // | EL2 | [11-8] | n | + // |--------------------------------------------------| + // | EL1 | [7-4] | n | + // |--------------------------------------------------| + // | EL0 | [3-0] | n | + // x--------------------------------------------------x + + var f flagSet + // if procFeatures&(0xf<<48) != 0 { + // fmt.Println("DIT") + // } + f.setIf(procFeatures&(0xf<<32) != 0, SVE) + if procFeatures&(0xf<<20) != 15<<20 { + f.set(ASIMD) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1 + // 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic. + f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP) + } + f.setIf(procFeatures&(0xf<<16) != 0, FP) + + instAttrReg0, instAttrReg1 := getInstAttributes() + + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // + // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | TS | [55-52] | y | + // |--------------------------------------------------| + // | FHM | [51-48] | y | + // |--------------------------------------------------| + // | DP | [47-44] | y | + // |--------------------------------------------------| + // | SM4 | [43-40] | y | + // |--------------------------------------------------| + // | SM3 | [39-36] | y | + // |--------------------------------------------------| + // | SHA3 | [35-32] | y | + // |--------------------------------------------------| + // | RDM | [31-28] | y | + // |--------------------------------------------------| + // | ATOMICS | [23-20] | y | + // |--------------------------------------------------| + // | CRC32 | [19-16] | y | + // |--------------------------------------------------| + // | SHA2 | [15-12] | y | + // |--------------------------------------------------| + // | SHA1 | [11-8] | y | + // |--------------------------------------------------| + // | AES | [7-4] | y | + // x--------------------------------------------------x + + // if instAttrReg0&(0xf<<52) != 0 { + // fmt.Println("TS") + // } + // if instAttrReg0&(0xf<<48) != 0 { + // fmt.Println("FHM") + // } + f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP) + f.setIf(instAttrReg0&(0xf<<40) != 0, SM4) + f.setIf(instAttrReg0&(0xf<<36) != 0, SM3) + f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3) + f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM) + f.setIf(instAttrReg0&(0xf<<20) != 0, ATOMICS) + f.setIf(instAttrReg0&(0xf<<16) != 0, CRC32) + f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, 
and SHA512SU1 instructions implemented.
+	f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512)
+	f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1)
+	f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM)
+	// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1
+	// 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities.
+	f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL)
+
+	// https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1
+	//
+	// ID_AA64ISAR1_EL1 - Instruction set attribute register 1
+	// x--------------------------------------------------x
+	// | Name | bits | visible |
+	// |--------------------------------------------------|
+	// | GPI | [31-28] | y |
+	// |--------------------------------------------------|
+	// | GPA | [27-24] | y |
+	// |--------------------------------------------------|
+	// | LRCPC | [23-20] | y |
+	// |--------------------------------------------------|
+	// | FCMA | [19-16] | y |
+	// |--------------------------------------------------|
+	// | JSCVT | [15-12] | y |
+	// |--------------------------------------------------|
+	// | API | [11-8] | y |
+	// |--------------------------------------------------|
+	// | APA | [7-4] | y |
+	// |--------------------------------------------------|
+	// | DPB | [3-0] | y |
+	// x--------------------------------------------------x
+
+	// if instAttrReg1&(0xf<<28) != 0 {
+	//	fmt.Println("GPI")
+	// }
+	// GPA, bits [27:24] (see the table above).
+	f.setIf(instAttrReg1&(0xf<<24) != 0, GPA)
+	f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC)
+	f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA)
+	f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT)
+	// if instAttrReg1&(0xf<<8) != 0 {
+	//	fmt.Println("API")
+	// }
+	// if instAttrReg1&(0xf<<4) != 0 {
+	//	fmt.Println("APA")
+	// }
+	f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP)
+
+	// Store
+	c.featureSet.or(f)
+}
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
new file mode 100644
index 0000000..e9c8606
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+//+build !amd64,!386,!arm64 gccgo noasm appengine
+
+package cpuid
+
+func initCPU() {
+	cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
+	cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 }
+	xgetbv = func(uint32) (a, b uint32) { return 0, 0 }
+	rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 }
+}
+
+func addInfo(info *CPUInfo, safe bool) {}
diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
new file mode 100644
index 0000000..367c35c
--- /dev/null
+++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+ +//+build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +func asmDarwinHasAVX512() bool + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm + darwinHasAVX512 = asmDarwinHasAVX512 +} + +func addInfo(c *CPUInfo, safe bool) { + c.maxFunc = maxFunctionID() + c.maxExFunc = maxExtendedFunction() + c.BrandName = brandName() + c.CacheLine = cacheLine() + c.Family, c.Model = familyModel() + c.featureSet = support() + c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) + c.ThreadsPerCore = threadsPerCore() + c.LogicalCores = logicalCores() + c.PhysicalCores = physicalCores() + c.VendorID, c.VendorString = vendorID() + c.cacheSize() + c.frequencies() +} diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go new file mode 100644 index 0000000..b1fe42e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -0,0 +1,185 @@ +// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT. + +package cpuid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ADX-1] + _ = x[AESNI-2] + _ = x[AMD3DNOW-3] + _ = x[AMD3DNOWEXT-4] + _ = x[AMXBF16-5] + _ = x[AMXINT8-6] + _ = x[AMXTILE-7] + _ = x[AVX-8] + _ = x[AVX2-9] + _ = x[AVX512BF16-10] + _ = x[AVX512BITALG-11] + _ = x[AVX512BW-12] + _ = x[AVX512CD-13] + _ = x[AVX512DQ-14] + _ = x[AVX512ER-15] + _ = x[AVX512F-16] + _ = x[AVX512FP16-17] + _ = x[AVX512IFMA-18] + _ = x[AVX512PF-19] + _ = x[AVX512VBMI-20] + _ = x[AVX512VBMI2-21] + _ = x[AVX512VL-22] + _ = x[AVX512VNNI-23] + _ = x[AVX512VP2INTERSECT-24] + _ = x[AVX512VPOPCNTDQ-25] + _ = x[AVXSLOW-26] + _ = x[BMI1-27] + _ = x[BMI2-28] + _ = x[CLDEMOTE-29] + _ = x[CLMUL-30] + _ = x[CLZERO-31] + _ = x[CMOV-32] + _ = x[CPBOOST-33] + _ = x[CX16-34] + _ = x[ENQCMD-35] + _ = x[ERMS-36] + _ = x[F16C-37] + _ = x[FMA3-38] + _ = x[FMA4-39] + _ = x[GFNI-40] + _ = x[HLE-41] + _ = x[HTT-42] + _ = x[HWA-43] + _ = x[HYPERVISOR-44] + _ = x[IBPB-45] + _ = x[IBS-46] + _ = x[IBSBRNTRGT-47] + _ = x[IBSFETCHSAM-48] + _ = x[IBSFFV-49] + _ = x[IBSOPCNT-50] + _ = x[IBSOPCNTEXT-51] + _ = x[IBSOPSAM-52] + _ = x[IBSRDWROPCNT-53] + _ = x[IBSRIPINVALIDCHK-54] + _ = x[INT_WBINVD-55] + _ = x[INVLPGB-56] + _ = x[LZCNT-57] + _ = x[MCAOVERFLOW-58] + _ = x[MCOMMIT-59] + _ = x[MMX-60] + _ = x[MMXEXT-61] + _ = x[MOVDIR64B-62] + _ = x[MOVDIRI-63] + _ = x[MPX-64] + _ = x[MSRIRC-65] + _ = x[NX-66] + _ = x[POPCNT-67] + _ = x[RDPRU-68] + _ = x[RDRAND-69] + _ = x[RDSEED-70] + _ = x[RDTSCP-71] + _ = x[RTM-72] + _ = x[RTM_ALWAYS_ABORT-73] + _ = x[SERIALIZE-74] + _ = x[SGX-75] + _ = x[SGXLC-76] + _ = x[SHA-77] + _ = x[SSE-78] + _ = x[SSE2-79] + _ = x[SSE3-80] + _ = x[SSE4-81] + _ = x[SSE42-82] + _ = x[SSE4A-83] + _ = x[SSSE3-84] + _ = x[STIBP-85] + _ = x[SUCCOR-86] + _ = x[TBM-87] + _ = x[TSXLDTRK-88] + _ = x[VAES-89] + _ = x[VMX-90] + _ = x[VPCLMULQDQ-91] + _ = x[WAITPKG-92] + _ = x[WBNOINVD-93] + _ = x[XOP-94] + _ = x[AESARM-95] + _ = x[ARMCPUID-96] + _ = x[ASIMD-97] + _ = x[ASIMDDP-98] + _ = x[ASIMDHP-99] + _ = x[ASIMDRDM-100] + _ = x[ATOMICS-101] + _ = x[CRC32-102] + _ = x[DCPOP-103] + 
_ = x[EVTSTRM-104] + _ = x[FCMA-105] + _ = x[FP-106] + _ = x[FPHP-107] + _ = x[GPA-108] + _ = x[JSCVT-109] + _ = x[LRCPC-110] + _ = x[PMULL-111] + _ = x[SHA1-112] + _ = x[SHA2-113] + _ = x[SHA3-114] + _ = x[SHA512-115] + _ = x[SM3-116] + _ = x[SM4-117] + _ = x[SVE-118] + _ = x[lastID-119] + _ = x[firstID-0] +} + +const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXSLOWBMI1BMI2CLDEMOTECLMULCLZEROCMOVCPBOOSTCX16ENQCMDERMSF16CFMA3FMA4GFNIHLEHTTHWAHYPERVISORIBPBIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKINT_WBINVDINVLPGBLZCNTMCAOVERFLOWMCOMMITMMXMMXEXTMOVDIR64BMOVDIRIMPXMSRIRCNXPOPCNTRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSERIALIZESGXSGXLCSHASSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSUCCORTBMTSXLDTRKVAESVMXVPCLMULQDQWAITPKGWBNOINVDXOPAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" + +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 58, 62, 72, 84, 92, 100, 108, 116, 123, 133, 143, 151, 161, 172, 180, 190, 208, 223, 230, 234, 238, 246, 251, 257, 261, 268, 272, 278, 282, 286, 290, 294, 298, 301, 304, 307, 317, 321, 324, 334, 345, 351, 359, 370, 378, 390, 406, 416, 423, 428, 439, 446, 449, 455, 464, 471, 474, 480, 482, 488, 493, 499, 505, 511, 514, 530, 539, 542, 547, 550, 553, 557, 561, 565, 570, 575, 580, 585, 591, 594, 602, 606, 609, 619, 626, 634, 637, 643, 651, 656, 663, 670, 678, 685, 690, 695, 702, 706, 708, 712, 715, 720, 725, 730, 734, 738, 742, 748, 751, 754, 757, 763} + +func (i FeatureID) String() string { + if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { + return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[VendorUnknown-0] + _ = x[Intel-1] + _ = x[AMD-2] + _ = x[VIA-3] + _ = x[Transmeta-4] + _ = x[NSC-5] + _ = x[KVM-6] + _ = x[MSVM-7] + _ = x[VMware-8] + _ = x[XenHVM-9] + _ = x[Bhyve-10] + _ = x[Hygon-11] + _ = x[SiS-12] + _ = x[RDC-13] + _ = x[Ampere-14] + _ = x[ARM-15] + _ = x[Broadcom-16] + _ = x[Cavium-17] + _ = x[DEC-18] + _ = x[Fujitsu-19] + _ = x[Infineon-20] + _ = x[Motorola-21] + _ = x[NVIDIA-22] + _ = x[AMCC-23] + _ = x[Qualcomm-24] + _ = x[Marvell-25] + _ = x[lastVendor-26] +} + +const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor" + +var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155} + +func (i Vendor) String() string { + if i < 0 || i >= Vendor(len(_Vendor_index)-1) { + return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]] +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go new file mode 100644 index 0000000..8d2cb03 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go @@ -0,0 +1,19 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. 
+ +package cpuid + +import "runtime" + +func detectOS(c *CPUInfo) bool { + // There are no hw.optional sysctl values for the below features on Mac OS 11.0 + // to detect their supported state dynamically. Assume the CPU features that + // Apple Silicon M1 supports to be available as a minimal set of features + // to all Go programs running on darwin/arm64. + // TODO: Add more if we know them. + c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2) + c.PhysicalCores = runtime.NumCPU() + // For now assuming 1 thread per core... + c.ThreadsPerCore = 1 + c.LogicalCores = c.PhysicalCores + return true +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go new file mode 100644 index 0000000..ee278b9 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go @@ -0,0 +1,130 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file located +// here https://github.com/golang/sys/blob/master/LICENSE + +package cpuid + +import ( + "encoding/binary" + "io/ioutil" + "runtime" +) + +// HWCAP bits. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func detectOS(c *CPUInfo) bool { + // For now assuming no hyperthreading is reasonable. + c.LogicalCores = runtime.NumCPU() + c.PhysicalCores = c.LogicalCores + c.ThreadsPerCore = 1 + if hwcap == 0 { + // We did not get values from the runtime. + // Try reading /proc/self/auxv + + // From https://github.com/golang/sys + const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + uintSize = int(32 << (^uint(0) >> 63)) + ) + + buf, err := ioutil.ReadFile("/proc/self/auxv") + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. + return false + } + bo := binary.LittleEndian + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwcap = val + case _AT_HWCAP2: + // Not used + } + } + if hwcap == 0 { + return false + } + } + + // HWCap was populated by the runtime from the auxiliary vector. + // Use HWCap information since reading aarch64 system registers + // is not supported in user space on older linux kernels. 
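+
+	// The unrolled setIf calls below translate one HWCAP bit to one FeatureID
+	// each. As an editorial sketch (not this package's code), the same mapping
+	// could be table-driven; only the android ATOMICS special case at the end
+	// would still need its own guard:
+	//
+	//	var hwcapFeatures = []struct {
+	//		bit uint
+	//		id  FeatureID
+	//	}{
+	//		{hwcap_AES, AESARM}, {hwcap_ASIMD, ASIMD}, {hwcap_SVE, SVE},
+	//		// ...one entry per HWCAP constant above...
+	//	}
+	//	for _, hf := range hwcapFeatures {
+	//		c.featureSet.setIf(isSet(hwcap, hf.bit), hf.id)
+	//	}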
+ c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM) + c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID) + c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32) + c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP) + c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM) + c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA) + c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP) + c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP) + c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT) + c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC) + c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512) + c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3) + c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4) + c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE) + + // The Samsung S9+ kernel reports support for atomics, but not all cores + // actually support them, resulting in SIGILL. See issue #28431. + // TODO(elias.naur): Only disable the optimization on bad chipsets on android. + c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS) + + return true +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go new file mode 100644 index 0000000..1a951e6 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go @@ -0,0 +1,17 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +// +build arm64 +// +build !linux +// +build !darwin + +package cpuid + +import "runtime" + +func detectOS(c *CPUInfo) bool { + c.PhysicalCores = runtime.NumCPU() + // For now assuming 1 thread per core... + c.ThreadsPerCore = 1 + c.LogicalCores = c.PhysicalCores + return false +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go new file mode 100644 index 0000000..4d0b8b4 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go @@ -0,0 +1,7 @@ +// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. + +//+build nounsafe + +package cpuid + +var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go new file mode 100644 index 0000000..3298002 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. + +//+build !nounsafe + +package cpuid + +import _ "unsafe" // needed for go:linkname + +//go:linkname hwcap internal/cpu.HWCap +var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh new file mode 100644 index 0000000..471d986 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +go tool dist list | while IFS=/ read os arch; do + echo "Checking $os/$arch..." 
+ echo " normal" + GOARCH=$arch GOOS=$os go build -o /dev/null . + echo " noasm" + GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null . + echo " appengine" + GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null . + echo " noasm,appengine" + GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null . +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 39bbcf0..d569c0c 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,5 +1,5 @@ -//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine -// +build darwin freebsd openbsd netbsd dragonfly +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine +// +build darwin freebsd openbsd netbsd dragonfly hurd // +build !appengine package isatty diff --git a/vendor/github.com/twitchyliquid64/golang-asm/LICENSE b/vendor/github.com/twitchyliquid64/golang-asm/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arch.go b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arch.go new file mode 100644 index 0000000..b8ddbc9 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arch.go @@ -0,0 +1,716 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package arch defines architecture-specific information and support functions. 
+package arch + +import ( + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/arm" + "github.com/twitchyliquid64/golang-asm/obj/arm64" + "github.com/twitchyliquid64/golang-asm/obj/mips" + "github.com/twitchyliquid64/golang-asm/obj/ppc64" + "github.com/twitchyliquid64/golang-asm/obj/riscv" + "github.com/twitchyliquid64/golang-asm/obj/s390x" + "github.com/twitchyliquid64/golang-asm/obj/wasm" + "github.com/twitchyliquid64/golang-asm/obj/x86" + "fmt" + "strings" +) + +// Pseudo-registers whose names are the constant name without the leading R. +const ( + RFP = -(iota + 1) + RSB + RSP + RPC +) + +// Arch wraps the link architecture object with more architecture-specific information. +type Arch struct { + *obj.LinkArch + // Map of instruction names to enumeration. + Instructions map[string]obj.As + // Map of register names to enumeration. + Register map[string]int16 + // Table of register prefix names. These are things like R for R(0) and SPR for SPR(268). + RegisterPrefix map[string]bool + // RegisterNumber converts R(10) into arm.REG_R10. + RegisterNumber func(string, int16) (int16, bool) + // Instruction is a jump. + IsJump func(word string) bool +} + +// nilRegisterNumber is the register number function for architectures +// that do not accept the R(N) notation. It always returns failure. +func nilRegisterNumber(name string, n int16) (int16, bool) { + return 0, false +} + +// Set configures the architecture specified by GOARCH and returns its representation. +// It returns nil if GOARCH is not recognized. +func Set(GOARCH string) *Arch { + switch GOARCH { + case "386": + return archX86(&x86.Link386) + case "amd64": + return archX86(&x86.Linkamd64) + case "arm": + return archArm() + case "arm64": + return archArm64() + case "mips": + return archMips(&mips.Linkmips) + case "mipsle": + return archMips(&mips.Linkmipsle) + case "mips64": + return archMips64(&mips.Linkmips64) + case "mips64le": + return archMips64(&mips.Linkmips64le) + case "ppc64": + return archPPC64(&ppc64.Linkppc64) + case "ppc64le": + return archPPC64(&ppc64.Linkppc64le) + case "riscv64": + return archRISCV64() + case "s390x": + return archS390x() + case "wasm": + return archWasm() + } + return nil +} + +func jumpX86(word string) bool { + return word[0] == 'J' || word == "CALL" || strings.HasPrefix(word, "LOOP") || word == "XBEGIN" +} + +func jumpRISCV(word string) bool { + switch word { + case "BEQ", "BEQZ", "BGE", "BGEU", "BGEZ", "BGT", "BGTU", "BGTZ", "BLE", "BLEU", "BLEZ", + "BLT", "BLTU", "BLTZ", "BNE", "BNEZ", "CALL", "JAL", "JALR", "JMP": + return true + } + return false +} + +func jumpWasm(word string) bool { + return word == "JMP" || word == "CALL" || word == "Call" || word == "Br" || word == "BrIf" +} + +func archX86(linkArch *obj.LinkArch) *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + for i, s := range x86.Register { + register[s] = int16(i + x86.REG_AL) + } + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + // Register prefix not used on this architecture. + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range x86.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseAMD64 + } + } + // Annoying aliases. 
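+
+	// Editorial usage sketch for the Arch value built here (hypothetical
+	// caller code, assuming this package is imported as "arch"):
+	//
+	//	a := arch.Set("amd64")      // nil for an unrecognized GOARCH
+	//	op := a.Instructions["JMP"] // the obj.As for a mnemonic
+	//	jmp := a.IsJump("JMP")      // true: jumpX86 treats J*/CALL/LOOP*/XBEGIN as jumps
+	//	_, _ = op, jmp
+	//
+	// The alias entries just below fold mnemonic synonyms (JA/JHI, JAE/JCC,
+	// and so on) into the same obj.As values, so either spelling resolves.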
+ instructions["JA"] = x86.AJHI /* alternate */ + instructions["JAE"] = x86.AJCC /* alternate */ + instructions["JB"] = x86.AJCS /* alternate */ + instructions["JBE"] = x86.AJLS /* alternate */ + instructions["JC"] = x86.AJCS /* alternate */ + instructions["JCC"] = x86.AJCC /* carry clear (CF = 0) */ + instructions["JCS"] = x86.AJCS /* carry set (CF = 1) */ + instructions["JE"] = x86.AJEQ /* alternate */ + instructions["JEQ"] = x86.AJEQ /* equal (ZF = 1) */ + instructions["JG"] = x86.AJGT /* alternate */ + instructions["JGE"] = x86.AJGE /* greater than or equal (signed) (SF = OF) */ + instructions["JGT"] = x86.AJGT /* greater than (signed) (ZF = 0 && SF = OF) */ + instructions["JHI"] = x86.AJHI /* higher (unsigned) (CF = 0 && ZF = 0) */ + instructions["JHS"] = x86.AJCC /* alternate */ + instructions["JL"] = x86.AJLT /* alternate */ + instructions["JLE"] = x86.AJLE /* less than or equal (signed) (ZF = 1 || SF != OF) */ + instructions["JLO"] = x86.AJCS /* alternate */ + instructions["JLS"] = x86.AJLS /* lower or same (unsigned) (CF = 1 || ZF = 1) */ + instructions["JLT"] = x86.AJLT /* less than (signed) (SF != OF) */ + instructions["JMI"] = x86.AJMI /* negative (minus) (SF = 1) */ + instructions["JNA"] = x86.AJLS /* alternate */ + instructions["JNAE"] = x86.AJCS /* alternate */ + instructions["JNB"] = x86.AJCC /* alternate */ + instructions["JNBE"] = x86.AJHI /* alternate */ + instructions["JNC"] = x86.AJCC /* alternate */ + instructions["JNE"] = x86.AJNE /* not equal (ZF = 0) */ + instructions["JNG"] = x86.AJLE /* alternate */ + instructions["JNGE"] = x86.AJLT /* alternate */ + instructions["JNL"] = x86.AJGE /* alternate */ + instructions["JNLE"] = x86.AJGT /* alternate */ + instructions["JNO"] = x86.AJOC /* alternate */ + instructions["JNP"] = x86.AJPC /* alternate */ + instructions["JNS"] = x86.AJPL /* alternate */ + instructions["JNZ"] = x86.AJNE /* alternate */ + instructions["JO"] = x86.AJOS /* alternate */ + instructions["JOC"] = x86.AJOC /* overflow clear (OF = 0) */ + instructions["JOS"] = x86.AJOS /* overflow set (OF = 1) */ + instructions["JP"] = x86.AJPS /* alternate */ + instructions["JPC"] = x86.AJPC /* parity clear (PF = 0) */ + instructions["JPE"] = x86.AJPS /* alternate */ + instructions["JPL"] = x86.AJPL /* non-negative (plus) (SF = 0) */ + instructions["JPO"] = x86.AJPC /* alternate */ + instructions["JPS"] = x86.AJPS /* parity set (PF = 1) */ + instructions["JS"] = x86.AJMI /* alternate */ + instructions["JZ"] = x86.AJEQ /* alternate */ + instructions["MASKMOVDQU"] = x86.AMASKMOVOU + instructions["MOVD"] = x86.AMOVQ + instructions["MOVDQ2Q"] = x86.AMOVQ + instructions["MOVNTDQ"] = x86.AMOVNTO + instructions["MOVOA"] = x86.AMOVO + instructions["PSLLDQ"] = x86.APSLLO + instructions["PSRLDQ"] = x86.APSRLO + instructions["PADDD"] = x86.APADDL + + return &Arch{ + LinkArch: linkArch, + Instructions: instructions, + Register: register, + RegisterPrefix: nil, + RegisterNumber: nilRegisterNumber, + IsJump: jumpX86, + } +} + +func archArm() *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. + for i := arm.REG_R0; i < arm.REG_SPSR; i++ { + register[obj.Rconv(i)] = int16(i) + } + // Avoid unintentionally clobbering g using R10. + delete(register, "R10") + register["g"] = arm.REG_R10 + for i := 0; i < 16; i++ { + register[fmt.Sprintf("C%d", i)] = int16(i) + } + + // Pseudo-registers. 
+ register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + register["SP"] = RSP + registerPrefix := map[string]bool{ + "F": true, + "R": true, + } + + // special operands for DMB/DSB instructions + register["MB_SY"] = arm.REG_MB_SY + register["MB_ST"] = arm.REG_MB_ST + register["MB_ISH"] = arm.REG_MB_ISH + register["MB_ISHST"] = arm.REG_MB_ISHST + register["MB_NSH"] = arm.REG_MB_NSH + register["MB_NSHST"] = arm.REG_MB_NSHST + register["MB_OSH"] = arm.REG_MB_OSH + register["MB_OSHST"] = arm.REG_MB_OSHST + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range arm.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseARM + } + } + // Annoying aliases. + instructions["B"] = obj.AJMP + instructions["BL"] = obj.ACALL + // MCR differs from MRC by the way fields of the word are encoded. + // (Details in arm.go). Here we add the instruction so parse will find + // it, but give it an opcode number known only to us. + instructions["MCR"] = aMCR + + return &Arch{ + LinkArch: &arm.Linkarm, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: armRegisterNumber, + IsJump: jumpArm, + } +} + +func archArm64() *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for 386 and amd64. + register[obj.Rconv(arm64.REGSP)] = int16(arm64.REGSP) + for i := arm64.REG_R0; i <= arm64.REG_R31; i++ { + register[obj.Rconv(i)] = int16(i) + } + // Rename R18 to R18_PLATFORM to avoid accidental use. + register["R18_PLATFORM"] = register["R18"] + delete(register, "R18") + for i := arm64.REG_F0; i <= arm64.REG_F31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := arm64.REG_V0; i <= arm64.REG_V31; i++ { + register[obj.Rconv(i)] = int16(i) + } + + // System registers. + for i := 0; i < len(arm64.SystemReg); i++ { + register[arm64.SystemReg[i].Name] = arm64.SystemReg[i].Reg + } + + register["LR"] = arm64.REGLINK + register["DAIFSet"] = arm64.REG_DAIFSet + register["DAIFClr"] = arm64.REG_DAIFClr + register["PLDL1KEEP"] = arm64.REG_PLDL1KEEP + register["PLDL1STRM"] = arm64.REG_PLDL1STRM + register["PLDL2KEEP"] = arm64.REG_PLDL2KEEP + register["PLDL2STRM"] = arm64.REG_PLDL2STRM + register["PLDL3KEEP"] = arm64.REG_PLDL3KEEP + register["PLDL3STRM"] = arm64.REG_PLDL3STRM + register["PLIL1KEEP"] = arm64.REG_PLIL1KEEP + register["PLIL1STRM"] = arm64.REG_PLIL1STRM + register["PLIL2KEEP"] = arm64.REG_PLIL2KEEP + register["PLIL2STRM"] = arm64.REG_PLIL2STRM + register["PLIL3KEEP"] = arm64.REG_PLIL3KEEP + register["PLIL3STRM"] = arm64.REG_PLIL3STRM + register["PSTL1KEEP"] = arm64.REG_PSTL1KEEP + register["PSTL1STRM"] = arm64.REG_PSTL1STRM + register["PSTL2KEEP"] = arm64.REG_PSTL2KEEP + register["PSTL2STRM"] = arm64.REG_PSTL2STRM + register["PSTL3KEEP"] = arm64.REG_PSTL3KEEP + register["PSTL3STRM"] = arm64.REG_PSTL3STRM + + // Conditional operators, like EQ, NE, etc. 
+ register["EQ"] = arm64.COND_EQ + register["NE"] = arm64.COND_NE + register["HS"] = arm64.COND_HS + register["CS"] = arm64.COND_HS + register["LO"] = arm64.COND_LO + register["CC"] = arm64.COND_LO + register["MI"] = arm64.COND_MI + register["PL"] = arm64.COND_PL + register["VS"] = arm64.COND_VS + register["VC"] = arm64.COND_VC + register["HI"] = arm64.COND_HI + register["LS"] = arm64.COND_LS + register["GE"] = arm64.COND_GE + register["LT"] = arm64.COND_LT + register["GT"] = arm64.COND_GT + register["LE"] = arm64.COND_LE + register["AL"] = arm64.COND_AL + register["NV"] = arm64.COND_NV + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + register["SP"] = RSP + // Avoid unintentionally clobbering g using R28. + delete(register, "R28") + register["g"] = arm64.REG_R28 + registerPrefix := map[string]bool{ + "F": true, + "R": true, + "V": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range arm64.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseARM64 + } + } + // Annoying aliases. + instructions["B"] = arm64.AB + instructions["BL"] = arm64.ABL + + return &Arch{ + LinkArch: &arm64.Linkarm64, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: arm64RegisterNumber, + IsJump: jumpArm64, + } + +} + +func archPPC64(linkArch *obj.LinkArch) *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. + for i := ppc64.REG_R0; i <= ppc64.REG_R31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_F0; i <= ppc64.REG_F31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_V0; i <= ppc64.REG_V31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_VS0; i <= ppc64.REG_VS63; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_CR0; i <= ppc64.REG_CR7; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := ppc64.REG_MSR; i <= ppc64.REG_CR; i++ { + register[obj.Rconv(i)] = int16(i) + } + register["CR"] = ppc64.REG_CR + register["XER"] = ppc64.REG_XER + register["LR"] = ppc64.REG_LR + register["CTR"] = ppc64.REG_CTR + register["FPSCR"] = ppc64.REG_FPSCR + register["MSR"] = ppc64.REG_MSR + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + // Avoid unintentionally clobbering g using R30. + delete(register, "R30") + register["g"] = ppc64.REG_R30 + registerPrefix := map[string]bool{ + "CR": true, + "F": true, + "R": true, + "SPR": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range ppc64.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABasePPC64 + } + } + // Annoying aliases. + instructions["BR"] = ppc64.ABR + instructions["BL"] = ppc64.ABL + + return &Arch{ + LinkArch: linkArch, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: ppc64RegisterNumber, + IsJump: jumpPPC64, + } +} + +func archMips(linkArch *obj.LinkArch) *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. 
+ for i := mips.REG_R0; i <= mips.REG_R31; i++ { + register[obj.Rconv(i)] = int16(i) + } + + for i := mips.REG_F0; i <= mips.REG_F31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_M0; i <= mips.REG_M31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ { + register[obj.Rconv(i)] = int16(i) + } + register["HI"] = mips.REG_HI + register["LO"] = mips.REG_LO + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + // Avoid unintentionally clobbering g using R30. + delete(register, "R30") + register["g"] = mips.REG_R30 + + registerPrefix := map[string]bool{ + "F": true, + "FCR": true, + "M": true, + "R": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range mips.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseMIPS + } + } + // Annoying alias. + instructions["JAL"] = mips.AJAL + + return &Arch{ + LinkArch: linkArch, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: mipsRegisterNumber, + IsJump: jumpMIPS, + } +} + +func archMips64(linkArch *obj.LinkArch) *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. + for i := mips.REG_R0; i <= mips.REG_R31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_F0; i <= mips.REG_F31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_M0; i <= mips.REG_M31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := mips.REG_W0; i <= mips.REG_W31; i++ { + register[obj.Rconv(i)] = int16(i) + } + register["HI"] = mips.REG_HI + register["LO"] = mips.REG_LO + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + // Avoid unintentionally clobbering g using R30. + delete(register, "R30") + register["g"] = mips.REG_R30 + // Avoid unintentionally clobbering RSB using R28. + delete(register, "R28") + register["RSB"] = mips.REG_R28 + registerPrefix := map[string]bool{ + "F": true, + "FCR": true, + "M": true, + "R": true, + "W": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range mips.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseMIPS + } + } + // Annoying alias. + instructions["JAL"] = mips.AJAL + + return &Arch{ + LinkArch: linkArch, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: mipsRegisterNumber, + IsJump: jumpMIPS, + } +} + +func archRISCV64() *Arch { + register := make(map[string]int16) + + // Standard register names. + for i := riscv.REG_X0; i <= riscv.REG_X31; i++ { + name := fmt.Sprintf("X%d", i-riscv.REG_X0) + register[name] = int16(i) + } + for i := riscv.REG_F0; i <= riscv.REG_F31; i++ { + name := fmt.Sprintf("F%d", i-riscv.REG_F0) + register[name] = int16(i) + } + + // General registers with ABI names. 
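+
+	// The ABI-name assignments below follow the standard RISC-V psABI
+	// correspondence with the numeric X registers; as an editorial summary:
+	//
+	//	ZERO=X0 RA=X1 SP=X2 GP=X3 TP=X4
+	//	T0-T2=X5-X7 S0=X8 S1=X9 A0-A7=X10-X17
+	//	S2-S11=X18-X27 T3-T6=X28-X31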
+ register["ZERO"] = riscv.REG_ZERO + register["RA"] = riscv.REG_RA + register["SP"] = riscv.REG_SP + register["GP"] = riscv.REG_GP + register["TP"] = riscv.REG_TP + register["T0"] = riscv.REG_T0 + register["T1"] = riscv.REG_T1 + register["T2"] = riscv.REG_T2 + register["S0"] = riscv.REG_S0 + register["S1"] = riscv.REG_S1 + register["A0"] = riscv.REG_A0 + register["A1"] = riscv.REG_A1 + register["A2"] = riscv.REG_A2 + register["A3"] = riscv.REG_A3 + register["A4"] = riscv.REG_A4 + register["A5"] = riscv.REG_A5 + register["A6"] = riscv.REG_A6 + register["A7"] = riscv.REG_A7 + register["S2"] = riscv.REG_S2 + register["S3"] = riscv.REG_S3 + register["S4"] = riscv.REG_S4 + register["S5"] = riscv.REG_S5 + register["S6"] = riscv.REG_S6 + register["S7"] = riscv.REG_S7 + register["S8"] = riscv.REG_S8 + register["S9"] = riscv.REG_S9 + register["S10"] = riscv.REG_S10 + register["S11"] = riscv.REG_S11 + register["T3"] = riscv.REG_T3 + register["T4"] = riscv.REG_T4 + register["T5"] = riscv.REG_T5 + register["T6"] = riscv.REG_T6 + + // Go runtime register names. + register["g"] = riscv.REG_G + register["CTXT"] = riscv.REG_CTXT + register["TMP"] = riscv.REG_TMP + + // ABI names for floating point register. + register["FT0"] = riscv.REG_FT0 + register["FT1"] = riscv.REG_FT1 + register["FT2"] = riscv.REG_FT2 + register["FT3"] = riscv.REG_FT3 + register["FT4"] = riscv.REG_FT4 + register["FT5"] = riscv.REG_FT5 + register["FT6"] = riscv.REG_FT6 + register["FT7"] = riscv.REG_FT7 + register["FS0"] = riscv.REG_FS0 + register["FS1"] = riscv.REG_FS1 + register["FA0"] = riscv.REG_FA0 + register["FA1"] = riscv.REG_FA1 + register["FA2"] = riscv.REG_FA2 + register["FA3"] = riscv.REG_FA3 + register["FA4"] = riscv.REG_FA4 + register["FA5"] = riscv.REG_FA5 + register["FA6"] = riscv.REG_FA6 + register["FA7"] = riscv.REG_FA7 + register["FS2"] = riscv.REG_FS2 + register["FS3"] = riscv.REG_FS3 + register["FS4"] = riscv.REG_FS4 + register["FS5"] = riscv.REG_FS5 + register["FS6"] = riscv.REG_FS6 + register["FS7"] = riscv.REG_FS7 + register["FS8"] = riscv.REG_FS8 + register["FS9"] = riscv.REG_FS9 + register["FS10"] = riscv.REG_FS10 + register["FS11"] = riscv.REG_FS11 + register["FT8"] = riscv.REG_FT8 + register["FT9"] = riscv.REG_FT9 + register["FT10"] = riscv.REG_FT10 + register["FT11"] = riscv.REG_FT11 + + // Pseudo-registers. + register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range riscv.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseRISCV + } + } + + return &Arch{ + LinkArch: &riscv.LinkRISCV64, + Instructions: instructions, + Register: register, + RegisterPrefix: nil, + RegisterNumber: nilRegisterNumber, + IsJump: jumpRISCV, + } +} + +func archS390x() *Arch { + register := make(map[string]int16) + // Create maps for easy lookup of instruction names etc. + // Note that there is no list of names as there is for x86. + for i := s390x.REG_R0; i <= s390x.REG_R15; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := s390x.REG_F0; i <= s390x.REG_F15; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := s390x.REG_V0; i <= s390x.REG_V31; i++ { + register[obj.Rconv(i)] = int16(i) + } + for i := s390x.REG_AR0; i <= s390x.REG_AR15; i++ { + register[obj.Rconv(i)] = int16(i) + } + register["LR"] = s390x.REG_LR + // Pseudo-registers. 
+ register["SB"] = RSB + register["FP"] = RFP + register["PC"] = RPC + // Avoid unintentionally clobbering g using R13. + delete(register, "R13") + register["g"] = s390x.REG_R13 + registerPrefix := map[string]bool{ + "AR": true, + "F": true, + "R": true, + } + + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range s390x.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseS390X + } + } + // Annoying aliases. + instructions["BR"] = s390x.ABR + instructions["BL"] = s390x.ABL + + return &Arch{ + LinkArch: &s390x.Links390x, + Instructions: instructions, + Register: register, + RegisterPrefix: registerPrefix, + RegisterNumber: s390xRegisterNumber, + IsJump: jumpS390x, + } +} + +func archWasm() *Arch { + instructions := make(map[string]obj.As) + for i, s := range obj.Anames { + instructions[s] = obj.As(i) + } + for i, s := range wasm.Anames { + if obj.As(i) >= obj.A_ARCHSPECIFIC { + instructions[s] = obj.As(i) + obj.ABaseWasm + } + } + + return &Arch{ + LinkArch: &wasm.Linkwasm, + Instructions: instructions, + Register: wasm.Register, + RegisterPrefix: nil, + RegisterNumber: nilRegisterNumber, + IsJump: jumpWasm, + } +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm.go b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm.go new file mode 100644 index 0000000..645e98a --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm.go @@ -0,0 +1,257 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the ARM +// instruction set, to minimize its interaction with the core of the +// assembler. + +package arch + +import ( + "strings" + + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/arm" +) + +var armLS = map[string]uint8{ + "U": arm.C_UBIT, + "S": arm.C_SBIT, + "W": arm.C_WBIT, + "P": arm.C_PBIT, + "PW": arm.C_WBIT | arm.C_PBIT, + "WP": arm.C_WBIT | arm.C_PBIT, +} + +var armSCOND = map[string]uint8{ + "EQ": arm.C_SCOND_EQ, + "NE": arm.C_SCOND_NE, + "CS": arm.C_SCOND_HS, + "HS": arm.C_SCOND_HS, + "CC": arm.C_SCOND_LO, + "LO": arm.C_SCOND_LO, + "MI": arm.C_SCOND_MI, + "PL": arm.C_SCOND_PL, + "VS": arm.C_SCOND_VS, + "VC": arm.C_SCOND_VC, + "HI": arm.C_SCOND_HI, + "LS": arm.C_SCOND_LS, + "GE": arm.C_SCOND_GE, + "LT": arm.C_SCOND_LT, + "GT": arm.C_SCOND_GT, + "LE": arm.C_SCOND_LE, + "AL": arm.C_SCOND_NONE, + "U": arm.C_UBIT, + "S": arm.C_SBIT, + "W": arm.C_WBIT, + "P": arm.C_PBIT, + "PW": arm.C_WBIT | arm.C_PBIT, + "WP": arm.C_WBIT | arm.C_PBIT, + "F": arm.C_FBIT, + "IBW": arm.C_WBIT | arm.C_PBIT | arm.C_UBIT, + "IAW": arm.C_WBIT | arm.C_UBIT, + "DBW": arm.C_WBIT | arm.C_PBIT, + "DAW": arm.C_WBIT, + "IB": arm.C_PBIT | arm.C_UBIT, + "IA": arm.C_UBIT, + "DB": arm.C_PBIT, + "DA": 0, +} + +var armJump = map[string]bool{ + "B": true, + "BL": true, + "BX": true, + "BEQ": true, + "BNE": true, + "BCS": true, + "BHS": true, + "BCC": true, + "BLO": true, + "BMI": true, + "BPL": true, + "BVS": true, + "BVC": true, + "BHI": true, + "BLS": true, + "BGE": true, + "BLT": true, + "BGT": true, + "BLE": true, + "CALL": true, + "JMP": true, +} + +func jumpArm(word string) bool { + return armJump[word] +} + +// IsARMCMP reports whether the op (as defined by an arm.A* constant) is +// one of the comparison instructions that require special handling. 
+func IsARMCMP(op obj.As) bool { + switch op { + case arm.ACMN, arm.ACMP, arm.ATEQ, arm.ATST: + return true + } + return false +} + +// IsARMSTREX reports whether the op (as defined by an arm.A* constant) is +// one of the STREX-like instructions that require special handling. +func IsARMSTREX(op obj.As) bool { + switch op { + case arm.ASTREX, arm.ASTREXD, arm.ASWPW, arm.ASWPBU: + return true + } + return false +} + +// MCR is not defined by the obj/arm; instead we define it privately here. +// It is encoded as an MRC with a bit inside the instruction word, +// passed to arch.ARMMRCOffset. +const aMCR = arm.ALAST + 1 + +// IsARMMRC reports whether the op (as defined by an arm.A* constant) is +// MRC or MCR +func IsARMMRC(op obj.As) bool { + switch op { + case arm.AMRC, aMCR: // Note: aMCR is defined in this package. + return true + } + return false +} + +// IsARMBFX reports whether the op (as defined by an arm.A* constant) is one the +// BFX-like instructions which are in the form of "op $width, $LSB, (Reg,) Reg". +func IsARMBFX(op obj.As) bool { + switch op { + case arm.ABFX, arm.ABFXU, arm.ABFC, arm.ABFI: + return true + } + return false +} + +// IsARMFloatCmp reports whether the op is a floating comparison instruction. +func IsARMFloatCmp(op obj.As) bool { + switch op { + case arm.ACMPF, arm.ACMPD: + return true + } + return false +} + +// ARMMRCOffset implements the peculiar encoding of the MRC and MCR instructions. +// The difference between MRC and MCR is represented by a bit high in the word, not +// in the usual way by the opcode itself. Asm must use AMRC for both instructions, so +// we return the opcode for MRC so that asm doesn't need to import obj/arm. +func ARMMRCOffset(op obj.As, cond string, x0, x1, x2, x3, x4, x5 int64) (offset int64, op0 obj.As, ok bool) { + op1 := int64(0) + if op == arm.AMRC { + op1 = 1 + } + bits, ok := ParseARMCondition(cond) + if !ok { + return + } + offset = (0xe << 24) | // opcode + (op1 << 20) | // MCR/MRC + ((int64(bits) ^ arm.C_SCOND_XOR) << 28) | // scond + ((x0 & 15) << 8) | //coprocessor number + ((x1 & 7) << 21) | // coprocessor operation + ((x2 & 15) << 12) | // ARM register + ((x3 & 15) << 16) | // Crn + ((x4 & 15) << 0) | // Crm + ((x5 & 7) << 5) | // coprocessor information + (1 << 4) /* must be set */ + return offset, arm.AMRC, true +} + +// IsARMMULA reports whether the op (as defined by an arm.A* constant) is +// MULA, MULS, MMULA, MMULS, MULABB, MULAWB or MULAWT, the 4-operand instructions. +func IsARMMULA(op obj.As) bool { + switch op { + case arm.AMULA, arm.AMULS, arm.AMMULA, arm.AMMULS, arm.AMULABB, arm.AMULAWB, arm.AMULAWT: + return true + } + return false +} + +var bcode = []obj.As{ + arm.ABEQ, + arm.ABNE, + arm.ABCS, + arm.ABCC, + arm.ABMI, + arm.ABPL, + arm.ABVS, + arm.ABVC, + arm.ABHI, + arm.ABLS, + arm.ABGE, + arm.ABLT, + arm.ABGT, + arm.ABLE, + arm.AB, + obj.ANOP, +} + +// ARMConditionCodes handles the special condition code situation for the ARM. +// It returns a boolean to indicate success; failure means cond was unrecognized. +func ARMConditionCodes(prog *obj.Prog, cond string) bool { + if cond == "" { + return true + } + bits, ok := ParseARMCondition(cond) + if !ok { + return false + } + /* hack to make B.NE etc. work: turn it into the corresponding conditional */ + if prog.As == arm.AB { + prog.As = bcode[(bits^arm.C_SCOND_XOR)&0xf] + bits = (bits &^ 0xf) | arm.C_SCOND_NONE + } + prog.Scond = bits + return true +} + +// ParseARMCondition parses the conditions attached to an ARM instruction. 
+// The input is a single string consisting of period-separated condition +// codes, such as ".P.W". An initial period is ignored. +func ParseARMCondition(cond string) (uint8, bool) { + return parseARMCondition(cond, armLS, armSCOND) +} + +func parseARMCondition(cond string, ls, scond map[string]uint8) (uint8, bool) { + cond = strings.TrimPrefix(cond, ".") + if cond == "" { + return arm.C_SCOND_NONE, true + } + names := strings.Split(cond, ".") + bits := uint8(0) + for _, name := range names { + if b, present := ls[name]; present { + bits |= b + continue + } + if b, present := scond[name]; present { + bits = (bits &^ arm.C_SCOND) | b + continue + } + return 0, false + } + return bits, true +} + +func armRegisterNumber(name string, n int16) (int16, bool) { + if n < 0 || 15 < n { + return 0, false + } + switch name { + case "R": + return arm.REG_R0 + n, true + case "F": + return arm.REG_F0 + n, true + } + return 0, false +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm64.go b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm64.go new file mode 100644 index 0000000..b0606be --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/arm64.go @@ -0,0 +1,350 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the ARM64 +// instruction set, to minimize its interaction with the core of the +// assembler. + +package arch + +import ( + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/arm64" + "errors" +) + +var arm64LS = map[string]uint8{ + "P": arm64.C_XPOST, + "W": arm64.C_XPRE, +} + +var arm64Jump = map[string]bool{ + "B": true, + "BL": true, + "BEQ": true, + "BNE": true, + "BCS": true, + "BHS": true, + "BCC": true, + "BLO": true, + "BMI": true, + "BPL": true, + "BVS": true, + "BVC": true, + "BHI": true, + "BLS": true, + "BGE": true, + "BLT": true, + "BGT": true, + "BLE": true, + "CALL": true, + "CBZ": true, + "CBZW": true, + "CBNZ": true, + "CBNZW": true, + "JMP": true, + "TBNZ": true, + "TBZ": true, +} + +func jumpArm64(word string) bool { + return arm64Jump[word] +} + +// IsARM64CMP reports whether the op (as defined by an arm.A* constant) is +// one of the comparison instructions that require special handling. +func IsARM64CMP(op obj.As) bool { + switch op { + case arm64.ACMN, arm64.ACMP, arm64.ATST, + arm64.ACMNW, arm64.ACMPW, arm64.ATSTW, + arm64.AFCMPS, arm64.AFCMPD, + arm64.AFCMPES, arm64.AFCMPED: + return true + } + return false +} + +// IsARM64STLXR reports whether the op (as defined by an arm64.A* +// constant) is one of the STLXR-like instructions that require special +// handling. +func IsARM64STLXR(op obj.As) bool { + switch op { + case arm64.ASTLXRB, arm64.ASTLXRH, arm64.ASTLXRW, arm64.ASTLXR, + arm64.ASTXRB, arm64.ASTXRH, arm64.ASTXRW, arm64.ASTXR, + arm64.ASTXP, arm64.ASTXPW, arm64.ASTLXP, arm64.ASTLXPW: + return true + } + // atomic instructions + if arm64.IsAtomicInstruction(op) { + return true + } + return false +} + +// ARM64Suffix handles the special suffix for the ARM64. +// It returns a boolean to indicate success; failure means +// cond was unrecognized. 
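+
+// Editorial sketch of the suffix parsing implemented just below, with
+// expected results derived from the arm64LS table above:
+//
+//	bits, ok := parseARM64Suffix(".P") // ok == true, bits == arm64.C_XPOST (post-index)
+//	bits, ok = parseARM64Suffix(".W")  // ok == true, bits == arm64.C_XPRE (pre-index)
+//	bits, ok = parseARM64Suffix(".X")  // ok == false: unknown suffix
+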
+func ARM64Suffix(prog *obj.Prog, cond string) bool { + if cond == "" { + return true + } + bits, ok := parseARM64Suffix(cond) + if !ok { + return false + } + prog.Scond = bits + return true +} + +// parseARM64Suffix parses the suffix attached to an ARM64 instruction. +// The input is a single string consisting of period-separated condition +// codes, such as ".P.W". An initial period is ignored. +func parseARM64Suffix(cond string) (uint8, bool) { + if cond == "" { + return 0, true + } + return parseARMCondition(cond, arm64LS, nil) +} + +func arm64RegisterNumber(name string, n int16) (int16, bool) { + switch name { + case "F": + if 0 <= n && n <= 31 { + return arm64.REG_F0 + n, true + } + case "R": + if 0 <= n && n <= 30 { // not 31 + return arm64.REG_R0 + n, true + } + case "V": + if 0 <= n && n <= 31 { + return arm64.REG_V0 + n, true + } + } + return 0, false +} + +// IsARM64TBL reports whether the op (as defined by an arm64.A* +// constant) is one of the table lookup instructions that require special +// handling. +func IsARM64TBL(op obj.As) bool { + return op == arm64.AVTBL +} + +// ARM64RegisterExtension parses an ARM64 register with extension or arrangement. +func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error { + Rnum := (reg & 31) + int16(num<<5) + if isAmount { + if num < 0 || num > 7 { + return errors.New("index shift amount is out of range") + } + } + switch ext { + case "UXTB": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_UXTB + Rnum + case "UXTH": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_UXTH + Rnum + case "UXTW": + if !isAmount { + return errors.New("invalid register extension") + } + // effective address of memory is a base register value and an offset register value. 
+ if a.Type == obj.TYPE_MEM { + a.Index = arm64.REG_UXTW + Rnum + } else { + a.Reg = arm64.REG_UXTW + Rnum + } + case "UXTX": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_UXTX + Rnum + case "SXTB": + if !isAmount { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_SXTB + Rnum + case "SXTH": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + return errors.New("invalid shift for the register offset addressing mode") + } + a.Reg = arm64.REG_SXTH + Rnum + case "SXTW": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + a.Index = arm64.REG_SXTW + Rnum + } else { + a.Reg = arm64.REG_SXTW + Rnum + } + case "SXTX": + if !isAmount { + return errors.New("invalid register extension") + } + if a.Type == obj.TYPE_MEM { + a.Index = arm64.REG_SXTX + Rnum + } else { + a.Reg = arm64.REG_SXTX + Rnum + } + case "LSL": + if !isAmount { + return errors.New("invalid register extension") + } + a.Index = arm64.REG_LSL + Rnum + case "B8": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 5) + case "B16": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5) + case "H4": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5) + case "H8": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5) + case "S2": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5) + case "S4": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5) + case "D1": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1D & 15) << 5) + case "D2": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5) + case "Q1": + if isIndex { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_1Q & 15) << 5) + case "B": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5) + a.Index = num + case "H": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5) + a.Index = num + case "S": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_S & 15) << 5) + a.Index = num + case "D": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5) + a.Index = num + default: + return errors.New("unsupported register extension type: " + ext) + } + + return nil +} + +// ARM64RegisterArrangement parses an ARM64 vector register arrangement. 
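+
+// Editorial sketch of the vector-operand helpers here: ARM64RegisterExtension
+// above, plus ARM64RegisterArrangement and ARM64RegisterListOffset just below
+// (hypothetical operand V1.B16; the results restate the case arms above):
+//
+//	var a obj.Addr
+//	err := ARM64RegisterExtension(&a, "B16", arm64.REG_V1, 0, false, false)
+//	// err == nil; a.Reg == arm64.REG_ARNG + (arm64.REG_V1 & 31) + ((arm64.ARNG_16B & 15) << 5)
+//
+//	// For the 4-register list V1.B16-V4.B16:
+//	arng, _ := ARM64RegisterArrangement(1, "V1", "B16") // B16: curSize=0, curQ=1 -> 1<<30
+//	off, _ := ARM64RegisterListOffset(1, 4, arng)
+//	// off == 1 | 0x2<<12 | 1<<30 | 1<<60 (first reg, count code, arrangement, arm64 marker bit)
+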
+func ARM64RegisterArrangement(reg int16, name, arng string) (int64, error) { + var curQ, curSize uint16 + if name[0] != 'V' { + return 0, errors.New("expect V0 through V31; found: " + name) + } + if reg < 0 { + return 0, errors.New("invalid register number: " + name) + } + switch arng { + case "B8": + curSize = 0 + curQ = 0 + case "B16": + curSize = 0 + curQ = 1 + case "H4": + curSize = 1 + curQ = 0 + case "H8": + curSize = 1 + curQ = 1 + case "S2": + curSize = 2 + curQ = 0 + case "S4": + curSize = 2 + curQ = 1 + case "D1": + curSize = 3 + curQ = 0 + case "D2": + curSize = 3 + curQ = 1 + default: + return 0, errors.New("invalid arrangement in ARM64 register list") + } + return (int64(curQ) & 1 << 30) | (int64(curSize&3) << 10), nil +} + +// ARM64RegisterListOffset generates offset encoding according to AArch64 specification. +func ARM64RegisterListOffset(firstReg, regCnt int, arrangement int64) (int64, error) { + offset := int64(firstReg) + switch regCnt { + case 1: + offset |= 0x7 << 12 + case 2: + offset |= 0xa << 12 + case 3: + offset |= 0x6 << 12 + case 4: + offset |= 0x2 << 12 + default: + return 0, errors.New("invalid register numbers in ARM64 register list") + } + offset |= arrangement + // arm64 uses the 60th bit to differentiate from other archs + // For more details, refer to: obj/arm64/list7.go + offset |= 1 << 60 + return offset, nil +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/mips.go b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/mips.go new file mode 100644 index 0000000..7af9dbb --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/mips.go @@ -0,0 +1,72 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the +// MIPS (MIPS64) instruction set, to minimize its interaction +// with the core of the assembler. + +package arch + +import ( + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/mips" +) + +func jumpMIPS(word string) bool { + switch word { + case "BEQ", "BFPF", "BFPT", "BGEZ", "BGEZAL", "BGTZ", "BLEZ", "BLTZ", "BLTZAL", "BNE", "JMP", "JAL", "CALL": + return true + } + return false +} + +// IsMIPSCMP reports whether the op (as defined by an mips.A* constant) is +// one of the CMP instructions that require special handling. +func IsMIPSCMP(op obj.As) bool { + switch op { + case mips.ACMPEQF, mips.ACMPEQD, mips.ACMPGEF, mips.ACMPGED, + mips.ACMPGTF, mips.ACMPGTD: + return true + } + return false +} + +// IsMIPSMUL reports whether the op (as defined by an mips.A* constant) is +// one of the MUL/DIV/REM/MADD/MSUB instructions that require special handling. 
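+
+// Editorial sketch of the MIPS register notation handled in this file
+// (expected values per mipsRegisterNumber further down):
+//
+//	r, ok := mipsRegisterNumber("F", 3) // r == mips.REG_F3, ok == true
+//	_, ok = mipsRegisterNumber("R", 32) // ok == false: only R0..R31 exist
+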
+func IsMIPSMUL(op obj.As) bool { + switch op { + case mips.AMUL, mips.AMULU, mips.AMULV, mips.AMULVU, + mips.ADIV, mips.ADIVU, mips.ADIVV, mips.ADIVVU, + mips.AREM, mips.AREMU, mips.AREMV, mips.AREMVU, + mips.AMADD, mips.AMSUB: + return true + } + return false +} + +func mipsRegisterNumber(name string, n int16) (int16, bool) { + switch name { + case "F": + if 0 <= n && n <= 31 { + return mips.REG_F0 + n, true + } + case "FCR": + if 0 <= n && n <= 31 { + return mips.REG_FCR0 + n, true + } + case "M": + if 0 <= n && n <= 31 { + return mips.REG_M0 + n, true + } + case "R": + if 0 <= n && n <= 31 { + return mips.REG_R0 + n, true + } + case "W": + if 0 <= n && n <= 31 { + return mips.REG_W0 + n, true + } + } + return 0, false +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/ppc64.go b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/ppc64.go new file mode 100644 index 0000000..8b2f097 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/ppc64.go @@ -0,0 +1,102 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the +// 64-bit PowerPC (PPC64) instruction set, to minimize its interaction +// with the core of the assembler. + +package arch + +import ( + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/ppc64" +) + +func jumpPPC64(word string) bool { + switch word { + case "BC", "BCL", "BEQ", "BGE", "BGT", "BL", "BLE", "BLT", "BNE", "BR", "BVC", "BVS", "CALL", "JMP": + return true + } + return false +} + +// IsPPC64RLD reports whether the op (as defined by an ppc64.A* constant) is +// one of the RLD-like instructions that require special handling. +// The FMADD-like instructions behave similarly. +func IsPPC64RLD(op obj.As) bool { + switch op { + case ppc64.ARLDC, ppc64.ARLDCCC, ppc64.ARLDCL, ppc64.ARLDCLCC, + ppc64.ARLDCR, ppc64.ARLDCRCC, ppc64.ARLDMI, ppc64.ARLDMICC, + ppc64.ARLWMI, ppc64.ARLWMICC, ppc64.ARLWNM, ppc64.ARLWNMCC: + return true + case ppc64.AFMADD, ppc64.AFMADDCC, ppc64.AFMADDS, ppc64.AFMADDSCC, + ppc64.AFMSUB, ppc64.AFMSUBCC, ppc64.AFMSUBS, ppc64.AFMSUBSCC, + ppc64.AFNMADD, ppc64.AFNMADDCC, ppc64.AFNMADDS, ppc64.AFNMADDSCC, + ppc64.AFNMSUB, ppc64.AFNMSUBCC, ppc64.AFNMSUBS, ppc64.AFNMSUBSCC: + return true + } + return false +} + +func IsPPC64ISEL(op obj.As) bool { + return op == ppc64.AISEL +} + +// IsPPC64CMP reports whether the op (as defined by an ppc64.A* constant) is +// one of the CMP instructions that require special handling. +func IsPPC64CMP(op obj.As) bool { + switch op { + case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU, ppc64.AFCMPU: + return true + } + return false +} + +// IsPPC64NEG reports whether the op (as defined by an ppc64.A* constant) is +// one of the NEG-like instructions that require special handling. 
+func IsPPC64NEG(op obj.As) bool { + switch op { + case ppc64.AADDMECC, ppc64.AADDMEVCC, ppc64.AADDMEV, ppc64.AADDME, + ppc64.AADDZECC, ppc64.AADDZEVCC, ppc64.AADDZEV, ppc64.AADDZE, + ppc64.ACNTLZDCC, ppc64.ACNTLZD, ppc64.ACNTLZWCC, ppc64.ACNTLZW, + ppc64.AEXTSBCC, ppc64.AEXTSB, ppc64.AEXTSHCC, ppc64.AEXTSH, + ppc64.AEXTSWCC, ppc64.AEXTSW, ppc64.ANEGCC, ppc64.ANEGVCC, + ppc64.ANEGV, ppc64.ANEG, ppc64.ASLBMFEE, ppc64.ASLBMFEV, + ppc64.ASLBMTE, ppc64.ASUBMECC, ppc64.ASUBMEVCC, ppc64.ASUBMEV, + ppc64.ASUBME, ppc64.ASUBZECC, ppc64.ASUBZEVCC, ppc64.ASUBZEV, + ppc64.ASUBZE: + return true + } + return false +} + +func ppc64RegisterNumber(name string, n int16) (int16, bool) { + switch name { + case "CR": + if 0 <= n && n <= 7 { + return ppc64.REG_CR0 + n, true + } + case "VS": + if 0 <= n && n <= 63 { + return ppc64.REG_VS0 + n, true + } + case "V": + if 0 <= n && n <= 31 { + return ppc64.REG_V0 + n, true + } + case "F": + if 0 <= n && n <= 31 { + return ppc64.REG_F0 + n, true + } + case "R": + if 0 <= n && n <= 31 { + return ppc64.REG_R0 + n, true + } + case "SPR": + if 0 <= n && n <= 1024 { + return ppc64.REG_SPR0 + n, true + } + } + return 0, false +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/riscv64.go b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/riscv64.go new file mode 100644 index 0000000..e4f1753 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/riscv64.go @@ -0,0 +1,28 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the RISCV64 +// instruction set, to minimize its interaction with the core of the +// assembler. + +package arch + +import ( + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/obj/riscv" +) + +// IsRISCV64AMO reports whether the op (as defined by a riscv.A* +// constant) is one of the AMO instructions that requires special +// handling. +func IsRISCV64AMO(op obj.As) bool { + switch op { + case riscv.ASCW, riscv.ASCD, riscv.AAMOSWAPW, riscv.AAMOSWAPD, riscv.AAMOADDW, riscv.AAMOADDD, + riscv.AAMOANDW, riscv.AAMOANDD, riscv.AAMOORW, riscv.AAMOORD, riscv.AAMOXORW, riscv.AAMOXORD, + riscv.AAMOMINW, riscv.AAMOMIND, riscv.AAMOMINUW, riscv.AAMOMINUD, + riscv.AAMOMAXW, riscv.AAMOMAXD, riscv.AAMOMAXUW, riscv.AAMOMAXUD: + return true + } + return false +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/s390x.go b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/s390x.go new file mode 100644 index 0000000..1e33f1e --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/asm/arch/s390x.go @@ -0,0 +1,81 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file encapsulates some of the odd characteristics of the +// s390x instruction set, to minimize its interaction +// with the core of the assembler. 
+ +package arch + +import ( + "github.com/twitchyliquid64/golang-asm/obj/s390x" +) + +func jumpS390x(word string) bool { + switch word { + case "BRC", + "BC", + "BCL", + "BEQ", + "BGE", + "BGT", + "BL", + "BLE", + "BLEU", + "BLT", + "BLTU", + "BNE", + "BR", + "BVC", + "BVS", + "BRCT", + "BRCTG", + "CMPBEQ", + "CMPBGE", + "CMPBGT", + "CMPBLE", + "CMPBLT", + "CMPBNE", + "CMPUBEQ", + "CMPUBGE", + "CMPUBGT", + "CMPUBLE", + "CMPUBLT", + "CMPUBNE", + "CRJ", + "CGRJ", + "CLRJ", + "CLGRJ", + "CIJ", + "CGIJ", + "CLIJ", + "CLGIJ", + "CALL", + "JMP": + return true + } + return false +} + +func s390xRegisterNumber(name string, n int16) (int16, bool) { + switch name { + case "AR": + if 0 <= n && n <= 15 { + return s390x.REG_AR0 + n, true + } + case "F": + if 0 <= n && n <= 15 { + return s390x.REG_F0 + n, true + } + case "R": + if 0 <= n && n <= 15 { + return s390x.REG_R0 + n, true + } + case "V": + if 0 <= n && n <= 31 { + return s390x.REG_V0 + n, true + } + } + return 0, false +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/bio/buf.go b/vendor/github.com/twitchyliquid64/golang-asm/bio/buf.go new file mode 100644 index 0000000..c4c2514 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/bio/buf.go @@ -0,0 +1,148 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bio implements common I/O abstractions used within the Go toolchain. +package bio + +import ( + "bufio" + "io" + "log" + "os" +) + +// Reader implements a seekable buffered io.Reader. +type Reader struct { + f *os.File + *bufio.Reader +} + +// Writer implements a seekable buffered io.Writer. +type Writer struct { + f *os.File + *bufio.Writer +} + +// Create creates the file named name and returns a Writer +// for that file. +func Create(name string) (*Writer, error) { + f, err := os.Create(name) + if err != nil { + return nil, err + } + return &Writer{f: f, Writer: bufio.NewWriter(f)}, nil +} + +// Open returns a Reader for the file named name. +func Open(name string) (*Reader, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + return NewReader(f), nil +} + +// NewReader returns a Reader from an open file. 
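+//
+// A hypothetical usage sketch (the file name is invented for illustration):
+//
+// f, err := os.Open("prog.o")
+// if err != nil {
+// log.Fatal(err)
+// }
+// r := NewReader(f)
+// defer r.Close()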
+func NewReader(f *os.File) *Reader { + return &Reader{f: f, Reader: bufio.NewReader(f)} +} + +func (r *Reader) MustSeek(offset int64, whence int) int64 { + if whence == 1 { + offset -= int64(r.Buffered()) + } + off, err := r.f.Seek(offset, whence) + if err != nil { + log.Fatalf("seeking in output: %v", err) + } + r.Reset(r.f) + return off +} + +func (w *Writer) MustSeek(offset int64, whence int) int64 { + if err := w.Flush(); err != nil { + log.Fatalf("writing output: %v", err) + } + off, err := w.f.Seek(offset, whence) + if err != nil { + log.Fatalf("seeking in output: %v", err) + } + return off +} + +func (r *Reader) Offset() int64 { + off, err := r.f.Seek(0, 1) + if err != nil { + log.Fatalf("seeking in output [0, 1]: %v", err) + } + off -= int64(r.Buffered()) + return off +} + +func (w *Writer) Offset() int64 { + if err := w.Flush(); err != nil { + log.Fatalf("writing output: %v", err) + } + off, err := w.f.Seek(0, 1) + if err != nil { + log.Fatalf("seeking in output [0, 1]: %v", err) + } + return off +} + +func (r *Reader) Close() error { + return r.f.Close() +} + +func (w *Writer) Close() error { + err := w.Flush() + err1 := w.f.Close() + if err == nil { + err = err1 + } + return err +} + +func (r *Reader) File() *os.File { + return r.f +} + +func (w *Writer) File() *os.File { + return w.f +} + +// Slice reads the next length bytes of r into a slice. +// +// This slice may be backed by mmap'ed memory. Currently, this memory +// will never be unmapped. The second result reports whether the +// backing memory is read-only. +func (r *Reader) Slice(length uint64) ([]byte, bool, error) { + if length == 0 { + return []byte{}, false, nil + } + + data, ok := r.sliceOS(length) + if ok { + return data, true, nil + } + + data = make([]byte, length) + _, err := io.ReadFull(r, data) + if err != nil { + return nil, false, err + } + return data, false, nil +} + +// SliceRO returns a slice containing the next length bytes of r +// backed by a read-only mmap'd data. If the mmap cannot be +// established (limit exceeded, region too small, etc) a nil slice +// will be returned. If mmap succeeds, it will never be unmapped. +func (r *Reader) SliceRO(length uint64) []byte { + data, ok := r.sliceOS(length) + if ok { + return data + } + return nil +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_mmap.go b/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_mmap.go new file mode 100644 index 0000000..4b43d74 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_mmap.go @@ -0,0 +1,62 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd + +package bio + +import ( + "runtime" + "sync/atomic" + "syscall" +) + +// mmapLimit is the maximum number of mmaped regions to create before +// falling back to reading into a heap-allocated slice. This exists +// because some operating systems place a limit on the number of +// distinct mapped regions per process. As of this writing: +// +// Darwin unlimited +// DragonFly 1000000 (vm.max_proc_mmap) +// FreeBSD unlimited +// Linux 65530 (vm.max_map_count) // TODO: query /proc/sys/vm/max_map_count? +// NetBSD unlimited +// OpenBSD unlimited +var mmapLimit int32 = 1<<31 - 1 + +func init() { + // Linux is the only practically concerning OS. 
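+ // The cap set below (30000) stays well under Linux's default
+ // vm.max_map_count of 65530, leaving headroom for the process's
+ // other mappings. (Clarifying note, not from the upstream source.)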
+ if runtime.GOOS == "linux" { + mmapLimit = 30000 + } +} + +func (r *Reader) sliceOS(length uint64) ([]byte, bool) { + // For small slices, don't bother with the overhead of a + // mapping, especially since we have no way to unmap it. + const threshold = 16 << 10 + if length < threshold { + return nil, false + } + + // Have we reached the mmap limit? + if atomic.AddInt32(&mmapLimit, -1) < 0 { + atomic.AddInt32(&mmapLimit, 1) + return nil, false + } + + // Page-align the offset. + off := r.Offset() + align := syscall.Getpagesize() + aoff := off &^ int64(align-1) + + data, err := syscall.Mmap(int(r.f.Fd()), aoff, int(length+uint64(off-aoff)), syscall.PROT_READ, syscall.MAP_SHARED|syscall.MAP_FILE) + if err != nil { + return nil, false + } + + data = data[off-aoff:] + r.MustSeek(int64(length), 1) + return data, true +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_nommap.go b/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_nommap.go new file mode 100644 index 0000000..f43c67a --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/bio/buf_nommap.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd + +package bio + +func (r *Reader) sliceOS(length uint64) ([]byte, bool) { + return nil, false +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/bio/must.go b/vendor/github.com/twitchyliquid64/golang-asm/bio/must.go new file mode 100644 index 0000000..3604b29 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/bio/must.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bio + +import ( + "io" + "log" +) + +// MustClose closes Closer c and calls log.Fatal if it returns a non-nil error. +func MustClose(c io.Closer) { + if err := c.Close(); err != nil { + log.Fatal(err) + } +} + +// MustWriter returns a Writer that wraps the provided Writer, +// except that it calls log.Fatal instead of returning a non-nil error. +func MustWriter(w io.Writer) io.Writer { + return mustWriter{w} +} + +type mustWriter struct { + w io.Writer +} + +func (w mustWriter) Write(b []byte) (int, error) { + n, err := w.w.Write(b) + if err != nil { + log.Fatal(err) + } + return n, nil +} + +func (w mustWriter) WriteString(s string) (int, error) { + n, err := io.WriteString(w.w, s) + if err != nil { + log.Fatal(err) + } + return n, nil +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf.go b/vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf.go new file mode 100644 index 0000000..2fee79d --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf.go @@ -0,0 +1,1650 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dwarf generates DWARF debugging information. +// DWARF generation is split between the compiler and the linker, +// this package contains the shared code. +package dwarf + +import ( + "bytes" + "github.com/twitchyliquid64/golang-asm/objabi" + "errors" + "fmt" + "os/exec" + "sort" + "strconv" + "strings" +) + +// InfoPrefix is the prefix for all the symbols containing DWARF info entries. +const InfoPrefix = "go.info." 
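+// For example (an editorial illustration, not in the upstream source), the
+// DWARF info entries for a function main.f are emitted into a symbol named
+// "go.info.main.f".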
+
+// ConstInfoPrefix is the prefix for all symbols containing DWARF info
+// entries that contain constants.
+const ConstInfoPrefix = "go.constinfo."
+
+// CUInfoPrefix is the prefix for symbols containing information to
+// populate the DWARF compilation unit info entries.
+const CUInfoPrefix = "go.cuinfo."
+
+// Used to form the symbol name assigned to the DWARF "abstract subprogram"
+// info entry for a function.
+const AbstractFuncSuffix = "$abstract"
+
+// Controls logging/debugging for selected aspects of DWARF subprogram
+// generation (functions, scopes).
+var logDwarf bool
+
+// Sym represents a symbol.
+type Sym interface {
+ Length(dwarfContext interface{}) int64
+}
+
+// A Var represents a local variable or a function parameter.
+type Var struct {
+ Name string
+ Abbrev int // Either DW_ABRV_AUTO[_LOCLIST] or DW_ABRV_PARAM[_LOCLIST]
+ IsReturnValue bool
+ IsInlFormal bool
+ StackOffset int32
+ // This package can't use the ssa package, so it can't mention ssa.FuncDebug,
+ // so indirect through a closure.
+ PutLocationList func(listSym, startPC Sym)
+ Scope int32
+ Type Sym
+ DeclFile string
+ DeclLine uint
+ DeclCol uint
+ InlIndex int32 // subtract 1 to form real index into InlTree
+ ChildIndex int32 // child DIE index in abstract function
+ IsInAbstract bool // variable exists in abstract function
+}
+
+// A Scope represents a lexical scope. All variables declared within a
+// scope will only be visible to instructions covered by the scope.
+// Lexical scopes are contiguous in source files but can end up being
+// compiled to discontiguous blocks of instructions in the executable.
+// The Ranges field lists all the blocks of instructions that belong
+// in this scope.
+type Scope struct {
+ Parent int32
+ Ranges []Range
+ Vars []*Var
+}
+
+// A Range represents a half-open interval [Start, End).
+type Range struct {
+ Start, End int64
+}
+
+// FnState is a container used by the PutFunc* variants below when
+// creating the DWARF subprogram DIE(s) for a function.
+type FnState struct {
+ Name string
+ Importpath string
+ Info Sym
+ Filesym Sym
+ Loc Sym
+ Ranges Sym
+ Absfn Sym
+ StartPC Sym
+ Size int64
+ External bool
+ Scopes []Scope
+ InlCalls InlCalls
+ UseBASEntries bool
+}
+
+func EnableLogging(doit bool) {
+ logDwarf = doit
+}
+
+// UnifyRanges merges the list of ranges of c into the list of ranges of s.
+func (s *Scope) UnifyRanges(c *Scope) {
+ out := make([]Range, 0, len(s.Ranges)+len(c.Ranges))
+
+ i, j := 0, 0
+ for {
+ var cur Range
+ if i < len(s.Ranges) && j < len(c.Ranges) {
+ if s.Ranges[i].Start < c.Ranges[j].Start {
+ cur = s.Ranges[i]
+ i++
+ } else {
+ cur = c.Ranges[j]
+ j++
+ }
+ } else if i < len(s.Ranges) {
+ cur = s.Ranges[i]
+ i++
+ } else if j < len(c.Ranges) {
+ cur = c.Ranges[j]
+ j++
+ } else {
+ break
+ }
+
+ if n := len(out); n > 0 && cur.Start <= out[n-1].End {
+ out[n-1].End = cur.End
+ } else {
+ out = append(out, cur)
+ }
+ }
+
+ s.Ranges = out
+}
+
+// AppendRange adds r to s, if r is non-empty.
+// If possible, it extends the last Range in s.Ranges; if not, it creates a new one.
+func (s *Scope) AppendRange(r Range) {
+ if r.End <= r.Start {
+ return
+ }
+ i := len(s.Ranges)
+ if i > 0 && s.Ranges[i-1].End == r.Start {
+ s.Ranges[i-1].End = r.End
+ return
+ }
+ s.Ranges = append(s.Ranges, r)
+}
+
+type InlCalls struct {
+ Calls []InlCall
+}
+
+type InlCall struct {
+ // index into ctx.InlTree describing the call inlined here
+ InlIndex int
+
+ // Symbol of file containing inlined call site (really *obj.LSym).
+ CallFile Sym + + // Line number of inlined call site. + CallLine uint32 + + // Dwarf abstract subroutine symbol (really *obj.LSym). + AbsFunSym Sym + + // Indices of child inlines within Calls array above. + Children []int + + // entries in this list are PAUTO's created by the inliner to + // capture the promoted formals and locals of the inlined callee. + InlVars []*Var + + // PC ranges for this inlined call. + Ranges []Range + + // Root call (not a child of some other call). + Root bool +} + +// A Context specifies how to add data to a Sym. +type Context interface { + PtrSize() int + AddInt(s Sym, size int, i int64) + AddBytes(s Sym, b []byte) + AddAddress(s Sym, t interface{}, ofs int64) + AddCURelativeAddress(s Sym, t interface{}, ofs int64) + AddSectionOffset(s Sym, size int, t interface{}, ofs int64) + AddDWARFAddrSectionOffset(s Sym, t interface{}, ofs int64) + CurrentOffset(s Sym) int64 + RecordDclReference(from Sym, to Sym, dclIdx int, inlIndex int) + RecordChildDieOffsets(s Sym, vars []*Var, offsets []int32) + AddString(s Sym, v string) + AddFileRef(s Sym, f interface{}) + Logf(format string, args ...interface{}) +} + +// AppendUleb128 appends v to b using DWARF's unsigned LEB128 encoding. +func AppendUleb128(b []byte, v uint64) []byte { + for { + c := uint8(v & 0x7f) + v >>= 7 + if v != 0 { + c |= 0x80 + } + b = append(b, c) + if c&0x80 == 0 { + break + } + } + return b +} + +// AppendSleb128 appends v to b using DWARF's signed LEB128 encoding. +func AppendSleb128(b []byte, v int64) []byte { + for { + c := uint8(v & 0x7f) + s := uint8(v & 0x40) + v >>= 7 + if (v != -1 || s == 0) && (v != 0 || s != 0) { + c |= 0x80 + } + b = append(b, c) + if c&0x80 == 0 { + break + } + } + return b +} + +// sevenbits contains all unsigned seven bit numbers, indexed by their value. +var sevenbits = [...]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, +} + +// sevenBitU returns the unsigned LEB128 encoding of v if v is seven bits and nil otherwise. +// The contents of the returned slice must not be modified. +func sevenBitU(v int64) []byte { + if uint64(v) < uint64(len(sevenbits)) { + return sevenbits[v : v+1] + } + return nil +} + +// sevenBitS returns the signed LEB128 encoding of v if v is seven bits and nil otherwise. +// The contents of the returned slice must not be modified. +func sevenBitS(v int64) []byte { + if uint64(v) <= 63 { + return sevenbits[v : v+1] + } + if uint64(-v) <= 64 { + return sevenbits[128+v : 128+v+1] + } + return nil +} + +// Uleb128put appends v to s using DWARF's unsigned LEB128 encoding. +func Uleb128put(ctxt Context, s Sym, v int64) { + b := sevenBitU(v) + if b == nil { + var encbuf [20]byte + b = AppendUleb128(encbuf[:0], uint64(v)) + } + ctxt.AddBytes(s, b) +} + +// Sleb128put appends v to s using DWARF's signed LEB128 encoding. 
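+//
+// For instance, Sleb128put(ctxt, s, -129) appends the two bytes
+// 0xFF 0x7E, following the AppendSleb128 algorithm above. (An
+// illustrative note, not in the upstream source.)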
+func Sleb128put(ctxt Context, s Sym, v int64) { + b := sevenBitS(v) + if b == nil { + var encbuf [20]byte + b = AppendSleb128(encbuf[:0], v) + } + ctxt.AddBytes(s, b) +} + +/* + * Defining Abbrevs. This is hardcoded on a per-platform basis (that is, + * each platform will see a fixed abbrev table for all objects); the number + * of abbrev entries is fairly small (compared to C++ objects). The DWARF + * spec places no restriction on the ordering of attributes in the + * Abbrevs and DIEs, and we will always write them out in the order + * of declaration in the abbrev. + */ +type dwAttrForm struct { + attr uint16 + form uint8 +} + +// Go-specific type attributes. +const ( + DW_AT_go_kind = 0x2900 + DW_AT_go_key = 0x2901 + DW_AT_go_elem = 0x2902 + // Attribute for DW_TAG_member of a struct type. + // Nonzero value indicates the struct field is an embedded field. + DW_AT_go_embedded_field = 0x2903 + DW_AT_go_runtime_type = 0x2904 + + DW_AT_go_package_name = 0x2905 // Attribute for DW_TAG_compile_unit + + DW_AT_internal_location = 253 // params and locals; not emitted +) + +// Index into the abbrevs table below. +// Keep in sync with ispubname() and ispubtype() in ld/dwarf.go. +// ispubtype considers >= NULLTYPE public +const ( + DW_ABRV_NULL = iota + DW_ABRV_COMPUNIT + DW_ABRV_COMPUNIT_TEXTLESS + DW_ABRV_FUNCTION + DW_ABRV_FUNCTION_ABSTRACT + DW_ABRV_FUNCTION_CONCRETE + DW_ABRV_INLINED_SUBROUTINE + DW_ABRV_INLINED_SUBROUTINE_RANGES + DW_ABRV_VARIABLE + DW_ABRV_INT_CONSTANT + DW_ABRV_AUTO + DW_ABRV_AUTO_LOCLIST + DW_ABRV_AUTO_ABSTRACT + DW_ABRV_AUTO_CONCRETE + DW_ABRV_AUTO_CONCRETE_LOCLIST + DW_ABRV_PARAM + DW_ABRV_PARAM_LOCLIST + DW_ABRV_PARAM_ABSTRACT + DW_ABRV_PARAM_CONCRETE + DW_ABRV_PARAM_CONCRETE_LOCLIST + DW_ABRV_LEXICAL_BLOCK_RANGES + DW_ABRV_LEXICAL_BLOCK_SIMPLE + DW_ABRV_STRUCTFIELD + DW_ABRV_FUNCTYPEPARAM + DW_ABRV_DOTDOTDOT + DW_ABRV_ARRAYRANGE + DW_ABRV_NULLTYPE + DW_ABRV_BASETYPE + DW_ABRV_ARRAYTYPE + DW_ABRV_CHANTYPE + DW_ABRV_FUNCTYPE + DW_ABRV_IFACETYPE + DW_ABRV_MAPTYPE + DW_ABRV_PTRTYPE + DW_ABRV_BARE_PTRTYPE // only for void*, no DW_AT_type attr to please gdb 6. + DW_ABRV_SLICETYPE + DW_ABRV_STRINGTYPE + DW_ABRV_STRUCTTYPE + DW_ABRV_TYPEDECL + DW_NABRV +) + +type dwAbbrev struct { + tag uint8 + children uint8 + attr []dwAttrForm +} + +var abbrevsFinalized bool + +// expandPseudoForm takes an input DW_FORM_xxx value and translates it +// into a platform-appropriate concrete form. Existing concrete/real +// DW_FORM values are left untouched. For the moment the only +// pseudo-form is DW_FORM_udata_pseudo, which gets expanded to +// DW_FORM_data4 on Darwin and DW_FORM_udata everywhere else. See +// issue #31459 for more context. +func expandPseudoForm(form uint8) uint8 { + // Is this a pseudo-form? + if form != DW_FORM_udata_pseudo { + return form + } + expandedForm := DW_FORM_udata + if objabi.GOOS == "darwin" { + expandedForm = DW_FORM_data4 + } + return uint8(expandedForm) +} + +// Abbrevs() returns the finalized abbrev array for the platform, +// expanding any DW_FORM pseudo-ops to real values. 
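+//
+// Note that the expansion below runs only once, guarded by the
+// unsynchronized abbrevsFinalized flag, so the first call must not race
+// with concurrent callers. (Clarifying note, not from the upstream source.)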
+func Abbrevs() []dwAbbrev { + if abbrevsFinalized { + return abbrevs[:] + } + for i := 1; i < DW_NABRV; i++ { + for j := 0; j < len(abbrevs[i].attr); j++ { + abbrevs[i].attr[j].form = expandPseudoForm(abbrevs[i].attr[j].form) + } + } + abbrevsFinalized = true + return abbrevs[:] +} + +// abbrevs is a raw table of abbrev entries; it needs to be post-processed +// by the Abbrevs() function above prior to being consumed, to expand +// the 'pseudo-form' entries below to real DWARF form values. + +var abbrevs = [DW_NABRV]dwAbbrev{ + /* The mandatory DW_ABRV_NULL entry. */ + {0, 0, []dwAttrForm{}}, + + /* COMPUNIT */ + { + DW_TAG_compile_unit, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_language, DW_FORM_data1}, + {DW_AT_stmt_list, DW_FORM_sec_offset}, + {DW_AT_low_pc, DW_FORM_addr}, + {DW_AT_ranges, DW_FORM_sec_offset}, + {DW_AT_comp_dir, DW_FORM_string}, + {DW_AT_producer, DW_FORM_string}, + {DW_AT_go_package_name, DW_FORM_string}, + }, + }, + + /* COMPUNIT_TEXTLESS */ + { + DW_TAG_compile_unit, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_language, DW_FORM_data1}, + {DW_AT_comp_dir, DW_FORM_string}, + {DW_AT_producer, DW_FORM_string}, + {DW_AT_go_package_name, DW_FORM_string}, + }, + }, + + /* FUNCTION */ + { + DW_TAG_subprogram, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_low_pc, DW_FORM_addr}, + {DW_AT_high_pc, DW_FORM_addr}, + {DW_AT_frame_base, DW_FORM_block1}, + {DW_AT_decl_file, DW_FORM_data4}, + {DW_AT_external, DW_FORM_flag}, + }, + }, + + /* FUNCTION_ABSTRACT */ + { + DW_TAG_subprogram, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_inline, DW_FORM_data1}, + {DW_AT_external, DW_FORM_flag}, + }, + }, + + /* FUNCTION_CONCRETE */ + { + DW_TAG_subprogram, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_low_pc, DW_FORM_addr}, + {DW_AT_high_pc, DW_FORM_addr}, + {DW_AT_frame_base, DW_FORM_block1}, + }, + }, + + /* INLINED_SUBROUTINE */ + { + DW_TAG_inlined_subroutine, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_low_pc, DW_FORM_addr}, + {DW_AT_high_pc, DW_FORM_addr}, + {DW_AT_call_file, DW_FORM_data4}, + {DW_AT_call_line, DW_FORM_udata_pseudo}, // pseudo-form + }, + }, + + /* INLINED_SUBROUTINE_RANGES */ + { + DW_TAG_inlined_subroutine, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_ranges, DW_FORM_sec_offset}, + {DW_AT_call_file, DW_FORM_data4}, + {DW_AT_call_line, DW_FORM_udata_pseudo}, // pseudo-form + }, + }, + + /* VARIABLE */ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_location, DW_FORM_block1}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_external, DW_FORM_flag}, + }, + }, + + /* INT CONSTANT */ + { + DW_TAG_constant, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_const_value, DW_FORM_sdata}, + }, + }, + + /* AUTO */ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + + /* AUTO_LOCLIST */ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + + /* AUTO_ABSTRACT */ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ 
+ {DW_AT_name, DW_FORM_string}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + }, + }, + + /* AUTO_CONCRETE */ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + + /* AUTO_CONCRETE_LOCLIST */ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + + /* PARAM */ + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + + /* PARAM_LOCLIST */ + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + + /* PARAM_ABSTRACT */ + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_type, DW_FORM_ref_addr}, + }, + }, + + /* PARAM_CONCRETE */ + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + + /* PARAM_CONCRETE_LOCLIST */ + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + + /* LEXICAL_BLOCK_RANGES */ + { + DW_TAG_lexical_block, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_ranges, DW_FORM_sec_offset}, + }, + }, + + /* LEXICAL_BLOCK_SIMPLE */ + { + DW_TAG_lexical_block, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_low_pc, DW_FORM_addr}, + {DW_AT_high_pc, DW_FORM_addr}, + }, + }, + + /* STRUCTFIELD */ + { + DW_TAG_member, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_data_member_location, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_go_embedded_field, DW_FORM_flag}, + }, + }, + + /* FUNCTYPEPARAM */ + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + + // No name! + []dwAttrForm{ + {DW_AT_type, DW_FORM_ref_addr}, + }, + }, + + /* DOTDOTDOT */ + { + DW_TAG_unspecified_parameters, + DW_CHILDREN_no, + []dwAttrForm{}, + }, + + /* ARRAYRANGE */ + { + DW_TAG_subrange_type, + DW_CHILDREN_no, + + // No name! 
+ []dwAttrForm{ + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_count, DW_FORM_udata}, + }, + }, + + // Below here are the types considered public by ispubtype + /* NULLTYPE */ + { + DW_TAG_unspecified_type, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + }, + }, + + /* BASETYPE */ + { + DW_TAG_base_type, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_encoding, DW_FORM_data1}, + {DW_AT_byte_size, DW_FORM_data1}, + {DW_AT_go_kind, DW_FORM_data1}, + {DW_AT_go_runtime_type, DW_FORM_addr}, + }, + }, + + /* ARRAYTYPE */ + // child is subrange with upper bound + { + DW_TAG_array_type, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_byte_size, DW_FORM_udata}, + {DW_AT_go_kind, DW_FORM_data1}, + {DW_AT_go_runtime_type, DW_FORM_addr}, + }, + }, + + /* CHANTYPE */ + { + DW_TAG_typedef, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_go_kind, DW_FORM_data1}, + {DW_AT_go_runtime_type, DW_FORM_addr}, + {DW_AT_go_elem, DW_FORM_ref_addr}, + }, + }, + + /* FUNCTYPE */ + { + DW_TAG_subroutine_type, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_byte_size, DW_FORM_udata}, + {DW_AT_go_kind, DW_FORM_data1}, + {DW_AT_go_runtime_type, DW_FORM_addr}, + }, + }, + + /* IFACETYPE */ + { + DW_TAG_typedef, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_go_kind, DW_FORM_data1}, + {DW_AT_go_runtime_type, DW_FORM_addr}, + }, + }, + + /* MAPTYPE */ + { + DW_TAG_typedef, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_go_kind, DW_FORM_data1}, + {DW_AT_go_runtime_type, DW_FORM_addr}, + {DW_AT_go_key, DW_FORM_ref_addr}, + {DW_AT_go_elem, DW_FORM_ref_addr}, + }, + }, + + /* PTRTYPE */ + { + DW_TAG_pointer_type, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_go_kind, DW_FORM_data1}, + {DW_AT_go_runtime_type, DW_FORM_addr}, + }, + }, + + /* BARE_PTRTYPE */ + { + DW_TAG_pointer_type, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + }, + }, + + /* SLICETYPE */ + { + DW_TAG_structure_type, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_byte_size, DW_FORM_udata}, + {DW_AT_go_kind, DW_FORM_data1}, + {DW_AT_go_runtime_type, DW_FORM_addr}, + {DW_AT_go_elem, DW_FORM_ref_addr}, + }, + }, + + /* STRINGTYPE */ + { + DW_TAG_structure_type, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_byte_size, DW_FORM_udata}, + {DW_AT_go_kind, DW_FORM_data1}, + {DW_AT_go_runtime_type, DW_FORM_addr}, + }, + }, + + /* STRUCTTYPE */ + { + DW_TAG_structure_type, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_byte_size, DW_FORM_udata}, + {DW_AT_go_kind, DW_FORM_data1}, + {DW_AT_go_runtime_type, DW_FORM_addr}, + }, + }, + + /* TYPEDECL */ + { + DW_TAG_typedef, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_type, DW_FORM_ref_addr}, + }, + }, +} + +// GetAbbrev returns the contents of the .debug_abbrev section. 
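+//
+// Per DWARF section 7.5.3, each entry is emitted as a ULEB128 abbrev code
+// and tag, a children byte, the (attribute, form) pairs, and a terminating
+// (0, 0) pair; a single 0 byte ends the table. (Clarifying note, not from
+// the upstream source.)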
+func GetAbbrev() []byte { + abbrevs := Abbrevs() + var buf []byte + for i := 1; i < DW_NABRV; i++ { + // See section 7.5.3 + buf = AppendUleb128(buf, uint64(i)) + buf = AppendUleb128(buf, uint64(abbrevs[i].tag)) + buf = append(buf, abbrevs[i].children) + for _, f := range abbrevs[i].attr { + buf = AppendUleb128(buf, uint64(f.attr)) + buf = AppendUleb128(buf, uint64(f.form)) + } + buf = append(buf, 0, 0) + } + return append(buf, 0) +} + +/* + * Debugging Information Entries and their attributes. + */ + +// DWAttr represents an attribute of a DWDie. +// +// For DW_CLS_string and _block, value should contain the length, and +// data the data, for _reference, value is 0 and data is a DWDie* to +// the referenced instance, for all others, value is the whole thing +// and data is null. +type DWAttr struct { + Link *DWAttr + Atr uint16 // DW_AT_ + Cls uint8 // DW_CLS_ + Value int64 + Data interface{} +} + +// DWDie represents a DWARF debug info entry. +type DWDie struct { + Abbrev int + Link *DWDie + Child *DWDie + Attr *DWAttr + Sym Sym +} + +func putattr(ctxt Context, s Sym, abbrev int, form int, cls int, value int64, data interface{}) error { + switch form { + case DW_FORM_addr: // address + // Allow nil addresses for DW_AT_go_runtime_type. + if data == nil && value == 0 { + ctxt.AddInt(s, ctxt.PtrSize(), 0) + break + } + if cls == DW_CLS_GO_TYPEREF { + ctxt.AddSectionOffset(s, ctxt.PtrSize(), data, value) + break + } + ctxt.AddAddress(s, data, value) + + case DW_FORM_block1: // block + if cls == DW_CLS_ADDRESS { + ctxt.AddInt(s, 1, int64(1+ctxt.PtrSize())) + ctxt.AddInt(s, 1, DW_OP_addr) + ctxt.AddAddress(s, data, 0) + break + } + + value &= 0xff + ctxt.AddInt(s, 1, value) + p := data.([]byte)[:value] + ctxt.AddBytes(s, p) + + case DW_FORM_block2: // block + value &= 0xffff + + ctxt.AddInt(s, 2, value) + p := data.([]byte)[:value] + ctxt.AddBytes(s, p) + + case DW_FORM_block4: // block + value &= 0xffffffff + + ctxt.AddInt(s, 4, value) + p := data.([]byte)[:value] + ctxt.AddBytes(s, p) + + case DW_FORM_block: // block + Uleb128put(ctxt, s, value) + + p := data.([]byte)[:value] + ctxt.AddBytes(s, p) + + case DW_FORM_data1: // constant + ctxt.AddInt(s, 1, value) + + case DW_FORM_data2: // constant + ctxt.AddInt(s, 2, value) + + case DW_FORM_data4: // constant, {line,loclist,mac,rangelist}ptr + if cls == DW_CLS_PTR { // DW_AT_stmt_list and DW_AT_ranges + ctxt.AddDWARFAddrSectionOffset(s, data, value) + break + } + ctxt.AddInt(s, 4, value) + + case DW_FORM_data8: // constant, {line,loclist,mac,rangelist}ptr + ctxt.AddInt(s, 8, value) + + case DW_FORM_sdata: // constant + Sleb128put(ctxt, s, value) + + case DW_FORM_udata: // constant + Uleb128put(ctxt, s, value) + + case DW_FORM_string: // string + str := data.(string) + ctxt.AddString(s, str) + // TODO(ribrdb): verify padded strings are never used and remove this + for i := int64(len(str)); i < value; i++ { + ctxt.AddInt(s, 1, 0) + } + + case DW_FORM_flag: // flag + if value != 0 { + ctxt.AddInt(s, 1, 1) + } else { + ctxt.AddInt(s, 1, 0) + } + + // As of DWARF 3 the ref_addr is always 32 bits, unless emitting a large + // (> 4 GB of debug info aka "64-bit") unit, which we don't implement. 
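+ // Both forms are therefore emitted through AddDWARFAddrSectionOffset
+ // below. (Clarifying note, not from the upstream source.)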
+ case DW_FORM_ref_addr: // reference to a DIE in the .info section + fallthrough + case DW_FORM_sec_offset: // offset into a DWARF section other than .info + if data == nil { + return fmt.Errorf("dwarf: null reference in %d", abbrev) + } + ctxt.AddDWARFAddrSectionOffset(s, data, value) + + case DW_FORM_ref1, // reference within the compilation unit + DW_FORM_ref2, // reference + DW_FORM_ref4, // reference + DW_FORM_ref8, // reference + DW_FORM_ref_udata, // reference + + DW_FORM_strp, // string + DW_FORM_indirect: // (see Section 7.5.3) + fallthrough + default: + return fmt.Errorf("dwarf: unsupported attribute form %d / class %d", form, cls) + } + return nil +} + +// PutAttrs writes the attributes for a DIE to symbol 's'. +// +// Note that we can (and do) add arbitrary attributes to a DIE, but +// only the ones actually listed in the Abbrev will be written out. +func PutAttrs(ctxt Context, s Sym, abbrev int, attr *DWAttr) { + abbrevs := Abbrevs() +Outer: + for _, f := range abbrevs[abbrev].attr { + for ap := attr; ap != nil; ap = ap.Link { + if ap.Atr == f.attr { + putattr(ctxt, s, abbrev, int(f.form), int(ap.Cls), ap.Value, ap.Data) + continue Outer + } + } + + putattr(ctxt, s, abbrev, int(f.form), 0, 0, nil) + } +} + +// HasChildren reports whether 'die' uses an abbrev that supports children. +func HasChildren(die *DWDie) bool { + abbrevs := Abbrevs() + return abbrevs[die.Abbrev].children != 0 +} + +// PutIntConst writes a DIE for an integer constant +func PutIntConst(ctxt Context, info, typ Sym, name string, val int64) { + Uleb128put(ctxt, info, DW_ABRV_INT_CONSTANT) + putattr(ctxt, info, DW_ABRV_INT_CONSTANT, DW_FORM_string, DW_CLS_STRING, int64(len(name)), name) + putattr(ctxt, info, DW_ABRV_INT_CONSTANT, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, typ) + putattr(ctxt, info, DW_ABRV_INT_CONSTANT, DW_FORM_sdata, DW_CLS_CONSTANT, val, nil) +} + +// PutBasedRanges writes a range table to sym. All addresses in ranges are +// relative to some base address, which must be arranged by the caller +// (e.g., with a DW_AT_low_pc attribute, or in a BASE-prefixed range). +func PutBasedRanges(ctxt Context, sym Sym, ranges []Range) { + ps := ctxt.PtrSize() + // Write ranges. + for _, r := range ranges { + ctxt.AddInt(sym, ps, r.Start) + ctxt.AddInt(sym, ps, r.End) + } + // Write trailer. + ctxt.AddInt(sym, ps, 0) + ctxt.AddInt(sym, ps, 0) +} + +// PutRanges writes a range table to s.Ranges. +// All addresses in ranges are relative to s.base. +func (s *FnState) PutRanges(ctxt Context, ranges []Range) { + ps := ctxt.PtrSize() + sym, base := s.Ranges, s.StartPC + + if s.UseBASEntries { + // Using a Base Address Selection Entry reduces the number of relocations, but + // this is not done on macOS because it is not supported by dsymutil/dwarfdump/lldb + ctxt.AddInt(sym, ps, -1) + ctxt.AddAddress(sym, base, 0) + PutBasedRanges(ctxt, sym, ranges) + return + } + + // Write ranges full of relocations + for _, r := range ranges { + ctxt.AddCURelativeAddress(sym, base, r.Start) + ctxt.AddCURelativeAddress(sym, base, r.End) + } + // Write trailer. + ctxt.AddInt(sym, ps, 0) + ctxt.AddInt(sym, ps, 0) +} + +// Return TRUE if the inlined call in the specified slot is empty, +// meaning it has a zero-length range (no instructions), and all +// of its children are empty. 
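+// As a side effect, an empty call is marked by setting its InlIndex to -2,
+// so that repeated checks short-circuit. (Clarifying note, not from the
+// upstream source.)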
+func isEmptyInlinedCall(slot int, calls *InlCalls) bool {
+ ic := &calls.Calls[slot]
+ if ic.InlIndex == -2 {
+ return true
+ }
+ live := false
+ for _, k := range ic.Children {
+ if !isEmptyInlinedCall(k, calls) {
+ live = true
+ }
+ }
+ if len(ic.Ranges) > 0 {
+ live = true
+ }
+ if !live {
+ ic.InlIndex = -2
+ }
+ return !live
+}
+
+// Slot -1: return top-level inlines
+// Slot >= 0: return children of that slot
+func inlChildren(slot int, calls *InlCalls) []int {
+ var kids []int
+ if slot != -1 {
+ for _, k := range calls.Calls[slot].Children {
+ if !isEmptyInlinedCall(k, calls) {
+ kids = append(kids, k)
+ }
+ }
+ } else {
+ for k := 0; k < len(calls.Calls); k += 1 {
+ if calls.Calls[k].Root && !isEmptyInlinedCall(k, calls) {
+ kids = append(kids, k)
+ }
+ }
+ }
+ return kids
+}
+
+func inlinedVarTable(inlcalls *InlCalls) map[*Var]bool {
+ vars := make(map[*Var]bool)
+ for _, ic := range inlcalls.Calls {
+ for _, v := range ic.InlVars {
+ vars[v] = true
+ }
+ }
+ return vars
+}
+
+// The s.Scopes slice contains variables that were originally part of the
+// function being emitted, as well as variables that were imported
+// from various callee functions during the inlining process. This
+// function prunes out any variables from the latter category (since
+// they will be emitted as part of DWARF inlined_subroutine DIEs) and
+// then generates scopes for vars in the former category.
+func putPrunedScopes(ctxt Context, s *FnState, fnabbrev int) error {
+ if len(s.Scopes) == 0 {
+ return nil
+ }
+ scopes := make([]Scope, len(s.Scopes), len(s.Scopes))
+ pvars := inlinedVarTable(&s.InlCalls)
+ for k, s := range s.Scopes {
+ var pruned Scope = Scope{Parent: s.Parent, Ranges: s.Ranges}
+ for i := 0; i < len(s.Vars); i++ {
+ _, found := pvars[s.Vars[i]]
+ if !found {
+ pruned.Vars = append(pruned.Vars, s.Vars[i])
+ }
+ }
+ sort.Sort(byChildIndex(pruned.Vars))
+ scopes[k] = pruned
+ }
+ var encbuf [20]byte
+ if putscope(ctxt, s, scopes, 0, fnabbrev, encbuf[:0]) < int32(len(scopes)) {
+ return errors.New("multiple toplevel scopes")
+ }
+ return nil
+}
+
+// Emit DWARF attributes and child DIEs for an 'abstract' subprogram.
+// The abstract subprogram DIE for a function contains its
+// location-independent attributes (name, type, etc). Other instances
+// of the function (any inlined copy of it, or the single out-of-line
+// 'concrete' instance) will contain a pointer back to this abstract
+// DIE (as a space-saving measure, so that name/type etc doesn't have
+// to be repeated for each inlined copy).
+func PutAbstractFunc(ctxt Context, s *FnState) error {
+
+ if logDwarf {
+ ctxt.Logf("PutAbstractFunc(%v)\n", s.Absfn)
+ }
+
+ abbrev := DW_ABRV_FUNCTION_ABSTRACT
+ Uleb128put(ctxt, s.Absfn, int64(abbrev))
+
+ fullname := s.Name
+ if strings.HasPrefix(s.Name, "\"\".") {
+ // Generate a fully qualified name for the function in the
+ // abstract case. This is so as to avoid the need for the
+ // linker to process the DIE with patchDWARFName(); we can't
+ // allow the name attribute of an abstract subprogram DIE to
+ // be rewritten, since it would change the offsets of the
+ // child DIEs (which we're relying on in order for abstract
+ // origin references to work).
+ fullname = objabi.PathToPrefix(s.Importpath) + "."
+ s.Name[3:] + } + putattr(ctxt, s.Absfn, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(fullname)), fullname) + + // DW_AT_inlined value + putattr(ctxt, s.Absfn, abbrev, DW_FORM_data1, DW_CLS_CONSTANT, int64(DW_INL_inlined), nil) + + var ev int64 + if s.External { + ev = 1 + } + putattr(ctxt, s.Absfn, abbrev, DW_FORM_flag, DW_CLS_FLAG, ev, 0) + + // Child variables (may be empty) + var flattened []*Var + + // This slice will hold the offset in bytes for each child var DIE + // with respect to the start of the parent subprogram DIE. + var offsets []int32 + + // Scopes/vars + if len(s.Scopes) > 0 { + // For abstract subprogram DIEs we want to flatten out scope info: + // lexical scope DIEs contain range and/or hi/lo PC attributes, + // which we explicitly don't want for the abstract subprogram DIE. + pvars := inlinedVarTable(&s.InlCalls) + for _, scope := range s.Scopes { + for i := 0; i < len(scope.Vars); i++ { + _, found := pvars[scope.Vars[i]] + if found || !scope.Vars[i].IsInAbstract { + continue + } + flattened = append(flattened, scope.Vars[i]) + } + } + if len(flattened) > 0 { + sort.Sort(byChildIndex(flattened)) + + if logDwarf { + ctxt.Logf("putAbstractScope(%v): vars:", s.Info) + for i, v := range flattened { + ctxt.Logf(" %d:%s", i, v.Name) + } + ctxt.Logf("\n") + } + + // This slice will hold the offset in bytes for each child + // variable DIE with respect to the start of the parent + // subprogram DIE. + for _, v := range flattened { + offsets = append(offsets, int32(ctxt.CurrentOffset(s.Absfn))) + putAbstractVar(ctxt, s.Absfn, v) + } + } + } + ctxt.RecordChildDieOffsets(s.Absfn, flattened, offsets) + + Uleb128put(ctxt, s.Absfn, 0) + return nil +} + +// Emit DWARF attributes and child DIEs for an inlined subroutine. The +// first attribute of an inlined subroutine DIE is a reference back to +// its corresponding 'abstract' DIE (containing location-independent +// attributes such as name, type, etc). Inlined subroutine DIEs can +// have other inlined subroutine DIEs as children. +func PutInlinedFunc(ctxt Context, s *FnState, callersym Sym, callIdx int) error { + ic := s.InlCalls.Calls[callIdx] + callee := ic.AbsFunSym + + abbrev := DW_ABRV_INLINED_SUBROUTINE_RANGES + if len(ic.Ranges) == 1 { + abbrev = DW_ABRV_INLINED_SUBROUTINE + } + Uleb128put(ctxt, s.Info, int64(abbrev)) + + if logDwarf { + ctxt.Logf("PutInlinedFunc(caller=%v,callee=%v,abbrev=%d)\n", callersym, callee, abbrev) + } + + // Abstract origin. + putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, callee) + + if abbrev == DW_ABRV_INLINED_SUBROUTINE_RANGES { + putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, s.Ranges.Length(ctxt), s.Ranges) + s.PutRanges(ctxt, ic.Ranges) + } else { + st := ic.Ranges[0].Start + en := ic.Ranges[0].End + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, st, s.StartPC) + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, en, s.StartPC) + } + + // Emit call file, line attrs. + ctxt.AddFileRef(s.Info, ic.CallFile) + form := int(expandPseudoForm(DW_FORM_udata_pseudo)) + putattr(ctxt, s.Info, abbrev, form, DW_CLS_CONSTANT, int64(ic.CallLine), nil) + + // Variables associated with this inlined routine instance. + vars := ic.InlVars + sort.Sort(byChildIndex(vars)) + inlIndex := ic.InlIndex + var encbuf [20]byte + for _, v := range vars { + if !v.IsInAbstract { + continue + } + putvar(ctxt, s, v, callee, abbrev, inlIndex, encbuf[:0]) + } + + // Children of this inline. 
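+ // Each child is emitted by a recursive PutInlinedFunc call, so nested
+ // inlines become nested inlined_subroutine DIEs. (Clarifying note, not
+ // from the upstream source.)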
+ for _, sib := range inlChildren(callIdx, &s.InlCalls) { + absfn := s.InlCalls.Calls[sib].AbsFunSym + err := PutInlinedFunc(ctxt, s, absfn, sib) + if err != nil { + return err + } + } + + Uleb128put(ctxt, s.Info, 0) + return nil +} + +// Emit DWARF attributes and child DIEs for a 'concrete' subprogram, +// meaning the out-of-line copy of a function that was inlined at some +// point during the compilation of its containing package. The first +// attribute for a concrete DIE is a reference to the 'abstract' DIE +// for the function (which holds location-independent attributes such +// as name, type), then the remainder of the attributes are specific +// to this instance (location, frame base, etc). +func PutConcreteFunc(ctxt Context, s *FnState) error { + if logDwarf { + ctxt.Logf("PutConcreteFunc(%v)\n", s.Info) + } + abbrev := DW_ABRV_FUNCTION_CONCRETE + Uleb128put(ctxt, s.Info, int64(abbrev)) + + // Abstract origin. + putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, s.Absfn) + + // Start/end PC. + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, 0, s.StartPC) + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, s.Size, s.StartPC) + + // cfa / frame base + putattr(ctxt, s.Info, abbrev, DW_FORM_block1, DW_CLS_BLOCK, 1, []byte{DW_OP_call_frame_cfa}) + + // Scopes + if err := putPrunedScopes(ctxt, s, abbrev); err != nil { + return err + } + + // Inlined subroutines. + for _, sib := range inlChildren(-1, &s.InlCalls) { + absfn := s.InlCalls.Calls[sib].AbsFunSym + err := PutInlinedFunc(ctxt, s, absfn, sib) + if err != nil { + return err + } + } + + Uleb128put(ctxt, s.Info, 0) + return nil +} + +// Emit DWARF attributes and child DIEs for a subprogram. Here +// 'default' implies that the function in question was not inlined +// when its containing package was compiled (hence there is no need to +// emit an abstract version for it to use as a base for inlined +// routine records). +func PutDefaultFunc(ctxt Context, s *FnState) error { + if logDwarf { + ctxt.Logf("PutDefaultFunc(%v)\n", s.Info) + } + abbrev := DW_ABRV_FUNCTION + Uleb128put(ctxt, s.Info, int64(abbrev)) + + // Expand '"".' to import path. + name := s.Name + if s.Importpath != "" { + name = strings.Replace(name, "\"\".", objabi.PathToPrefix(s.Importpath)+".", -1) + } + + putattr(ctxt, s.Info, DW_ABRV_FUNCTION, DW_FORM_string, DW_CLS_STRING, int64(len(name)), name) + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, 0, s.StartPC) + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, s.Size, s.StartPC) + putattr(ctxt, s.Info, abbrev, DW_FORM_block1, DW_CLS_BLOCK, 1, []byte{DW_OP_call_frame_cfa}) + ctxt.AddFileRef(s.Info, s.Filesym) + + var ev int64 + if s.External { + ev = 1 + } + putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, ev, 0) + + // Scopes + if err := putPrunedScopes(ctxt, s, abbrev); err != nil { + return err + } + + // Inlined subroutines. 
+ for _, sib := range inlChildren(-1, &s.InlCalls) { + absfn := s.InlCalls.Calls[sib].AbsFunSym + err := PutInlinedFunc(ctxt, s, absfn, sib) + if err != nil { + return err + } + } + + Uleb128put(ctxt, s.Info, 0) + return nil +} + +func putscope(ctxt Context, s *FnState, scopes []Scope, curscope int32, fnabbrev int, encbuf []byte) int32 { + + if logDwarf { + ctxt.Logf("putscope(%v,%d): vars:", s.Info, curscope) + for i, v := range scopes[curscope].Vars { + ctxt.Logf(" %d:%d:%s", i, v.ChildIndex, v.Name) + } + ctxt.Logf("\n") + } + + for _, v := range scopes[curscope].Vars { + putvar(ctxt, s, v, s.Absfn, fnabbrev, -1, encbuf) + } + this := curscope + curscope++ + for curscope < int32(len(scopes)) { + scope := scopes[curscope] + if scope.Parent != this { + return curscope + } + + if len(scopes[curscope].Vars) == 0 { + curscope = putscope(ctxt, s, scopes, curscope, fnabbrev, encbuf) + continue + } + + if len(scope.Ranges) == 1 { + Uleb128put(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_SIMPLE) + putattr(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_SIMPLE, DW_FORM_addr, DW_CLS_ADDRESS, scope.Ranges[0].Start, s.StartPC) + putattr(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_SIMPLE, DW_FORM_addr, DW_CLS_ADDRESS, scope.Ranges[0].End, s.StartPC) + } else { + Uleb128put(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_RANGES) + putattr(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_RANGES, DW_FORM_sec_offset, DW_CLS_PTR, s.Ranges.Length(ctxt), s.Ranges) + + s.PutRanges(ctxt, scope.Ranges) + } + + curscope = putscope(ctxt, s, scopes, curscope, fnabbrev, encbuf) + + Uleb128put(ctxt, s.Info, 0) + } + return curscope +} + +// Given a default var abbrev code, select corresponding concrete code. +func concreteVarAbbrev(varAbbrev int) int { + switch varAbbrev { + case DW_ABRV_AUTO: + return DW_ABRV_AUTO_CONCRETE + case DW_ABRV_PARAM: + return DW_ABRV_PARAM_CONCRETE + case DW_ABRV_AUTO_LOCLIST: + return DW_ABRV_AUTO_CONCRETE_LOCLIST + case DW_ABRV_PARAM_LOCLIST: + return DW_ABRV_PARAM_CONCRETE_LOCLIST + default: + panic("should never happen") + } +} + +// Pick the correct abbrev code for variable or parameter DIE. +func determineVarAbbrev(v *Var, fnabbrev int) (int, bool, bool) { + abbrev := v.Abbrev + + // If the variable was entirely optimized out, don't emit a location list; + // convert to an inline abbreviation and emit an empty location. + missing := false + switch { + case abbrev == DW_ABRV_AUTO_LOCLIST && v.PutLocationList == nil: + missing = true + abbrev = DW_ABRV_AUTO + case abbrev == DW_ABRV_PARAM_LOCLIST && v.PutLocationList == nil: + missing = true + abbrev = DW_ABRV_PARAM + } + + // Determine whether to use a concrete variable or regular variable DIE. + concrete := true + switch fnabbrev { + case DW_ABRV_FUNCTION: + concrete = false + break + case DW_ABRV_FUNCTION_CONCRETE: + // If we're emitting a concrete subprogram DIE and the variable + // in question is not part of the corresponding abstract function DIE, + // then use the default (non-concrete) abbrev for this param. 
+ if !v.IsInAbstract { + concrete = false + } + case DW_ABRV_INLINED_SUBROUTINE, DW_ABRV_INLINED_SUBROUTINE_RANGES: + default: + panic("should never happen") + } + + // Select proper abbrev based on concrete/non-concrete + if concrete { + abbrev = concreteVarAbbrev(abbrev) + } + + return abbrev, missing, concrete +} + +func abbrevUsesLoclist(abbrev int) bool { + switch abbrev { + case DW_ABRV_AUTO_LOCLIST, DW_ABRV_AUTO_CONCRETE_LOCLIST, + DW_ABRV_PARAM_LOCLIST, DW_ABRV_PARAM_CONCRETE_LOCLIST: + return true + default: + return false + } +} + +// Emit DWARF attributes for a variable belonging to an 'abstract' subprogram. +func putAbstractVar(ctxt Context, info Sym, v *Var) { + // Remap abbrev + abbrev := v.Abbrev + switch abbrev { + case DW_ABRV_AUTO, DW_ABRV_AUTO_LOCLIST: + abbrev = DW_ABRV_AUTO_ABSTRACT + case DW_ABRV_PARAM, DW_ABRV_PARAM_LOCLIST: + abbrev = DW_ABRV_PARAM_ABSTRACT + } + + Uleb128put(ctxt, info, int64(abbrev)) + putattr(ctxt, info, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(v.Name)), v.Name) + + // Isreturn attribute if this is a param + if abbrev == DW_ABRV_PARAM_ABSTRACT { + var isReturn int64 + if v.IsReturnValue { + isReturn = 1 + } + putattr(ctxt, info, abbrev, DW_FORM_flag, DW_CLS_FLAG, isReturn, nil) + } + + // Line + if abbrev != DW_ABRV_PARAM_ABSTRACT { + // See issue 23374 for more on why decl line is skipped for abs params. + putattr(ctxt, info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(v.DeclLine), nil) + } + + // Type + putattr(ctxt, info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type) + + // Var has no children => no terminator +} + +func putvar(ctxt Context, s *FnState, v *Var, absfn Sym, fnabbrev, inlIndex int, encbuf []byte) { + // Remap abbrev according to parent DIE abbrev + abbrev, missing, concrete := determineVarAbbrev(v, fnabbrev) + + Uleb128put(ctxt, s.Info, int64(abbrev)) + + // Abstract origin for concrete / inlined case + if concrete { + // Here we are making a reference to a child DIE of an abstract + // function subprogram DIE. The child DIE has no LSym, so instead + // after the call to 'putattr' below we make a call to register + // the child DIE reference. + putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, absfn) + ctxt.RecordDclReference(s.Info, absfn, int(v.ChildIndex), inlIndex) + } else { + // Var name, line for abstract and default cases + n := v.Name + putattr(ctxt, s.Info, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(n)), n) + if abbrev == DW_ABRV_PARAM || abbrev == DW_ABRV_PARAM_LOCLIST || abbrev == DW_ABRV_PARAM_ABSTRACT { + var isReturn int64 + if v.IsReturnValue { + isReturn = 1 + } + putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, isReturn, nil) + } + putattr(ctxt, s.Info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(v.DeclLine), nil) + putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type) + } + + if abbrevUsesLoclist(abbrev) { + putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, s.Loc.Length(ctxt), s.Loc) + v.PutLocationList(s.Loc, s.StartPC) + } else { + loc := encbuf[:0] + switch { + case missing: + break // no location + case v.StackOffset == 0: + loc = append(loc, DW_OP_call_frame_cfa) + default: + loc = append(loc, DW_OP_fbreg) + loc = AppendSleb128(loc, int64(v.StackOffset)) + } + putattr(ctxt, s.Info, abbrev, DW_FORM_block1, DW_CLS_BLOCK, int64(len(loc)), loc) + } + + // Var has no children => no terminator +} + +// VarsByOffset attaches the methods of sort.Interface to []*Var, +// sorting in increasing StackOffset. 
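+//
+// A hypothetical usage sketch, assuming a []*Var named vars:
+//
+// sort.Sort(VarsByOffset(vars))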
+type VarsByOffset []*Var
+
+func (s VarsByOffset) Len() int { return len(s) }
+func (s VarsByOffset) Less(i, j int) bool { return s[i].StackOffset < s[j].StackOffset }
+func (s VarsByOffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// byChildIndex implements sort.Interface for []*dwarf.Var by child index.
+type byChildIndex []*Var
+
+func (s byChildIndex) Len() int { return len(s) }
+func (s byChildIndex) Less(i, j int) bool { return s[i].ChildIndex < s[j].ChildIndex }
+func (s byChildIndex) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// IsDWARFEnabledOnAIXLd returns true if DWARF is possible on the
+// current extld.
+// AIX ld doesn't support DWARF with -bnoobjreorder with versions
+// prior to 7.2.2.
+func IsDWARFEnabledOnAIXLd(extld string) (bool, error) {
+ out, err := exec.Command(extld, "-Wl,-V").CombinedOutput()
+ if err != nil {
+ // A normal invocation prints the ld version and then
+ // fails because ".main" is not defined:
+ // ld: 0711-317 ERROR: Undefined symbol: .main
+ if !bytes.Contains(out, []byte("0711-317")) {
+ return false, fmt.Errorf("%s -Wl,-V failed: %v\n%s", extld, err, out)
+ }
+ }
+ // gcc -Wl,-V output should be:
+ // /usr/bin/ld: LD X.X.X(date)
+ // ...
+ out = bytes.TrimPrefix(out, []byte("/usr/bin/ld: LD "))
+ vers := string(bytes.Split(out, []byte("("))[0])
+ subvers := strings.Split(vers, ".")
+ if len(subvers) != 3 {
+ return false, fmt.Errorf("cannot parse %s -Wl,-V (%s): %v\n", extld, out, err)
+ }
+ if v, err := strconv.Atoi(subvers[0]); err != nil || v < 7 {
+ return false, nil
+ } else if v > 7 {
+ return true, nil
+ }
+ if v, err := strconv.Atoi(subvers[1]); err != nil || v < 2 {
+ return false, nil
+ } else if v > 2 {
+ return true, nil
+ }
+ if v, err := strconv.Atoi(subvers[2]); err != nil || v < 2 {
+ return false, nil
+ }
+ return true, nil
+}
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf_defs.go b/vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf_defs.go
new file mode 100644
index 0000000..e2716e5
--- /dev/null
+++ b/vendor/github.com/twitchyliquid64/golang-asm/dwarf/dwarf_defs.go
@@ -0,0 +1,493 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package dwarf + +// Cut, pasted, tr-and-awk'ed from tables in +// http://dwarfstd.org/doc/Dwarf3.pdf + +// Table 18 +const ( + DW_TAG_array_type = 0x01 + DW_TAG_class_type = 0x02 + DW_TAG_entry_point = 0x03 + DW_TAG_enumeration_type = 0x04 + DW_TAG_formal_parameter = 0x05 + DW_TAG_imported_declaration = 0x08 + DW_TAG_label = 0x0a + DW_TAG_lexical_block = 0x0b + DW_TAG_member = 0x0d + DW_TAG_pointer_type = 0x0f + DW_TAG_reference_type = 0x10 + DW_TAG_compile_unit = 0x11 + DW_TAG_string_type = 0x12 + DW_TAG_structure_type = 0x13 + DW_TAG_subroutine_type = 0x15 + DW_TAG_typedef = 0x16 + DW_TAG_union_type = 0x17 + DW_TAG_unspecified_parameters = 0x18 + DW_TAG_variant = 0x19 + DW_TAG_common_block = 0x1a + DW_TAG_common_inclusion = 0x1b + DW_TAG_inheritance = 0x1c + DW_TAG_inlined_subroutine = 0x1d + DW_TAG_module = 0x1e + DW_TAG_ptr_to_member_type = 0x1f + DW_TAG_set_type = 0x20 + DW_TAG_subrange_type = 0x21 + DW_TAG_with_stmt = 0x22 + DW_TAG_access_declaration = 0x23 + DW_TAG_base_type = 0x24 + DW_TAG_catch_block = 0x25 + DW_TAG_const_type = 0x26 + DW_TAG_constant = 0x27 + DW_TAG_enumerator = 0x28 + DW_TAG_file_type = 0x29 + DW_TAG_friend = 0x2a + DW_TAG_namelist = 0x2b + DW_TAG_namelist_item = 0x2c + DW_TAG_packed_type = 0x2d + DW_TAG_subprogram = 0x2e + DW_TAG_template_type_parameter = 0x2f + DW_TAG_template_value_parameter = 0x30 + DW_TAG_thrown_type = 0x31 + DW_TAG_try_block = 0x32 + DW_TAG_variant_part = 0x33 + DW_TAG_variable = 0x34 + DW_TAG_volatile_type = 0x35 + // Dwarf3 + DW_TAG_dwarf_procedure = 0x36 + DW_TAG_restrict_type = 0x37 + DW_TAG_interface_type = 0x38 + DW_TAG_namespace = 0x39 + DW_TAG_imported_module = 0x3a + DW_TAG_unspecified_type = 0x3b + DW_TAG_partial_unit = 0x3c + DW_TAG_imported_unit = 0x3d + DW_TAG_condition = 0x3f + DW_TAG_shared_type = 0x40 + // Dwarf4 + DW_TAG_type_unit = 0x41 + DW_TAG_rvalue_reference_type = 0x42 + DW_TAG_template_alias = 0x43 + + // User defined + DW_TAG_lo_user = 0x4080 + DW_TAG_hi_user = 0xffff +) + +// Table 19 +const ( + DW_CHILDREN_no = 0x00 + DW_CHILDREN_yes = 0x01 +) + +// Not from the spec, but logically belongs here +const ( + DW_CLS_ADDRESS = 0x01 + iota + DW_CLS_BLOCK + DW_CLS_CONSTANT + DW_CLS_FLAG + DW_CLS_PTR // lineptr, loclistptr, macptr, rangelistptr + DW_CLS_REFERENCE + DW_CLS_ADDRLOC + DW_CLS_STRING + + // Go-specific internal hackery. 
+ DW_CLS_GO_TYPEREF +) + +// Table 20 +const ( + DW_AT_sibling = 0x01 // reference + DW_AT_location = 0x02 // block, loclistptr + DW_AT_name = 0x03 // string + DW_AT_ordering = 0x09 // constant + DW_AT_byte_size = 0x0b // block, constant, reference + DW_AT_bit_offset = 0x0c // block, constant, reference + DW_AT_bit_size = 0x0d // block, constant, reference + DW_AT_stmt_list = 0x10 // lineptr + DW_AT_low_pc = 0x11 // address + DW_AT_high_pc = 0x12 // address + DW_AT_language = 0x13 // constant + DW_AT_discr = 0x15 // reference + DW_AT_discr_value = 0x16 // constant + DW_AT_visibility = 0x17 // constant + DW_AT_import = 0x18 // reference + DW_AT_string_length = 0x19 // block, loclistptr + DW_AT_common_reference = 0x1a // reference + DW_AT_comp_dir = 0x1b // string + DW_AT_const_value = 0x1c // block, constant, string + DW_AT_containing_type = 0x1d // reference + DW_AT_default_value = 0x1e // reference + DW_AT_inline = 0x20 // constant + DW_AT_is_optional = 0x21 // flag + DW_AT_lower_bound = 0x22 // block, constant, reference + DW_AT_producer = 0x25 // string + DW_AT_prototyped = 0x27 // flag + DW_AT_return_addr = 0x2a // block, loclistptr + DW_AT_start_scope = 0x2c // constant + DW_AT_bit_stride = 0x2e // constant + DW_AT_upper_bound = 0x2f // block, constant, reference + DW_AT_abstract_origin = 0x31 // reference + DW_AT_accessibility = 0x32 // constant + DW_AT_address_class = 0x33 // constant + DW_AT_artificial = 0x34 // flag + DW_AT_base_types = 0x35 // reference + DW_AT_calling_convention = 0x36 // constant + DW_AT_count = 0x37 // block, constant, reference + DW_AT_data_member_location = 0x38 // block, constant, loclistptr + DW_AT_decl_column = 0x39 // constant + DW_AT_decl_file = 0x3a // constant + DW_AT_decl_line = 0x3b // constant + DW_AT_declaration = 0x3c // flag + DW_AT_discr_list = 0x3d // block + DW_AT_encoding = 0x3e // constant + DW_AT_external = 0x3f // flag + DW_AT_frame_base = 0x40 // block, loclistptr + DW_AT_friend = 0x41 // reference + DW_AT_identifier_case = 0x42 // constant + DW_AT_macro_info = 0x43 // macptr + DW_AT_namelist_item = 0x44 // block + DW_AT_priority = 0x45 // reference + DW_AT_segment = 0x46 // block, loclistptr + DW_AT_specification = 0x47 // reference + DW_AT_static_link = 0x48 // block, loclistptr + DW_AT_type = 0x49 // reference + DW_AT_use_location = 0x4a // block, loclistptr + DW_AT_variable_parameter = 0x4b // flag + DW_AT_virtuality = 0x4c // constant + DW_AT_vtable_elem_location = 0x4d // block, loclistptr + // Dwarf3 + DW_AT_allocated = 0x4e // block, constant, reference + DW_AT_associated = 0x4f // block, constant, reference + DW_AT_data_location = 0x50 // block + DW_AT_byte_stride = 0x51 // block, constant, reference + DW_AT_entry_pc = 0x52 // address + DW_AT_use_UTF8 = 0x53 // flag + DW_AT_extension = 0x54 // reference + DW_AT_ranges = 0x55 // rangelistptr + DW_AT_trampoline = 0x56 // address, flag, reference, string + DW_AT_call_column = 0x57 // constant + DW_AT_call_file = 0x58 // constant + DW_AT_call_line = 0x59 // constant + DW_AT_description = 0x5a // string + DW_AT_binary_scale = 0x5b // constant + DW_AT_decimal_scale = 0x5c // constant + DW_AT_small = 0x5d // reference + DW_AT_decimal_sign = 0x5e // constant + DW_AT_digit_count = 0x5f // constant + DW_AT_picture_string = 0x60 // string + DW_AT_mutable = 0x61 // flag + DW_AT_threads_scaled = 0x62 // flag + DW_AT_explicit = 0x63 // flag + DW_AT_object_pointer = 0x64 // reference + DW_AT_endianity = 0x65 // constant + DW_AT_elemental = 0x66 // flag + DW_AT_pure = 0x67 // flag + 
DW_AT_recursive = 0x68 // flag + + DW_AT_lo_user = 0x2000 // --- + DW_AT_hi_user = 0x3fff // --- +) + +// Table 21 +const ( + DW_FORM_addr = 0x01 // address + DW_FORM_block2 = 0x03 // block + DW_FORM_block4 = 0x04 // block + DW_FORM_data2 = 0x05 // constant + DW_FORM_data4 = 0x06 // constant, lineptr, loclistptr, macptr, rangelistptr + DW_FORM_data8 = 0x07 // constant, lineptr, loclistptr, macptr, rangelistptr + DW_FORM_string = 0x08 // string + DW_FORM_block = 0x09 // block + DW_FORM_block1 = 0x0a // block + DW_FORM_data1 = 0x0b // constant + DW_FORM_flag = 0x0c // flag + DW_FORM_sdata = 0x0d // constant + DW_FORM_strp = 0x0e // string + DW_FORM_udata = 0x0f // constant + DW_FORM_ref_addr = 0x10 // reference + DW_FORM_ref1 = 0x11 // reference + DW_FORM_ref2 = 0x12 // reference + DW_FORM_ref4 = 0x13 // reference + DW_FORM_ref8 = 0x14 // reference + DW_FORM_ref_udata = 0x15 // reference + DW_FORM_indirect = 0x16 // (see Section 7.5.3) + // Dwarf4 + DW_FORM_sec_offset = 0x17 // lineptr, loclistptr, macptr, rangelistptr + DW_FORM_exprloc = 0x18 // exprloc + DW_FORM_flag_present = 0x19 // flag + DW_FORM_ref_sig8 = 0x20 // reference + // Pseudo-form: expanded to data4 on IOS, udata elsewhere. + DW_FORM_udata_pseudo = 0x99 +) + +// Table 24 (#operands, notes) +const ( + DW_OP_addr = 0x03 // 1 constant address (size target specific) + DW_OP_deref = 0x06 // 0 + DW_OP_const1u = 0x08 // 1 1-byte constant + DW_OP_const1s = 0x09 // 1 1-byte constant + DW_OP_const2u = 0x0a // 1 2-byte constant + DW_OP_const2s = 0x0b // 1 2-byte constant + DW_OP_const4u = 0x0c // 1 4-byte constant + DW_OP_const4s = 0x0d // 1 4-byte constant + DW_OP_const8u = 0x0e // 1 8-byte constant + DW_OP_const8s = 0x0f // 1 8-byte constant + DW_OP_constu = 0x10 // 1 ULEB128 constant + DW_OP_consts = 0x11 // 1 SLEB128 constant + DW_OP_dup = 0x12 // 0 + DW_OP_drop = 0x13 // 0 + DW_OP_over = 0x14 // 0 + DW_OP_pick = 0x15 // 1 1-byte stack index + DW_OP_swap = 0x16 // 0 + DW_OP_rot = 0x17 // 0 + DW_OP_xderef = 0x18 // 0 + DW_OP_abs = 0x19 // 0 + DW_OP_and = 0x1a // 0 + DW_OP_div = 0x1b // 0 + DW_OP_minus = 0x1c // 0 + DW_OP_mod = 0x1d // 0 + DW_OP_mul = 0x1e // 0 + DW_OP_neg = 0x1f // 0 + DW_OP_not = 0x20 // 0 + DW_OP_or = 0x21 // 0 + DW_OP_plus = 0x22 // 0 + DW_OP_plus_uconst = 0x23 // 1 ULEB128 addend + DW_OP_shl = 0x24 // 0 + DW_OP_shr = 0x25 // 0 + DW_OP_shra = 0x26 // 0 + DW_OP_xor = 0x27 // 0 + DW_OP_skip = 0x2f // 1 signed 2-byte constant + DW_OP_bra = 0x28 // 1 signed 2-byte constant + DW_OP_eq = 0x29 // 0 + DW_OP_ge = 0x2a // 0 + DW_OP_gt = 0x2b // 0 + DW_OP_le = 0x2c // 0 + DW_OP_lt = 0x2d // 0 + DW_OP_ne = 0x2e // 0 + DW_OP_lit0 = 0x30 // 0 ... + DW_OP_lit31 = 0x4f // 0 literals 0..31 = (DW_OP_lit0 + literal) + DW_OP_reg0 = 0x50 // 0 .. + DW_OP_reg31 = 0x6f // 0 reg 0..31 = (DW_OP_reg0 + regnum) + DW_OP_breg0 = 0x70 // 1 ... 
+ DW_OP_breg31 = 0x8f // 1 SLEB128 offset base register 0..31 = (DW_OP_breg0 + regnum) + DW_OP_regx = 0x90 // 1 ULEB128 register + DW_OP_fbreg = 0x91 // 1 SLEB128 offset + DW_OP_bregx = 0x92 // 2 ULEB128 register followed by SLEB128 offset + DW_OP_piece = 0x93 // 1 ULEB128 size of piece addressed + DW_OP_deref_size = 0x94 // 1 1-byte size of data retrieved + DW_OP_xderef_size = 0x95 // 1 1-byte size of data retrieved + DW_OP_nop = 0x96 // 0 + DW_OP_push_object_address = 0x97 // 0 + DW_OP_call2 = 0x98 // 1 2-byte offset of DIE + DW_OP_call4 = 0x99 // 1 4-byte offset of DIE + DW_OP_call_ref = 0x9a // 1 4- or 8-byte offset of DIE + DW_OP_form_tls_address = 0x9b // 0 + DW_OP_call_frame_cfa = 0x9c // 0 + DW_OP_bit_piece = 0x9d // 2 + DW_OP_lo_user = 0xe0 + DW_OP_hi_user = 0xff +) + +// Table 25 +const ( + DW_ATE_address = 0x01 + DW_ATE_boolean = 0x02 + DW_ATE_complex_float = 0x03 + DW_ATE_float = 0x04 + DW_ATE_signed = 0x05 + DW_ATE_signed_char = 0x06 + DW_ATE_unsigned = 0x07 + DW_ATE_unsigned_char = 0x08 + DW_ATE_imaginary_float = 0x09 + DW_ATE_packed_decimal = 0x0a + DW_ATE_numeric_string = 0x0b + DW_ATE_edited = 0x0c + DW_ATE_signed_fixed = 0x0d + DW_ATE_unsigned_fixed = 0x0e + DW_ATE_decimal_float = 0x0f + DW_ATE_lo_user = 0x80 + DW_ATE_hi_user = 0xff +) + +// Table 26 +const ( + DW_DS_unsigned = 0x01 + DW_DS_leading_overpunch = 0x02 + DW_DS_trailing_overpunch = 0x03 + DW_DS_leading_separate = 0x04 + DW_DS_trailing_separate = 0x05 +) + +// Table 27 +const ( + DW_END_default = 0x00 + DW_END_big = 0x01 + DW_END_little = 0x02 + DW_END_lo_user = 0x40 + DW_END_hi_user = 0xff +) + +// Table 28 +const ( + DW_ACCESS_public = 0x01 + DW_ACCESS_protected = 0x02 + DW_ACCESS_private = 0x03 +) + +// Table 29 +const ( + DW_VIS_local = 0x01 + DW_VIS_exported = 0x02 + DW_VIS_qualified = 0x03 +) + +// Table 30 +const ( + DW_VIRTUALITY_none = 0x00 + DW_VIRTUALITY_virtual = 0x01 + DW_VIRTUALITY_pure_virtual = 0x02 +) + +// Table 31 +const ( + DW_LANG_C89 = 0x0001 + DW_LANG_C = 0x0002 + DW_LANG_Ada83 = 0x0003 + DW_LANG_C_plus_plus = 0x0004 + DW_LANG_Cobol74 = 0x0005 + DW_LANG_Cobol85 = 0x0006 + DW_LANG_Fortran77 = 0x0007 + DW_LANG_Fortran90 = 0x0008 + DW_LANG_Pascal83 = 0x0009 + DW_LANG_Modula2 = 0x000a + // Dwarf3 + DW_LANG_Java = 0x000b + DW_LANG_C99 = 0x000c + DW_LANG_Ada95 = 0x000d + DW_LANG_Fortran95 = 0x000e + DW_LANG_PLI = 0x000f + DW_LANG_ObjC = 0x0010 + DW_LANG_ObjC_plus_plus = 0x0011 + DW_LANG_UPC = 0x0012 + DW_LANG_D = 0x0013 + // Dwarf4 + DW_LANG_Python = 0x0014 + // Dwarf5 + DW_LANG_Go = 0x0016 + + DW_LANG_lo_user = 0x8000 + DW_LANG_hi_user = 0xffff +) + +// Table 32 +const ( + DW_ID_case_sensitive = 0x00 + DW_ID_up_case = 0x01 + DW_ID_down_case = 0x02 + DW_ID_case_insensitive = 0x03 +) + +// Table 33 +const ( + DW_CC_normal = 0x01 + DW_CC_program = 0x02 + DW_CC_nocall = 0x03 + DW_CC_lo_user = 0x40 + DW_CC_hi_user = 0xff +) + +// Table 34 +const ( + DW_INL_not_inlined = 0x00 + DW_INL_inlined = 0x01 + DW_INL_declared_not_inlined = 0x02 + DW_INL_declared_inlined = 0x03 +) + +// Table 35 +const ( + DW_ORD_row_major = 0x00 + DW_ORD_col_major = 0x01 +) + +// Table 36 +const ( + DW_DSC_label = 0x00 + DW_DSC_range = 0x01 +) + +// Table 37 +const ( + DW_LNS_copy = 0x01 + DW_LNS_advance_pc = 0x02 + DW_LNS_advance_line = 0x03 + DW_LNS_set_file = 0x04 + DW_LNS_set_column = 0x05 + DW_LNS_negate_stmt = 0x06 + DW_LNS_set_basic_block = 0x07 + DW_LNS_const_add_pc = 0x08 + DW_LNS_fixed_advance_pc = 0x09 + // Dwarf3 + DW_LNS_set_prologue_end = 0x0a + DW_LNS_set_epilogue_begin = 0x0b + DW_LNS_set_isa = 0x0c +) + +// 
Table 38 +const ( + DW_LNE_end_sequence = 0x01 + DW_LNE_set_address = 0x02 + DW_LNE_define_file = 0x03 + DW_LNE_lo_user = 0x80 + DW_LNE_hi_user = 0xff +) + +// Table 39 +const ( + DW_MACINFO_define = 0x01 + DW_MACINFO_undef = 0x02 + DW_MACINFO_start_file = 0x03 + DW_MACINFO_end_file = 0x04 + DW_MACINFO_vendor_ext = 0xff +) + +// Table 40. +const ( + // operand,... + DW_CFA_nop = 0x00 + DW_CFA_set_loc = 0x01 // address + DW_CFA_advance_loc1 = 0x02 // 1-byte delta + DW_CFA_advance_loc2 = 0x03 // 2-byte delta + DW_CFA_advance_loc4 = 0x04 // 4-byte delta + DW_CFA_offset_extended = 0x05 // ULEB128 register, ULEB128 offset + DW_CFA_restore_extended = 0x06 // ULEB128 register + DW_CFA_undefined = 0x07 // ULEB128 register + DW_CFA_same_value = 0x08 // ULEB128 register + DW_CFA_register = 0x09 // ULEB128 register, ULEB128 register + DW_CFA_remember_state = 0x0a + DW_CFA_restore_state = 0x0b + + DW_CFA_def_cfa = 0x0c // ULEB128 register, ULEB128 offset + DW_CFA_def_cfa_register = 0x0d // ULEB128 register + DW_CFA_def_cfa_offset = 0x0e // ULEB128 offset + DW_CFA_def_cfa_expression = 0x0f // BLOCK + DW_CFA_expression = 0x10 // ULEB128 register, BLOCK + DW_CFA_offset_extended_sf = 0x11 // ULEB128 register, SLEB128 offset + DW_CFA_def_cfa_sf = 0x12 // ULEB128 register, SLEB128 offset + DW_CFA_def_cfa_offset_sf = 0x13 // SLEB128 offset + DW_CFA_val_offset = 0x14 // ULEB128, ULEB128 + DW_CFA_val_offset_sf = 0x15 // ULEB128, SLEB128 + DW_CFA_val_expression = 0x16 // ULEB128, BLOCK + + DW_CFA_lo_user = 0x1c + DW_CFA_hi_user = 0x3f + + // Opcodes that take an addend operand. + DW_CFA_advance_loc = 0x1 << 6 // +delta + DW_CFA_offset = 0x2 << 6 // +register (ULEB128 offset) + DW_CFA_restore = 0x3 << 6 // +register +) diff --git a/vendor/github.com/twitchyliquid64/golang-asm/goobj/builtin.go b/vendor/github.com/twitchyliquid64/golang-asm/goobj/builtin.go new file mode 100644 index 0000000..e7d612a --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/goobj/builtin.go @@ -0,0 +1,45 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goobj + +// Builtin (compiler-generated) function references appear +// frequently. We assign special indices for them, so they +// don't need to be referenced by name. + +// NBuiltin returns the number of listed builtin +// symbols. +func NBuiltin() int { + return len(builtins) +} + +// BuiltinName returns the name and ABI of the i-th +// builtin symbol. +func BuiltinName(i int) (string, int) { + return builtins[i].name, builtins[i].abi +} + +// BuiltinIdx returns the index of the builtin with the +// given name and abi, or -1 if it is not a builtin. +func BuiltinIdx(name string, abi int) int { + i, ok := builtinMap[name] + if !ok { + return -1 + } + if builtins[i].abi != abi { + return -1 + } + return i +} + +//go:generate go run mkbuiltin.go + +var builtinMap map[string]int + +func init() { + builtinMap = make(map[string]int, len(builtins)) + for i, b := range builtins { + builtinMap[b.name] = i + } +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/goobj/builtinlist.go b/vendor/github.com/twitchyliquid64/golang-asm/goobj/builtinlist.go new file mode 100644 index 0000000..0cca752 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/goobj/builtinlist.go @@ -0,0 +1,245 @@ +// Code generated by mkbuiltin.go. DO NOT EDIT. 
+ +package goobj + +var builtins = [...]struct { + name string + abi int +}{ + {"runtime.newobject", 1}, + {"runtime.mallocgc", 1}, + {"runtime.panicdivide", 1}, + {"runtime.panicshift", 1}, + {"runtime.panicmakeslicelen", 1}, + {"runtime.panicmakeslicecap", 1}, + {"runtime.throwinit", 1}, + {"runtime.panicwrap", 1}, + {"runtime.gopanic", 1}, + {"runtime.gorecover", 1}, + {"runtime.goschedguarded", 1}, + {"runtime.goPanicIndex", 1}, + {"runtime.goPanicIndexU", 1}, + {"runtime.goPanicSliceAlen", 1}, + {"runtime.goPanicSliceAlenU", 1}, + {"runtime.goPanicSliceAcap", 1}, + {"runtime.goPanicSliceAcapU", 1}, + {"runtime.goPanicSliceB", 1}, + {"runtime.goPanicSliceBU", 1}, + {"runtime.goPanicSlice3Alen", 1}, + {"runtime.goPanicSlice3AlenU", 1}, + {"runtime.goPanicSlice3Acap", 1}, + {"runtime.goPanicSlice3AcapU", 1}, + {"runtime.goPanicSlice3B", 1}, + {"runtime.goPanicSlice3BU", 1}, + {"runtime.goPanicSlice3C", 1}, + {"runtime.goPanicSlice3CU", 1}, + {"runtime.printbool", 1}, + {"runtime.printfloat", 1}, + {"runtime.printint", 1}, + {"runtime.printhex", 1}, + {"runtime.printuint", 1}, + {"runtime.printcomplex", 1}, + {"runtime.printstring", 1}, + {"runtime.printpointer", 1}, + {"runtime.printiface", 1}, + {"runtime.printeface", 1}, + {"runtime.printslice", 1}, + {"runtime.printnl", 1}, + {"runtime.printsp", 1}, + {"runtime.printlock", 1}, + {"runtime.printunlock", 1}, + {"runtime.concatstring2", 1}, + {"runtime.concatstring3", 1}, + {"runtime.concatstring4", 1}, + {"runtime.concatstring5", 1}, + {"runtime.concatstrings", 1}, + {"runtime.cmpstring", 1}, + {"runtime.intstring", 1}, + {"runtime.slicebytetostring", 1}, + {"runtime.slicebytetostringtmp", 1}, + {"runtime.slicerunetostring", 1}, + {"runtime.stringtoslicebyte", 1}, + {"runtime.stringtoslicerune", 1}, + {"runtime.slicecopy", 1}, + {"runtime.slicestringcopy", 1}, + {"runtime.decoderune", 1}, + {"runtime.countrunes", 1}, + {"runtime.convI2I", 1}, + {"runtime.convT16", 1}, + {"runtime.convT32", 1}, + {"runtime.convT64", 1}, + {"runtime.convTstring", 1}, + {"runtime.convTslice", 1}, + {"runtime.convT2E", 1}, + {"runtime.convT2Enoptr", 1}, + {"runtime.convT2I", 1}, + {"runtime.convT2Inoptr", 1}, + {"runtime.assertE2I", 1}, + {"runtime.assertE2I2", 1}, + {"runtime.assertI2I", 1}, + {"runtime.assertI2I2", 1}, + {"runtime.panicdottypeE", 1}, + {"runtime.panicdottypeI", 1}, + {"runtime.panicnildottype", 1}, + {"runtime.ifaceeq", 1}, + {"runtime.efaceeq", 1}, + {"runtime.fastrand", 1}, + {"runtime.makemap64", 1}, + {"runtime.makemap", 1}, + {"runtime.makemap_small", 1}, + {"runtime.mapaccess1", 1}, + {"runtime.mapaccess1_fast32", 1}, + {"runtime.mapaccess1_fast64", 1}, + {"runtime.mapaccess1_faststr", 1}, + {"runtime.mapaccess1_fat", 1}, + {"runtime.mapaccess2", 1}, + {"runtime.mapaccess2_fast32", 1}, + {"runtime.mapaccess2_fast64", 1}, + {"runtime.mapaccess2_faststr", 1}, + {"runtime.mapaccess2_fat", 1}, + {"runtime.mapassign", 1}, + {"runtime.mapassign_fast32", 1}, + {"runtime.mapassign_fast32ptr", 1}, + {"runtime.mapassign_fast64", 1}, + {"runtime.mapassign_fast64ptr", 1}, + {"runtime.mapassign_faststr", 1}, + {"runtime.mapiterinit", 1}, + {"runtime.mapdelete", 1}, + {"runtime.mapdelete_fast32", 1}, + {"runtime.mapdelete_fast64", 1}, + {"runtime.mapdelete_faststr", 1}, + {"runtime.mapiternext", 1}, + {"runtime.mapclear", 1}, + {"runtime.makechan64", 1}, + {"runtime.makechan", 1}, + {"runtime.chanrecv1", 1}, + {"runtime.chanrecv2", 1}, + {"runtime.chansend1", 1}, + {"runtime.closechan", 1}, + {"runtime.writeBarrier", 0}, + 
{"runtime.typedmemmove", 1}, + {"runtime.typedmemclr", 1}, + {"runtime.typedslicecopy", 1}, + {"runtime.selectnbsend", 1}, + {"runtime.selectnbrecv", 1}, + {"runtime.selectnbrecv2", 1}, + {"runtime.selectsetpc", 1}, + {"runtime.selectgo", 1}, + {"runtime.block", 1}, + {"runtime.makeslice", 1}, + {"runtime.makeslice64", 1}, + {"runtime.makeslicecopy", 1}, + {"runtime.growslice", 1}, + {"runtime.memmove", 1}, + {"runtime.memclrNoHeapPointers", 1}, + {"runtime.memclrHasPointers", 1}, + {"runtime.memequal", 1}, + {"runtime.memequal0", 1}, + {"runtime.memequal8", 1}, + {"runtime.memequal16", 1}, + {"runtime.memequal32", 1}, + {"runtime.memequal64", 1}, + {"runtime.memequal128", 1}, + {"runtime.f32equal", 1}, + {"runtime.f64equal", 1}, + {"runtime.c64equal", 1}, + {"runtime.c128equal", 1}, + {"runtime.strequal", 1}, + {"runtime.interequal", 1}, + {"runtime.nilinterequal", 1}, + {"runtime.memhash", 1}, + {"runtime.memhash0", 1}, + {"runtime.memhash8", 1}, + {"runtime.memhash16", 1}, + {"runtime.memhash32", 1}, + {"runtime.memhash64", 1}, + {"runtime.memhash128", 1}, + {"runtime.f32hash", 1}, + {"runtime.f64hash", 1}, + {"runtime.c64hash", 1}, + {"runtime.c128hash", 1}, + {"runtime.strhash", 1}, + {"runtime.interhash", 1}, + {"runtime.nilinterhash", 1}, + {"runtime.int64div", 1}, + {"runtime.uint64div", 1}, + {"runtime.int64mod", 1}, + {"runtime.uint64mod", 1}, + {"runtime.float64toint64", 1}, + {"runtime.float64touint64", 1}, + {"runtime.float64touint32", 1}, + {"runtime.int64tofloat64", 1}, + {"runtime.uint64tofloat64", 1}, + {"runtime.uint32tofloat64", 1}, + {"runtime.complex128div", 1}, + {"runtime.racefuncenter", 1}, + {"runtime.racefuncenterfp", 1}, + {"runtime.racefuncexit", 1}, + {"runtime.raceread", 1}, + {"runtime.racewrite", 1}, + {"runtime.racereadrange", 1}, + {"runtime.racewriterange", 1}, + {"runtime.msanread", 1}, + {"runtime.msanwrite", 1}, + {"runtime.checkptrAlignment", 1}, + {"runtime.checkptrArithmetic", 1}, + {"runtime.libfuzzerTraceCmp1", 1}, + {"runtime.libfuzzerTraceCmp2", 1}, + {"runtime.libfuzzerTraceCmp4", 1}, + {"runtime.libfuzzerTraceCmp8", 1}, + {"runtime.libfuzzerTraceConstCmp1", 1}, + {"runtime.libfuzzerTraceConstCmp2", 1}, + {"runtime.libfuzzerTraceConstCmp4", 1}, + {"runtime.libfuzzerTraceConstCmp8", 1}, + {"runtime.x86HasPOPCNT", 0}, + {"runtime.x86HasSSE41", 0}, + {"runtime.x86HasFMA", 0}, + {"runtime.armHasVFPv4", 0}, + {"runtime.arm64HasATOMICS", 0}, + {"runtime.deferproc", 1}, + {"runtime.deferprocStack", 1}, + {"runtime.deferreturn", 1}, + {"runtime.newproc", 1}, + {"runtime.panicoverflow", 1}, + {"runtime.sigpanic", 1}, + {"runtime.gcWriteBarrier", 0}, + {"runtime.morestack", 0}, + {"runtime.morestackc", 0}, + {"runtime.morestack_noctxt", 0}, + {"type.int8", 0}, + {"type.*int8", 0}, + {"type.uint8", 0}, + {"type.*uint8", 0}, + {"type.int16", 0}, + {"type.*int16", 0}, + {"type.uint16", 0}, + {"type.*uint16", 0}, + {"type.int32", 0}, + {"type.*int32", 0}, + {"type.uint32", 0}, + {"type.*uint32", 0}, + {"type.int64", 0}, + {"type.*int64", 0}, + {"type.uint64", 0}, + {"type.*uint64", 0}, + {"type.float32", 0}, + {"type.*float32", 0}, + {"type.float64", 0}, + {"type.*float64", 0}, + {"type.complex64", 0}, + {"type.*complex64", 0}, + {"type.complex128", 0}, + {"type.*complex128", 0}, + {"type.unsafe.Pointer", 0}, + {"type.*unsafe.Pointer", 0}, + {"type.uintptr", 0}, + {"type.*uintptr", 0}, + {"type.bool", 0}, + {"type.*bool", 0}, + {"type.string", 0}, + {"type.*string", 0}, + {"type.error", 0}, + {"type.*error", 0}, + {"type.func(error) string", 0}, + 
{"type.*func(error) string", 0}, +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/goobj/funcinfo.go b/vendor/github.com/twitchyliquid64/golang-asm/goobj/funcinfo.go new file mode 100644 index 0000000..9e19233 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/goobj/funcinfo.go @@ -0,0 +1,233 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goobj + +import ( + "bytes" + "github.com/twitchyliquid64/golang-asm/objabi" + "encoding/binary" +) + +// CUFileIndex is used to index the filenames that are stored in the +// per-package/per-CU FileList. +type CUFileIndex uint32 + +// FuncInfo is serialized as a symbol (aux symbol). The symbol data is +// the binary encoding of the struct below. +// +// TODO: make each pcdata a separate symbol? +type FuncInfo struct { + Args uint32 + Locals uint32 + FuncID objabi.FuncID + + Pcsp uint32 + Pcfile uint32 + Pcline uint32 + Pcinline uint32 + Pcdata []uint32 + PcdataEnd uint32 + Funcdataoff []uint32 + File []CUFileIndex + + InlTree []InlTreeNode +} + +func (a *FuncInfo) Write(w *bytes.Buffer) { + var b [4]byte + writeUint32 := func(x uint32) { + binary.LittleEndian.PutUint32(b[:], x) + w.Write(b[:]) + } + + writeUint32(a.Args) + writeUint32(a.Locals) + writeUint32(uint32(a.FuncID)) + + writeUint32(a.Pcsp) + writeUint32(a.Pcfile) + writeUint32(a.Pcline) + writeUint32(a.Pcinline) + writeUint32(uint32(len(a.Pcdata))) + for _, x := range a.Pcdata { + writeUint32(x) + } + writeUint32(a.PcdataEnd) + writeUint32(uint32(len(a.Funcdataoff))) + for _, x := range a.Funcdataoff { + writeUint32(x) + } + writeUint32(uint32(len(a.File))) + for _, f := range a.File { + writeUint32(uint32(f)) + } + writeUint32(uint32(len(a.InlTree))) + for i := range a.InlTree { + a.InlTree[i].Write(w) + } +} + +func (a *FuncInfo) Read(b []byte) { + readUint32 := func() uint32 { + x := binary.LittleEndian.Uint32(b) + b = b[4:] + return x + } + + a.Args = readUint32() + a.Locals = readUint32() + a.FuncID = objabi.FuncID(readUint32()) + + a.Pcsp = readUint32() + a.Pcfile = readUint32() + a.Pcline = readUint32() + a.Pcinline = readUint32() + pcdatalen := readUint32() + a.Pcdata = make([]uint32, pcdatalen) + for i := range a.Pcdata { + a.Pcdata[i] = readUint32() + } + a.PcdataEnd = readUint32() + funcdataofflen := readUint32() + a.Funcdataoff = make([]uint32, funcdataofflen) + for i := range a.Funcdataoff { + a.Funcdataoff[i] = readUint32() + } + filelen := readUint32() + a.File = make([]CUFileIndex, filelen) + for i := range a.File { + a.File[i] = CUFileIndex(readUint32()) + } + inltreelen := readUint32() + a.InlTree = make([]InlTreeNode, inltreelen) + for i := range a.InlTree { + b = a.InlTree[i].Read(b) + } +} + +// FuncInfoLengths is a cache containing a roadmap of offsets and +// lengths for things within a serialized FuncInfo. Each length field +// stores the number of items (e.g. files, inltree nodes, etc), and the +// corresponding "off" field stores the byte offset of the start of +// the items in question. 
+type FuncInfoLengths struct { + NumPcdata uint32 + PcdataOff uint32 + NumFuncdataoff uint32 + FuncdataoffOff uint32 + NumFile uint32 + FileOff uint32 + NumInlTree uint32 + InlTreeOff uint32 + Initialized bool +} + +func (*FuncInfo) ReadFuncInfoLengths(b []byte) FuncInfoLengths { + var result FuncInfoLengths + + const numpcdataOff = 28 + result.NumPcdata = binary.LittleEndian.Uint32(b[numpcdataOff:]) + result.PcdataOff = numpcdataOff + 4 + + numfuncdataoffOff := result.PcdataOff + 4*(result.NumPcdata+1) + result.NumFuncdataoff = binary.LittleEndian.Uint32(b[numfuncdataoffOff:]) + result.FuncdataoffOff = numfuncdataoffOff + 4 + + numfileOff := result.FuncdataoffOff + 4*result.NumFuncdataoff + result.NumFile = binary.LittleEndian.Uint32(b[numfileOff:]) + result.FileOff = numfileOff + 4 + + numinltreeOff := result.FileOff + 4*result.NumFile + result.NumInlTree = binary.LittleEndian.Uint32(b[numinltreeOff:]) + result.InlTreeOff = numinltreeOff + 4 + + result.Initialized = true + + return result +} + +func (*FuncInfo) ReadArgs(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func (*FuncInfo) ReadLocals(b []byte) uint32 { return binary.LittleEndian.Uint32(b[4:]) } + +func (*FuncInfo) ReadFuncID(b []byte) uint32 { return binary.LittleEndian.Uint32(b[8:]) } + +// return start and end offsets. +func (*FuncInfo) ReadPcsp(b []byte) (uint32, uint32) { + return binary.LittleEndian.Uint32(b[12:]), binary.LittleEndian.Uint32(b[16:]) +} + +// return start and end offsets. +func (*FuncInfo) ReadPcfile(b []byte) (uint32, uint32) { + return binary.LittleEndian.Uint32(b[16:]), binary.LittleEndian.Uint32(b[20:]) +} + +// return start and end offsets. +func (*FuncInfo) ReadPcline(b []byte) (uint32, uint32) { + return binary.LittleEndian.Uint32(b[20:]), binary.LittleEndian.Uint32(b[24:]) +} + +// return start and end offsets. +func (*FuncInfo) ReadPcinline(b []byte, pcdataoffset uint32) (uint32, uint32) { + return binary.LittleEndian.Uint32(b[24:]), binary.LittleEndian.Uint32(b[pcdataoffset:]) +} + +// return start and end offsets. +func (*FuncInfo) ReadPcdata(b []byte, pcdataoffset uint32, k uint32) (uint32, uint32) { + return binary.LittleEndian.Uint32(b[pcdataoffset+4*k:]), binary.LittleEndian.Uint32(b[pcdataoffset+4+4*k:]) +} + +func (*FuncInfo) ReadFuncdataoff(b []byte, funcdataofffoff uint32, k uint32) int64 { + return int64(binary.LittleEndian.Uint32(b[funcdataofffoff+4*k:])) +} + +func (*FuncInfo) ReadFile(b []byte, filesoff uint32, k uint32) CUFileIndex { + return CUFileIndex(binary.LittleEndian.Uint32(b[filesoff+4*k:])) +} + +func (*FuncInfo) ReadInlTree(b []byte, inltreeoff uint32, k uint32) InlTreeNode { + const inlTreeNodeSize = 4 * 6 + var result InlTreeNode + result.Read(b[inltreeoff+k*inlTreeNodeSize:]) + return result +} + +// InlTreeNode is the serialized form of FileInfo.InlTree. +type InlTreeNode struct { + Parent int32 + File CUFileIndex + Line int32 + Func SymRef + ParentPC int32 +} + +func (inl *InlTreeNode) Write(w *bytes.Buffer) { + var b [4]byte + writeUint32 := func(x uint32) { + binary.LittleEndian.PutUint32(b[:], x) + w.Write(b[:]) + } + writeUint32(uint32(inl.Parent)) + writeUint32(uint32(inl.File)) + writeUint32(uint32(inl.Line)) + writeUint32(inl.Func.PkgIdx) + writeUint32(inl.Func.SymIdx) + writeUint32(uint32(inl.ParentPC)) +} + +// Read an InlTreeNode from b, return the remaining bytes. 
+func (inl *InlTreeNode) Read(b []byte) []byte {
+ readUint32 := func() uint32 {
+ x := binary.LittleEndian.Uint32(b)
+ b = b[4:]
+ return x
+ }
+ inl.Parent = int32(readUint32())
+ inl.File = CUFileIndex(readUint32())
+ inl.Line = int32(readUint32())
+ inl.Func = SymRef{readUint32(), readUint32()}
+ inl.ParentPC = int32(readUint32())
+ return b
+}
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/goobj/objfile.go b/vendor/github.com/twitchyliquid64/golang-asm/goobj/objfile.go
new file mode 100644
index 0000000..3303549
--- /dev/null
+++ b/vendor/github.com/twitchyliquid64/golang-asm/goobj/objfile.go
@@ -0,0 +1,871 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This package defines the Go object file format, and provides "low-level" functions
+// for reading and writing object files.
+
+// The object file is understood by the compiler, assembler, linker, and tools. They
+// have "high level" code that operates on object files, handling application-specific
+// logic, and use this package for the actual reading and writing. Specifically, the
+// code below:
+//
+// - cmd/internal/obj/objfile.go (used by cmd/asm and cmd/compile)
+// - cmd/internal/objfile/goobj.go (used by cmd/nm and cmd/objdump)
+// - cmd/link/internal/loader package (used by cmd/link)
+//
+// If the object file format changes, they may (or may not) need to change.
+
+package goobj
+
+import (
+ "bytes"
+ "github.com/twitchyliquid64/golang-asm/bio"
+ "crypto/sha1"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "github.com/twitchyliquid64/golang-asm/unsafeheader"
+ "io"
+ "unsafe"
+)
+
+// New object file format.
+//
+// Header struct {
+// Magic [...]byte // "\x00go116ld"
+// Fingerprint [8]byte
+// Flags uint32
+// Offsets [...]uint32 // byte offset of each block below
+// }
+//
+// Strings [...]struct {
+// Data [...]byte
+// }
+//
+// Autolib [...]struct { // imported packages (for file loading)
+// Pkg string
+// Fingerprint [8]byte
+// }
+//
+// PkgIndex [...]string // referenced packages by index
+//
+// Files [...]string
+//
+// SymbolDefs [...]struct {
+// Name string
+// ABI uint16
+// Type uint8
+// Flag uint8
+// Flag2 uint8
+// Size uint32
+// }
+// Hashed64Defs [...]struct { // short hashed (content-addressable) symbol definitions
+// ... // same as SymbolDefs
+// }
+// HashedDefs [...]struct { // hashed (content-addressable) symbol definitions
+// ... // same as SymbolDefs
+// }
+// NonPkgDefs [...]struct { // non-pkg symbol definitions
+// ... // same as SymbolDefs
+// }
+// NonPkgRefs [...]struct { // non-pkg symbol references
+// ... // same as SymbolDefs
+// }
+//
+// RefFlags [...]struct { // referenced symbol flags
+// Sym symRef
+// Flag uint8
+// Flag2 uint8
+// }
+//
+// Hash64 [...][8]byte
+// Hash [...][N]byte
+//
+// RelocIndex [...]uint32 // index to Relocs
+// AuxIndex [...]uint32 // index to Aux
+// DataIndex [...]uint32 // offset to Data
+//
+// Relocs [...]struct {
+// Off int32
+// Size uint8
+// Type uint8
+// Add int64
+// Sym symRef
+// }
+//
+// Aux [...]struct {
+// Type uint8
+// Sym symRef
+// }
+//
+// Data [...]byte
+// Pcdata [...]byte
+//
+// // blocks only used by tools (objdump, nm)
+//
+// RefNames [...]struct { // referenced symbol names
+// Sym symRef
+// Name string
+// // TODO: include ABI version as well?
+// }
+//
+// A string is encoded as a uint32 length followed by a uint32 offset
+// that points to the corresponding string bytes.
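+//
+// For example (editor's illustration): a reference to the 3-byte string
+// "foo" stored at byte offset 0x40 of the Strings block is written as
+// the two little-endian uint32s {3, 0x40}.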
+//
+// symRef is struct { PkgIdx, SymIdx uint32 }.
+//
+// Slice type (e.g. []symRef) is encoded as a length prefix (uint32)
+// followed by that number of elements.
+//
+// The types below correspond to the encoded data structure in the
+// object file.

+// Symbol indexing.
+//
+// Each symbol is referenced with a pair of indices, { PkgIdx, SymIdx },
+// as the symRef struct above.
+//
+// PkgIdx is either a predeclared index (see PkgIdxNone below) or
+// an index of an imported package. For the latter case, PkgIdx is the
+// index of the package in the PkgIndex array. 0 is an invalid index.
+//
+// SymIdx is the index of the symbol in the given package.
+// - If PkgIdx is PkgIdxSelf, SymIdx is the index of the symbol in the
+// SymbolDefs array.
+// - If PkgIdx is PkgIdxHashed64, SymIdx is the index of the symbol in the
+// Hashed64Defs array.
+// - If PkgIdx is PkgIdxHashed, SymIdx is the index of the symbol in the
+// HashedDefs array.
+// - If PkgIdx is PkgIdxNone, SymIdx is the index of the symbol in the
+// NonPkgDefs array (could naturally overflow to the NonPkgRefs array).
+// - Otherwise, SymIdx is the index of the symbol in some other package's
+// SymbolDefs array.
+//
+// {0, 0} represents a nil symbol. Otherwise PkgIdx should not be 0.
+//
+// Hash contains the content hashes of content-addressable symbols, whose
+// PkgIdx is PkgIdxHashed, in the same order as the HashedDefs array.
+// Hash64 is similar, for PkgIdxHashed64 symbols.
+//
+// RelocIndex, AuxIndex, and DataIndex contain indices/offsets to the
+// Relocs/Aux/Data blocks, one element per symbol, first for all the
+// defined symbols, then all the defined hashed and non-package symbols,
+// in the same order as the SymbolDefs/Hashed64Defs/HashedDefs/NonPkgDefs
+// arrays. For N total defined symbols, the array is of length N+1. The
+// last element is the total number of relocations (aux symbols, data
+// blocks, etc.).
+//
+// They can be accessed by index. For the i-th symbol, its relocations
+// are the RelocIndex[i]-th (inclusive) to RelocIndex[i+1]-th (exclusive)
+// elements in the Relocs array. Aux/Data are likewise. (The index is
+// 0-based.)

+// Auxiliary symbols.
+//
+// Each symbol may (or may not) be associated with a number of auxiliary
+// symbols. They are described in the Aux block. See the Aux struct below.
+// Currently a symbol's Gotype, FuncInfo, and associated DWARF symbols
+// are auxiliary symbols.

+const stringRefSize = 8 // two uint32s

+type FingerprintType [8]byte

+func (fp FingerprintType) IsZero() bool { return fp == FingerprintType{} }

+// Package Index.
+const (
+ PkgIdxNone = (1<<31 - 1) - iota // Non-package symbols
+ PkgIdxHashed64 // Short hashed (content-addressable) symbols
+ PkgIdxHashed // Hashed (content-addressable) symbols
+ PkgIdxBuiltin // Predefined runtime symbols (ex: runtime.newobject)
+ PkgIdxSelf // Symbols defined in the current package
+ PkgIdxInvalid = 0
+ // The index of other referenced packages starts from 1.
+)

+// Blocks
+const (
+ BlkAutolib = iota
+ BlkPkgIdx
+ BlkFile
+ BlkSymdef
+ BlkHashed64def
+ BlkHasheddef
+ BlkNonpkgdef
+ BlkNonpkgref
+ BlkRefFlags
+ BlkHash64
+ BlkHash
+ BlkRelocIdx
+ BlkAuxIdx
+ BlkDataIdx
+ BlkReloc
+ BlkAux
+ BlkData
+ BlkPcdata
+ BlkRefName
+ BlkEnd
+ NBlk
+)

+// File header.
+// TODO: probably no need to export this.
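+// Layout sketch (editor's note, derived from the Write and Read methods
+// below): Magic ("\x00go116ld", 8 bytes), Fingerprint (8 bytes), Flags
+// (uint32), then NBlk little-endian uint32 block offsets.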
+type Header struct { + Magic string + Fingerprint FingerprintType + Flags uint32 + Offsets [NBlk]uint32 +} + +const Magic = "\x00go116ld" + +func (h *Header) Write(w *Writer) { + w.RawString(h.Magic) + w.Bytes(h.Fingerprint[:]) + w.Uint32(h.Flags) + for _, x := range h.Offsets { + w.Uint32(x) + } +} + +func (h *Header) Read(r *Reader) error { + b := r.BytesAt(0, len(Magic)) + h.Magic = string(b) + if h.Magic != Magic { + return errors.New("wrong magic, not a Go object file") + } + off := uint32(len(h.Magic)) + copy(h.Fingerprint[:], r.BytesAt(off, len(h.Fingerprint))) + off += 8 + h.Flags = r.uint32At(off) + off += 4 + for i := range h.Offsets { + h.Offsets[i] = r.uint32At(off) + off += 4 + } + return nil +} + +func (h *Header) Size() int { + return len(h.Magic) + 4 + 4*len(h.Offsets) +} + +// Autolib +type ImportedPkg struct { + Pkg string + Fingerprint FingerprintType +} + +const importedPkgSize = stringRefSize + 8 + +func (p *ImportedPkg) Write(w *Writer) { + w.StringRef(p.Pkg) + w.Bytes(p.Fingerprint[:]) +} + +// Symbol definition. +// +// Serialized format: +// Sym struct { +// Name string +// ABI uint16 +// Type uint8 +// Flag uint8 +// Flag2 uint8 +// Siz uint32 +// Align uint32 +// } +type Sym [SymSize]byte + +const SymSize = stringRefSize + 2 + 1 + 1 + 1 + 4 + 4 + +const SymABIstatic = ^uint16(0) + +const ( + ObjFlagShared = 1 << iota // this object is built with -shared + ObjFlagNeedNameExpansion // the linker needs to expand `"".` to package path in symbol names + ObjFlagFromAssembly // object is from asm src, not go +) + +// Sym.Flag +const ( + SymFlagDupok = 1 << iota + SymFlagLocal + SymFlagTypelink + SymFlagLeaf + SymFlagNoSplit + SymFlagReflectMethod + SymFlagGoType + SymFlagTopFrame +) + +// Sym.Flag2 +const ( + SymFlagUsedInIface = 1 << iota + SymFlagItab +) + +// Returns the length of the name of the symbol. 
+func (s *Sym) NameLen(r *Reader) int { + return int(binary.LittleEndian.Uint32(s[:])) +} + +func (s *Sym) Name(r *Reader) string { + len := binary.LittleEndian.Uint32(s[:]) + off := binary.LittleEndian.Uint32(s[4:]) + return r.StringAt(off, len) +} + +func (s *Sym) ABI() uint16 { return binary.LittleEndian.Uint16(s[8:]) } +func (s *Sym) Type() uint8 { return s[10] } +func (s *Sym) Flag() uint8 { return s[11] } +func (s *Sym) Flag2() uint8 { return s[12] } +func (s *Sym) Siz() uint32 { return binary.LittleEndian.Uint32(s[13:]) } +func (s *Sym) Align() uint32 { return binary.LittleEndian.Uint32(s[17:]) } + +func (s *Sym) Dupok() bool { return s.Flag()&SymFlagDupok != 0 } +func (s *Sym) Local() bool { return s.Flag()&SymFlagLocal != 0 } +func (s *Sym) Typelink() bool { return s.Flag()&SymFlagTypelink != 0 } +func (s *Sym) Leaf() bool { return s.Flag()&SymFlagLeaf != 0 } +func (s *Sym) NoSplit() bool { return s.Flag()&SymFlagNoSplit != 0 } +func (s *Sym) ReflectMethod() bool { return s.Flag()&SymFlagReflectMethod != 0 } +func (s *Sym) IsGoType() bool { return s.Flag()&SymFlagGoType != 0 } +func (s *Sym) TopFrame() bool { return s.Flag()&SymFlagTopFrame != 0 } +func (s *Sym) UsedInIface() bool { return s.Flag2()&SymFlagUsedInIface != 0 } +func (s *Sym) IsItab() bool { return s.Flag2()&SymFlagItab != 0 } + +func (s *Sym) SetName(x string, w *Writer) { + binary.LittleEndian.PutUint32(s[:], uint32(len(x))) + binary.LittleEndian.PutUint32(s[4:], w.stringOff(x)) +} + +func (s *Sym) SetABI(x uint16) { binary.LittleEndian.PutUint16(s[8:], x) } +func (s *Sym) SetType(x uint8) { s[10] = x } +func (s *Sym) SetFlag(x uint8) { s[11] = x } +func (s *Sym) SetFlag2(x uint8) { s[12] = x } +func (s *Sym) SetSiz(x uint32) { binary.LittleEndian.PutUint32(s[13:], x) } +func (s *Sym) SetAlign(x uint32) { binary.LittleEndian.PutUint32(s[17:], x) } + +func (s *Sym) Write(w *Writer) { w.Bytes(s[:]) } + +// for testing +func (s *Sym) fromBytes(b []byte) { copy(s[:], b) } + +// Symbol reference. +type SymRef struct { + PkgIdx uint32 + SymIdx uint32 +} + +// Hash64 +type Hash64Type [Hash64Size]byte + +const Hash64Size = 8 + +// Hash +type HashType [HashSize]byte + +const HashSize = sha1.Size + +// Relocation. +// +// Serialized format: +// Reloc struct { +// Off int32 +// Siz uint8 +// Type uint8 +// Add int64 +// Sym SymRef +// } +type Reloc [RelocSize]byte + +const RelocSize = 4 + 1 + 1 + 8 + 8 + +func (r *Reloc) Off() int32 { return int32(binary.LittleEndian.Uint32(r[:])) } +func (r *Reloc) Siz() uint8 { return r[4] } +func (r *Reloc) Type() uint8 { return r[5] } +func (r *Reloc) Add() int64 { return int64(binary.LittleEndian.Uint64(r[6:])) } +func (r *Reloc) Sym() SymRef { + return SymRef{binary.LittleEndian.Uint32(r[14:]), binary.LittleEndian.Uint32(r[18:])} +} + +func (r *Reloc) SetOff(x int32) { binary.LittleEndian.PutUint32(r[:], uint32(x)) } +func (r *Reloc) SetSiz(x uint8) { r[4] = x } +func (r *Reloc) SetType(x uint8) { r[5] = x } +func (r *Reloc) SetAdd(x int64) { binary.LittleEndian.PutUint64(r[6:], uint64(x)) } +func (r *Reloc) SetSym(x SymRef) { + binary.LittleEndian.PutUint32(r[14:], x.PkgIdx) + binary.LittleEndian.PutUint32(r[18:], x.SymIdx) +} + +func (r *Reloc) Set(off int32, size uint8, typ uint8, add int64, sym SymRef) { + r.SetOff(off) + r.SetSiz(size) + r.SetType(typ) + r.SetAdd(add) + r.SetSym(sym) +} + +func (r *Reloc) Write(w *Writer) { w.Bytes(r[:]) } + +// for testing +func (r *Reloc) fromBytes(b []byte) { copy(r[:], b) } + +// Aux symbol info. 
+// +// Serialized format: +// Aux struct { +// Type uint8 +// Sym SymRef +// } +type Aux [AuxSize]byte + +const AuxSize = 1 + 8 + +// Aux Type +const ( + AuxGotype = iota + AuxFuncInfo + AuxFuncdata + AuxDwarfInfo + AuxDwarfLoc + AuxDwarfRanges + AuxDwarfLines + + // TODO: more. Pcdata? +) + +func (a *Aux) Type() uint8 { return a[0] } +func (a *Aux) Sym() SymRef { + return SymRef{binary.LittleEndian.Uint32(a[1:]), binary.LittleEndian.Uint32(a[5:])} +} + +func (a *Aux) SetType(x uint8) { a[0] = x } +func (a *Aux) SetSym(x SymRef) { + binary.LittleEndian.PutUint32(a[1:], x.PkgIdx) + binary.LittleEndian.PutUint32(a[5:], x.SymIdx) +} + +func (a *Aux) Write(w *Writer) { w.Bytes(a[:]) } + +// for testing +func (a *Aux) fromBytes(b []byte) { copy(a[:], b) } + +// Referenced symbol flags. +// +// Serialized format: +// RefFlags struct { +// Sym symRef +// Flag uint8 +// Flag2 uint8 +// } +type RefFlags [RefFlagsSize]byte + +const RefFlagsSize = 8 + 1 + 1 + +func (r *RefFlags) Sym() SymRef { + return SymRef{binary.LittleEndian.Uint32(r[:]), binary.LittleEndian.Uint32(r[4:])} +} +func (r *RefFlags) Flag() uint8 { return r[8] } +func (r *RefFlags) Flag2() uint8 { return r[9] } + +func (r *RefFlags) SetSym(x SymRef) { + binary.LittleEndian.PutUint32(r[:], x.PkgIdx) + binary.LittleEndian.PutUint32(r[4:], x.SymIdx) +} +func (r *RefFlags) SetFlag(x uint8) { r[8] = x } +func (r *RefFlags) SetFlag2(x uint8) { r[9] = x } + +func (r *RefFlags) Write(w *Writer) { w.Bytes(r[:]) } + +// Referenced symbol name. +// +// Serialized format: +// RefName struct { +// Sym symRef +// Name string +// } +type RefName [RefNameSize]byte + +const RefNameSize = 8 + stringRefSize + +func (n *RefName) Sym() SymRef { + return SymRef{binary.LittleEndian.Uint32(n[:]), binary.LittleEndian.Uint32(n[4:])} +} +func (n *RefName) Name(r *Reader) string { + len := binary.LittleEndian.Uint32(n[8:]) + off := binary.LittleEndian.Uint32(n[12:]) + return r.StringAt(off, len) +} + +func (n *RefName) SetSym(x SymRef) { + binary.LittleEndian.PutUint32(n[:], x.PkgIdx) + binary.LittleEndian.PutUint32(n[4:], x.SymIdx) +} +func (n *RefName) SetName(x string, w *Writer) { + binary.LittleEndian.PutUint32(n[8:], uint32(len(x))) + binary.LittleEndian.PutUint32(n[12:], w.stringOff(x)) +} + +func (n *RefName) Write(w *Writer) { w.Bytes(n[:]) } + +type Writer struct { + wr *bio.Writer + stringMap map[string]uint32 + off uint32 // running offset +} + +func NewWriter(wr *bio.Writer) *Writer { + return &Writer{wr: wr, stringMap: make(map[string]uint32)} +} + +func (w *Writer) AddString(s string) { + if _, ok := w.stringMap[s]; ok { + return + } + w.stringMap[s] = w.off + w.RawString(s) +} + +func (w *Writer) stringOff(s string) uint32 { + off, ok := w.stringMap[s] + if !ok { + panic(fmt.Sprintf("writeStringRef: string not added: %q", s)) + } + return off +} + +func (w *Writer) StringRef(s string) { + w.Uint32(uint32(len(s))) + w.Uint32(w.stringOff(s)) +} + +func (w *Writer) RawString(s string) { + w.wr.WriteString(s) + w.off += uint32(len(s)) +} + +func (w *Writer) Bytes(s []byte) { + w.wr.Write(s) + w.off += uint32(len(s)) +} + +func (w *Writer) Uint64(x uint64) { + var b [8]byte + binary.LittleEndian.PutUint64(b[:], x) + w.wr.Write(b[:]) + w.off += 8 +} + +func (w *Writer) Uint32(x uint32) { + var b [4]byte + binary.LittleEndian.PutUint32(b[:], x) + w.wr.Write(b[:]) + w.off += 4 +} + +func (w *Writer) Uint16(x uint16) { + var b [2]byte + binary.LittleEndian.PutUint16(b[:], x) + w.wr.Write(b[:]) + w.off += 2 +} + +func (w *Writer) Uint8(x uint8) { + 
w.wr.WriteByte(x) + w.off++ +} + +func (w *Writer) Offset() uint32 { + return w.off +} + +type Reader struct { + b []byte // mmapped bytes, if not nil + readonly bool // whether b is backed with read-only memory + + rd io.ReaderAt + start uint32 + h Header // keep block offsets +} + +func NewReaderFromBytes(b []byte, readonly bool) *Reader { + r := &Reader{b: b, readonly: readonly, rd: bytes.NewReader(b), start: 0} + err := r.h.Read(r) + if err != nil { + return nil + } + return r +} + +func (r *Reader) BytesAt(off uint32, len int) []byte { + if len == 0 { + return nil + } + end := int(off) + len + return r.b[int(off):end:end] +} + +func (r *Reader) uint64At(off uint32) uint64 { + b := r.BytesAt(off, 8) + return binary.LittleEndian.Uint64(b) +} + +func (r *Reader) int64At(off uint32) int64 { + return int64(r.uint64At(off)) +} + +func (r *Reader) uint32At(off uint32) uint32 { + b := r.BytesAt(off, 4) + return binary.LittleEndian.Uint32(b) +} + +func (r *Reader) int32At(off uint32) int32 { + return int32(r.uint32At(off)) +} + +func (r *Reader) uint16At(off uint32) uint16 { + b := r.BytesAt(off, 2) + return binary.LittleEndian.Uint16(b) +} + +func (r *Reader) uint8At(off uint32) uint8 { + b := r.BytesAt(off, 1) + return b[0] +} + +func (r *Reader) StringAt(off uint32, len uint32) string { + b := r.b[off : off+len] + if r.readonly { + return toString(b) // backed by RO memory, ok to make unsafe string + } + return string(b) +} + +func toString(b []byte) string { + if len(b) == 0 { + return "" + } + + var s string + hdr := (*unsafeheader.String)(unsafe.Pointer(&s)) + hdr.Data = unsafe.Pointer(&b[0]) + hdr.Len = len(b) + + return s +} + +func (r *Reader) StringRef(off uint32) string { + l := r.uint32At(off) + return r.StringAt(r.uint32At(off+4), l) +} + +func (r *Reader) Fingerprint() FingerprintType { + return r.h.Fingerprint +} + +func (r *Reader) Autolib() []ImportedPkg { + n := (r.h.Offsets[BlkAutolib+1] - r.h.Offsets[BlkAutolib]) / importedPkgSize + s := make([]ImportedPkg, n) + off := r.h.Offsets[BlkAutolib] + for i := range s { + s[i].Pkg = r.StringRef(off) + copy(s[i].Fingerprint[:], r.BytesAt(off+stringRefSize, len(s[i].Fingerprint))) + off += importedPkgSize + } + return s +} + +func (r *Reader) Pkglist() []string { + n := (r.h.Offsets[BlkPkgIdx+1] - r.h.Offsets[BlkPkgIdx]) / stringRefSize + s := make([]string, n) + off := r.h.Offsets[BlkPkgIdx] + for i := range s { + s[i] = r.StringRef(off) + off += stringRefSize + } + return s +} + +func (r *Reader) NPkg() int { + return int(r.h.Offsets[BlkPkgIdx+1]-r.h.Offsets[BlkPkgIdx]) / stringRefSize +} + +func (r *Reader) Pkg(i int) string { + off := r.h.Offsets[BlkPkgIdx] + uint32(i)*stringRefSize + return r.StringRef(off) +} + +func (r *Reader) NFile() int { + return int(r.h.Offsets[BlkFile+1]-r.h.Offsets[BlkFile]) / stringRefSize +} + +func (r *Reader) File(i int) string { + off := r.h.Offsets[BlkFile] + uint32(i)*stringRefSize + return r.StringRef(off) +} + +func (r *Reader) NSym() int { + return int(r.h.Offsets[BlkSymdef+1]-r.h.Offsets[BlkSymdef]) / SymSize +} + +func (r *Reader) NHashed64def() int { + return int(r.h.Offsets[BlkHashed64def+1]-r.h.Offsets[BlkHashed64def]) / SymSize +} + +func (r *Reader) NHasheddef() int { + return int(r.h.Offsets[BlkHasheddef+1]-r.h.Offsets[BlkHasheddef]) / SymSize +} + +func (r *Reader) NNonpkgdef() int { + return int(r.h.Offsets[BlkNonpkgdef+1]-r.h.Offsets[BlkNonpkgdef]) / SymSize +} + +func (r *Reader) NNonpkgref() int { + return int(r.h.Offsets[BlkNonpkgref+1]-r.h.Offsets[BlkNonpkgref]) / SymSize +} + 
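+// exampleSymNames is an editorial sketch, not part of the upstream
+// package: it shows how the count and accessor methods combine by
+// walking the package-defined symbols of a Reader and collecting
+// their names.
+func exampleSymNames(r *Reader) []string {
+ names := make([]string, 0, r.NSym())
+ for i := uint32(0); i < uint32(r.NSym()); i++ {
+ names = append(names, r.Sym(i).Name(r))
+ }
+ return names
+}
+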
+// SymOff returns the offset of the i-th symbol. +func (r *Reader) SymOff(i uint32) uint32 { + return r.h.Offsets[BlkSymdef] + uint32(i*SymSize) +} + +// Sym returns a pointer to the i-th symbol. +func (r *Reader) Sym(i uint32) *Sym { + off := r.SymOff(i) + return (*Sym)(unsafe.Pointer(&r.b[off])) +} + +// NRefFlags returns the number of referenced symbol flags. +func (r *Reader) NRefFlags() int { + return int(r.h.Offsets[BlkRefFlags+1]-r.h.Offsets[BlkRefFlags]) / RefFlagsSize +} + +// RefFlags returns a pointer to the i-th referenced symbol flags. +// Note: here i is not a local symbol index, just a counter. +func (r *Reader) RefFlags(i int) *RefFlags { + off := r.h.Offsets[BlkRefFlags] + uint32(i*RefFlagsSize) + return (*RefFlags)(unsafe.Pointer(&r.b[off])) +} + +// Hash64 returns the i-th short hashed symbol's hash. +// Note: here i is the index of short hashed symbols, not all symbols +// (unlike other accessors). +func (r *Reader) Hash64(i uint32) uint64 { + off := r.h.Offsets[BlkHash64] + uint32(i*Hash64Size) + return r.uint64At(off) +} + +// Hash returns a pointer to the i-th hashed symbol's hash. +// Note: here i is the index of hashed symbols, not all symbols +// (unlike other accessors). +func (r *Reader) Hash(i uint32) *HashType { + off := r.h.Offsets[BlkHash] + uint32(i*HashSize) + return (*HashType)(unsafe.Pointer(&r.b[off])) +} + +// NReloc returns the number of relocations of the i-th symbol. +func (r *Reader) NReloc(i uint32) int { + relocIdxOff := r.h.Offsets[BlkRelocIdx] + uint32(i*4) + return int(r.uint32At(relocIdxOff+4) - r.uint32At(relocIdxOff)) +} + +// RelocOff returns the offset of the j-th relocation of the i-th symbol. +func (r *Reader) RelocOff(i uint32, j int) uint32 { + relocIdxOff := r.h.Offsets[BlkRelocIdx] + uint32(i*4) + relocIdx := r.uint32At(relocIdxOff) + return r.h.Offsets[BlkReloc] + (relocIdx+uint32(j))*uint32(RelocSize) +} + +// Reloc returns a pointer to the j-th relocation of the i-th symbol. +func (r *Reader) Reloc(i uint32, j int) *Reloc { + off := r.RelocOff(i, j) + return (*Reloc)(unsafe.Pointer(&r.b[off])) +} + +// Relocs returns a pointer to the relocations of the i-th symbol. +func (r *Reader) Relocs(i uint32) []Reloc { + off := r.RelocOff(i, 0) + n := r.NReloc(i) + return (*[1 << 20]Reloc)(unsafe.Pointer(&r.b[off]))[:n:n] +} + +// NAux returns the number of aux symbols of the i-th symbol. +func (r *Reader) NAux(i uint32) int { + auxIdxOff := r.h.Offsets[BlkAuxIdx] + i*4 + return int(r.uint32At(auxIdxOff+4) - r.uint32At(auxIdxOff)) +} + +// AuxOff returns the offset of the j-th aux symbol of the i-th symbol. +func (r *Reader) AuxOff(i uint32, j int) uint32 { + auxIdxOff := r.h.Offsets[BlkAuxIdx] + i*4 + auxIdx := r.uint32At(auxIdxOff) + return r.h.Offsets[BlkAux] + (auxIdx+uint32(j))*uint32(AuxSize) +} + +// Aux returns a pointer to the j-th aux symbol of the i-th symbol. +func (r *Reader) Aux(i uint32, j int) *Aux { + off := r.AuxOff(i, j) + return (*Aux)(unsafe.Pointer(&r.b[off])) +} + +// Auxs returns the aux symbols of the i-th symbol. +func (r *Reader) Auxs(i uint32) []Aux { + off := r.AuxOff(i, 0) + n := r.NAux(i) + return (*[1 << 20]Aux)(unsafe.Pointer(&r.b[off]))[:n:n] +} + +// DataOff returns the offset of the i-th symbol's data. +func (r *Reader) DataOff(i uint32) uint32 { + dataIdxOff := r.h.Offsets[BlkDataIdx] + i*4 + return r.h.Offsets[BlkData] + r.uint32At(dataIdxOff) +} + +// DataSize returns the size of the i-th symbol's data. 
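+// (Editor's note: as with NReloc and NAux above, this is the difference
+// of two consecutive DataIndex entries.)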
+func (r *Reader) DataSize(i uint32) int {
+ dataIdxOff := r.h.Offsets[BlkDataIdx] + i*4
+ return int(r.uint32At(dataIdxOff+4) - r.uint32At(dataIdxOff))
+}
+
+// Data returns the i-th symbol's data.
+func (r *Reader) Data(i uint32) []byte {
+ dataIdxOff := r.h.Offsets[BlkDataIdx] + i*4
+ base := r.h.Offsets[BlkData]
+ off := r.uint32At(dataIdxOff)
+ end := r.uint32At(dataIdxOff + 4)
+ return r.BytesAt(base+off, int(end-off))
+}
+
+// PcdataBase returns the base offset of the pcdata block.
+func (r *Reader) PcdataBase() uint32 {
+ return r.h.Offsets[BlkPcdata]
+}
+
+// NRefName returns the number of referenced symbol names.
+func (r *Reader) NRefName() int {
+ return int(r.h.Offsets[BlkRefName+1]-r.h.Offsets[BlkRefName]) / RefNameSize
+}
+
+// RefName returns a pointer to the i-th referenced symbol name.
+// Note: here i is not a local symbol index, just a counter.
+func (r *Reader) RefName(i int) *RefName {
+ off := r.h.Offsets[BlkRefName] + uint32(i*RefNameSize)
+ return (*RefName)(unsafe.Pointer(&r.b[off]))
+}
+
+// ReadOnly returns whether r.BytesAt returns read-only bytes.
+func (r *Reader) ReadOnly() bool {
+ return r.readonly
+}
+
+// Flags returns the flag bits read from the object file header.
+func (r *Reader) Flags() uint32 {
+ return r.h.Flags
+}
+
+func (r *Reader) Shared() bool { return r.Flags()&ObjFlagShared != 0 }
+func (r *Reader) NeedNameExpansion() bool { return r.Flags()&ObjFlagNeedNameExpansion != 0 }
+func (r *Reader) FromAssembly() bool { return r.Flags()&ObjFlagFromAssembly != 0 }
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/abi_string.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/abi_string.go
new file mode 100644
index 0000000..a439da3
--- /dev/null
+++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/abi_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type ABI"; DO NOT EDIT.
+
+package obj
+
+import "strconv"
+
+const _ABI_name = "ABI0ABIInternalABICount"
+
+var _ABI_index = [...]uint8{0, 4, 15, 23}
+
+func (i ABI) String() string {
+ if i >= ABI(len(_ABI_index)-1) {
+ return "ABI(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _ABI_name[_ABI_index[i]:_ABI_index[i+1]]
+}
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/addrtype_string.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/addrtype_string.go
new file mode 100644
index 0000000..71f0dd9
--- /dev/null
+++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/addrtype_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type AddrType"; DO NOT EDIT.
+
+package obj
+
+import "strconv"
+
+const _AddrType_name = "TYPE_NONETYPE_BRANCHTYPE_TEXTSIZETYPE_MEMTYPE_CONSTTYPE_FCONSTTYPE_SCONSTTYPE_REGTYPE_ADDRTYPE_SHIFTTYPE_REGREGTYPE_REGREG2TYPE_INDIRTYPE_REGLIST"
+
+var _AddrType_index = [...]uint8{0, 9, 20, 33, 41, 51, 62, 73, 81, 90, 100, 111, 123, 133, 145}
+
+func (i AddrType) String() string {
+ if i >= AddrType(len(_AddrType_index)-1) {
+ return "AddrType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _AddrType_name[_AddrType_index[i]:_AddrType_index[i+1]]
+}
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/a.out.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/a.out.go
new file mode 100644
index 0000000..8f8b8db
--- /dev/null
+++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/a.out.go
@@ -0,0 +1,410 @@
+// Inferno utils/5c/5.out.h
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5c/5.out.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package arm + +import "github.com/twitchyliquid64/golang-asm/obj" + +//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p arm + +const ( + NSNAME = 8 + NSYM = 50 + NREG = 16 +) + +/* -1 disables use of REGARG */ +const ( + REGARG = -1 +) + +const ( + REG_R0 = obj.RBaseARM + iota // must be 16-aligned + REG_R1 + REG_R2 + REG_R3 + REG_R4 + REG_R5 + REG_R6 + REG_R7 + REG_R8 + REG_R9 + REG_R10 + REG_R11 + REG_R12 + REG_R13 + REG_R14 + REG_R15 + + REG_F0 // must be 16-aligned + REG_F1 + REG_F2 + REG_F3 + REG_F4 + REG_F5 + REG_F6 + REG_F7 + REG_F8 + REG_F9 + REG_F10 + REG_F11 + REG_F12 + REG_F13 + REG_F14 + REG_F15 + + REG_FPSR // must be 2-aligned + REG_FPCR + + REG_CPSR // must be 2-aligned + REG_SPSR + + REGRET = REG_R0 + /* compiler allocates R1 up as temps */ + /* compiler allocates register variables R3 up */ + /* compiler allocates external registers R10 down */ + REGEXT = REG_R10 + /* these two registers are declared in runtime.h */ + REGG = REGEXT - 0 + REGM = REGEXT - 1 + + REGCTXT = REG_R7 + REGTMP = REG_R11 + REGSP = REG_R13 + REGLINK = REG_R14 + REGPC = REG_R15 + + NFREG = 16 + /* compiler allocates register variables F0 up */ + /* compiler allocates external registers F7 down */ + FREGRET = REG_F0 + FREGEXT = REG_F7 + FREGTMP = REG_F15 +) + +// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0040b/IHI0040B_aadwarf.pdf +var ARMDWARFRegisters = map[int16]int16{} + +func init() { + // f assigns dwarfregisters[from:to] = (base):(step*(to-from)+base) + f := func(from, to, base, step int16) { + for r := int16(from); r <= to; r++ { + ARMDWARFRegisters[r] = step*(r-from) + base + } + } + f(REG_R0, REG_R15, 0, 1) + f(REG_F0, REG_F15, 64, 2) // Use d0 through D15, aka S0, S2, ..., S30 +} + +// Special registers, after subtracting obj.RBaseARM, bit 9 indicates +// a special register and the low bits select the register. 
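+// For example, REG_MB_SY below works out to obj.RBaseARM + 1<<9 + 1, and
+// aclass (in asm5.go) classifies any register >= REG_SPECIAL as C_SPR.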
+const ( + REG_SPECIAL = obj.RBaseARM + 1<<9 + iota + REG_MB_SY + REG_MB_ST + REG_MB_ISH + REG_MB_ISHST + REG_MB_NSH + REG_MB_NSHST + REG_MB_OSH + REG_MB_OSHST + + MAXREG +) + +const ( + C_NONE = iota + C_REG + C_REGREG + C_REGREG2 + C_REGLIST + C_SHIFT /* register shift R>>x */ + C_SHIFTADDR /* memory address with shifted offset R>>x(R) */ + C_FREG + C_PSR + C_FCR + C_SPR /* REG_MB_SY */ + + C_RCON /* 0xff rotated */ + C_NCON /* ~RCON */ + C_RCON2A /* OR of two disjoint C_RCON constants */ + C_RCON2S /* subtraction of two disjoint C_RCON constants */ + C_SCON /* 0xffff */ + C_LCON + C_LCONADDR + C_ZFCON + C_SFCON + C_LFCON + + C_RACON + C_LACON + + C_SBRA + C_LBRA + + C_HAUTO /* halfword insn offset (-0xff to 0xff) */ + C_FAUTO /* float insn offset (0 to 0x3fc, word aligned) */ + C_HFAUTO /* both H and F */ + C_SAUTO /* -0xfff to 0xfff */ + C_LAUTO + + C_HOREG + C_FOREG + C_HFOREG + C_SOREG + C_ROREG + C_SROREG /* both nil and R */ + C_LOREG + + C_PC + C_SP + C_HREG + + C_ADDR /* reference to relocatable address */ + + // TLS "var" in local exec mode: will become a constant offset from + // thread local base that is ultimately chosen by the program linker. + C_TLS_LE + + // TLS "var" in initial exec mode: will become a memory address (chosen + // by the program linker) that the dynamic linker will fill with the + // offset from the thread local base. + C_TLS_IE + + C_TEXTSIZE + + C_GOK + + C_NCLASS /* must be the last */ +) + +const ( + AAND = obj.ABaseARM + obj.A_ARCHSPECIFIC + iota + AEOR + ASUB + ARSB + AADD + AADC + ASBC + ARSC + ATST + ATEQ + ACMP + ACMN + AORR + ABIC + + AMVN + + /* + * Do not reorder or fragment the conditional branch + * opcodes, or the predication code will break + */ + ABEQ + ABNE + ABCS + ABHS + ABCC + ABLO + ABMI + ABPL + ABVS + ABVC + ABHI + ABLS + ABGE + ABLT + ABGT + ABLE + + AMOVWD + AMOVWF + AMOVDW + AMOVFW + AMOVFD + AMOVDF + AMOVF + AMOVD + + ACMPF + ACMPD + AADDF + AADDD + ASUBF + ASUBD + AMULF + AMULD + ANMULF + ANMULD + AMULAF + AMULAD + ANMULAF + ANMULAD + AMULSF + AMULSD + ANMULSF + ANMULSD + AFMULAF + AFMULAD + AFNMULAF + AFNMULAD + AFMULSF + AFMULSD + AFNMULSF + AFNMULSD + ADIVF + ADIVD + ASQRTF + ASQRTD + AABSF + AABSD + ANEGF + ANEGD + + ASRL + ASRA + ASLL + AMULU + ADIVU + AMUL + AMMUL + ADIV + AMOD + AMODU + ADIVHW + ADIVUHW + + AMOVB + AMOVBS + AMOVBU + AMOVH + AMOVHS + AMOVHU + AMOVW + AMOVM + ASWPBU + ASWPW + + ARFE + ASWI + AMULA + AMULS + AMMULA + AMMULS + + AWORD + + AMULL + AMULAL + AMULLU + AMULALU + + ABX + ABXRET + ADWORD + + ALDREX + ASTREX + ALDREXD + ASTREXD + + ADMB + + APLD + + ACLZ + AREV + AREV16 + AREVSH + ARBIT + + AXTAB + AXTAH + AXTABU + AXTAHU + + ABFX + ABFXU + ABFC + ABFI + + AMULWT + AMULWB + AMULBB + AMULAWT + AMULAWB + AMULABB + + AMRC // MRC/MCR + + ALAST + + // aliases + AB = obj.AJMP + ABL = obj.ACALL +) + +/* scond byte */ +const ( + C_SCOND = (1 << 4) - 1 + C_SBIT = 1 << 4 + C_PBIT = 1 << 5 + C_WBIT = 1 << 6 + C_FBIT = 1 << 7 /* psr flags-only */ + C_UBIT = 1 << 7 /* up bit, unsigned bit */ + + // These constants are the ARM condition codes encodings, + // XORed with 14 so that C_SCOND_NONE has value 0, + // so that a zeroed Prog.scond means "always execute". 
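+ // For example, C_SCOND_EQ = 0 ^ 14 = 14 below, while C_SCOND_NONE = 14 ^ 14 = 0.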
+ C_SCOND_XOR = 14 + + C_SCOND_EQ = 0 ^ C_SCOND_XOR + C_SCOND_NE = 1 ^ C_SCOND_XOR + C_SCOND_HS = 2 ^ C_SCOND_XOR + C_SCOND_LO = 3 ^ C_SCOND_XOR + C_SCOND_MI = 4 ^ C_SCOND_XOR + C_SCOND_PL = 5 ^ C_SCOND_XOR + C_SCOND_VS = 6 ^ C_SCOND_XOR + C_SCOND_VC = 7 ^ C_SCOND_XOR + C_SCOND_HI = 8 ^ C_SCOND_XOR + C_SCOND_LS = 9 ^ C_SCOND_XOR + C_SCOND_GE = 10 ^ C_SCOND_XOR + C_SCOND_LT = 11 ^ C_SCOND_XOR + C_SCOND_GT = 12 ^ C_SCOND_XOR + C_SCOND_LE = 13 ^ C_SCOND_XOR + C_SCOND_NONE = 14 ^ C_SCOND_XOR + C_SCOND_NV = 15 ^ C_SCOND_XOR + + /* D_SHIFT type */ + SHIFT_LL = 0 << 5 + SHIFT_LR = 1 << 5 + SHIFT_AR = 2 << 5 + SHIFT_RR = 3 << 5 +) diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames.go new file mode 100644 index 0000000..8a05133 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames.go @@ -0,0 +1,144 @@ +// Code generated by stringer -i a.out.go -o anames.go -p arm; DO NOT EDIT. + +package arm + +import "github.com/twitchyliquid64/golang-asm/obj" + +var Anames = []string{ + obj.A_ARCHSPECIFIC: "AND", + "EOR", + "SUB", + "RSB", + "ADD", + "ADC", + "SBC", + "RSC", + "TST", + "TEQ", + "CMP", + "CMN", + "ORR", + "BIC", + "MVN", + "BEQ", + "BNE", + "BCS", + "BHS", + "BCC", + "BLO", + "BMI", + "BPL", + "BVS", + "BVC", + "BHI", + "BLS", + "BGE", + "BLT", + "BGT", + "BLE", + "MOVWD", + "MOVWF", + "MOVDW", + "MOVFW", + "MOVFD", + "MOVDF", + "MOVF", + "MOVD", + "CMPF", + "CMPD", + "ADDF", + "ADDD", + "SUBF", + "SUBD", + "MULF", + "MULD", + "NMULF", + "NMULD", + "MULAF", + "MULAD", + "NMULAF", + "NMULAD", + "MULSF", + "MULSD", + "NMULSF", + "NMULSD", + "FMULAF", + "FMULAD", + "FNMULAF", + "FNMULAD", + "FMULSF", + "FMULSD", + "FNMULSF", + "FNMULSD", + "DIVF", + "DIVD", + "SQRTF", + "SQRTD", + "ABSF", + "ABSD", + "NEGF", + "NEGD", + "SRL", + "SRA", + "SLL", + "MULU", + "DIVU", + "MUL", + "MMUL", + "DIV", + "MOD", + "MODU", + "DIVHW", + "DIVUHW", + "MOVB", + "MOVBS", + "MOVBU", + "MOVH", + "MOVHS", + "MOVHU", + "MOVW", + "MOVM", + "SWPBU", + "SWPW", + "RFE", + "SWI", + "MULA", + "MULS", + "MMULA", + "MMULS", + "WORD", + "MULL", + "MULAL", + "MULLU", + "MULALU", + "BX", + "BXRET", + "DWORD", + "LDREX", + "STREX", + "LDREXD", + "STREXD", + "DMB", + "PLD", + "CLZ", + "REV", + "REV16", + "REVSH", + "RBIT", + "XTAB", + "XTAH", + "XTABU", + "XTAHU", + "BFX", + "BFXU", + "BFC", + "BFI", + "MULWT", + "MULWB", + "MULBB", + "MULAWT", + "MULAWB", + "MULABB", + "MRC", + "LAST", +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames5.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames5.go new file mode 100644 index 0000000..78fcd55 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/anames5.go @@ -0,0 +1,77 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package arm + +var cnames5 = []string{ + "NONE", + "REG", + "REGREG", + "REGREG2", + "REGLIST", + "SHIFT", + "SHIFTADDR", + "FREG", + "PSR", + "FCR", + "SPR", + "RCON", + "NCON", + "RCON2A", + "RCON2S", + "SCON", + "LCON", + "LCONADDR", + "ZFCON", + "SFCON", + "LFCON", + "RACON", + "LACON", + "SBRA", + "LBRA", + "HAUTO", + "FAUTO", + "HFAUTO", + "SAUTO", + "LAUTO", + "HOREG", + "FOREG", + "HFOREG", + "SOREG", + "ROREG", + "SROREG", + "LOREG", + "PC", + "SP", + "HREG", + "ADDR", + "C_TLS_LE", + "C_TLS_IE", + "TEXTSIZE", + "GOK", + "NCLASS", + "SCOND = (1<<4)-1", + "SBIT = 1<<4", + "PBIT = 1<<5", + "WBIT = 1<<6", + "FBIT = 1<<7", + "UBIT = 1<<7", + "SCOND_XOR = 14", + "SCOND_EQ = 0 ^ C_SCOND_XOR", + "SCOND_NE = 1 ^ C_SCOND_XOR", + "SCOND_HS = 2 ^ C_SCOND_XOR", + "SCOND_LO = 3 ^ C_SCOND_XOR", + "SCOND_MI = 4 ^ C_SCOND_XOR", + "SCOND_PL = 5 ^ C_SCOND_XOR", + "SCOND_VS = 6 ^ C_SCOND_XOR", + "SCOND_VC = 7 ^ C_SCOND_XOR", + "SCOND_HI = 8 ^ C_SCOND_XOR", + "SCOND_LS = 9 ^ C_SCOND_XOR", + "SCOND_GE = 10 ^ C_SCOND_XOR", + "SCOND_LT = 11 ^ C_SCOND_XOR", + "SCOND_GT = 12 ^ C_SCOND_XOR", + "SCOND_LE = 13 ^ C_SCOND_XOR", + "SCOND_NONE = 14 ^ C_SCOND_XOR", + "SCOND_NV = 15 ^ C_SCOND_XOR", +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/asm5.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/asm5.go new file mode 100644 index 0000000..924657f --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/asm5.go @@ -0,0 +1,3096 @@ +// Inferno utils/5l/span.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5l/span.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package arm + +import ( + "github.com/twitchyliquid64/golang-asm/obj" + "github.com/twitchyliquid64/golang-asm/objabi" + "fmt" + "log" + "math" + "sort" +) + +// ctxt5 holds state while assembling a single function. +// Each function gets a fresh ctxt5. +// This allows for multiple functions to be safely concurrently assembled. 
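+// blitrl and elitrl hold the head and tail of the pending literal-pool list,
+// and pool records its start PC and size (see addpool and flushpool below).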
+type ctxt5 struct { + ctxt *obj.Link + newprog obj.ProgAlloc + cursym *obj.LSym + printp *obj.Prog + blitrl *obj.Prog + elitrl *obj.Prog + autosize int64 + instoffset int64 + pc int64 + pool struct { + start uint32 + size uint32 + extra uint32 + } +} + +type Optab struct { + as obj.As + a1 uint8 + a2 int8 + a3 uint8 + type_ uint8 + size int8 + param int16 + flag int8 + pcrelsiz uint8 + scond uint8 // optional flags accepted by the instruction +} + +type Opcross [32][2][32]uint8 + +const ( + LFROM = 1 << 0 + LTO = 1 << 1 + LPOOL = 1 << 2 + LPCREL = 1 << 3 +) + +var optab = []Optab{ + /* struct Optab: + OPCODE, from, prog->reg, to, type, size, param, flag, extra data size, optional suffix */ + {obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0, 0}, + {AADD, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AADD, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AAND, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AAND, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AORR, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AORR, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AMVN, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {ACMP, C_REG, C_REG, C_NONE, 1, 4, 0, 0, 0, 0}, + {AADD, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AADD, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AAND, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AAND, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AORR, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AORR, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AMOVW, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, 0}, + {AMVN, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, 0}, + {ACMP, C_RCON, C_REG, C_NONE, 2, 4, 0, 0, 0, 0}, + {AADD, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AADD, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AAND, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AAND, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AORR, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AORR, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AMVN, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {ACMP, C_SHIFT, C_REG, C_NONE, 3, 4, 0, 0, 0, 0}, + {AMOVW, C_RACON, C_NONE, C_REG, 4, 4, REGSP, 0, 0, C_SBIT}, + {AB, C_NONE, C_NONE, C_SBRA, 5, 4, 0, LPOOL, 0, 0}, + {ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, + {ABX, C_NONE, C_NONE, C_SBRA, 74, 20, 0, 0, 0, 0}, + {ABEQ, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, + {ABEQ, C_RCON, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // prediction hinted form, hint ignored + {AB, C_NONE, C_NONE, C_ROREG, 6, 4, 0, LPOOL, 0, 0}, + {ABL, C_NONE, C_NONE, C_ROREG, 7, 4, 0, 0, 0, 0}, + {ABL, C_REG, C_NONE, C_ROREG, 7, 4, 0, 0, 0, 0}, + {ABX, C_NONE, C_NONE, C_ROREG, 75, 12, 0, 0, 0, 0}, + {ABXRET, C_NONE, C_NONE, C_ROREG, 76, 4, 0, 0, 0, 0}, + {ASLL, C_RCON, C_REG, C_REG, 8, 4, 0, 0, 0, C_SBIT}, + {ASLL, C_RCON, C_NONE, C_REG, 8, 4, 0, 0, 0, C_SBIT}, + {ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0, C_SBIT}, + {ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0, C_SBIT}, + {ASWI, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0, 0}, + {ASWI, C_NONE, C_NONE, C_LCON, 10, 4, 0, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_LCON, 11, 4, 0, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_LCONADDR, 11, 4, 0, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_ADDR, 11, 4, 0, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_TLS_LE, 103, 4, 0, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_TLS_IE, 104, 4, 0, 0, 0, 0}, + {AMOVW, C_NCON, C_NONE, C_REG, 12, 4, 0, 0, 0, 0}, + {AMOVW, C_SCON, C_NONE, C_REG, 12, 4, 0, 0, 0, 0}, + {AMOVW, C_LCON, C_NONE, 
C_REG, 12, 4, 0, LFROM, 0, 0}, + {AMOVW, C_LCONADDR, C_NONE, C_REG, 12, 4, 0, LFROM | LPCREL, 4, 0}, + {AMVN, C_NCON, C_NONE, C_REG, 12, 4, 0, 0, 0, 0}, + {AADD, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AADD, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AAND, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AAND, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AORR, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AORR, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {ACMP, C_NCON, C_REG, C_NONE, 13, 8, 0, 0, 0, 0}, + {AADD, C_SCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AADD, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AAND, C_SCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AAND, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AORR, C_SCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AORR, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AMVN, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, 0}, + {ACMP, C_SCON, C_REG, C_NONE, 13, 8, 0, 0, 0, 0}, + {AADD, C_RCON2A, C_REG, C_REG, 106, 8, 0, 0, 0, 0}, + {AADD, C_RCON2A, C_NONE, C_REG, 106, 8, 0, 0, 0, 0}, + {AORR, C_RCON2A, C_REG, C_REG, 106, 8, 0, 0, 0, 0}, + {AORR, C_RCON2A, C_NONE, C_REG, 106, 8, 0, 0, 0, 0}, + {AADD, C_RCON2S, C_REG, C_REG, 107, 8, 0, 0, 0, 0}, + {AADD, C_RCON2S, C_NONE, C_REG, 107, 8, 0, 0, 0, 0}, + {AADD, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AADD, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AAND, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AAND, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AORR, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AORR, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AMVN, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, 0}, + {ACMP, C_LCON, C_REG, C_NONE, 13, 8, 0, LFROM, 0, 0}, + {AMOVB, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, 0}, + {AMOVBS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0, 0}, + {AMOVBU, C_REG, C_NONE, C_REG, 58, 4, 0, 0, 0, 0}, + {AMOVH, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, 0}, + {AMOVHS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0, 0}, + {AMOVHU, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0, 0}, + {AMUL, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0, C_SBIT}, + {AMUL, C_REG, C_NONE, C_REG, 15, 4, 0, 0, 0, C_SBIT}, + {ADIV, C_REG, C_REG, C_REG, 16, 4, 0, 0, 0, 0}, + {ADIV, C_REG, C_NONE, C_REG, 16, 4, 0, 0, 0, 0}, + {ADIVHW, C_REG, C_REG, C_REG, 105, 4, 0, 0, 0, 0}, + {ADIVHW, C_REG, C_NONE, C_REG, 105, 4, 0, 0, 0, 0}, + {AMULL, C_REG, C_REG, C_REGREG, 17, 4, 0, 0, 0, C_SBIT}, + {ABFX, C_LCON, C_REG, C_REG, 18, 4, 0, 0, 0, 0}, // width in From, LSB in From3 + {ABFX, C_LCON, C_NONE, C_REG, 18, 4, 0, 0, 0, 0}, // width in From, LSB in From3 + {AMOVW, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_SOREG, C_NONE, 
C_REG, 21, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AXTAB, C_SHIFT, C_REG, C_REG, 22, 4, 0, 0, 0, 0}, + {AXTAB, C_SHIFT, C_NONE, C_REG, 22, 4, 0, 0, 0, 0}, + {AMOVW, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, C_SBIT}, + {AMOVB, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVBS, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVBU, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVH, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVHS, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVHU, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVW, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_TLS_LE, C_NONE, C_REG, 101, 4, 0, LFROM, 0, 0}, + {AMOVW, C_TLS_IE, C_NONE, C_REG, 102, 8, 0, LFROM, 0, 0}, + {AMOVW, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_LACON, C_NONE, C_REG, 34, 8, REGSP, LFROM, 0, C_SBIT}, + {AMOVW, C_PSR, C_NONE, C_REG, 35, 4, 0, 0, 0, 0}, + {AMOVW, C_REG, C_NONE, C_PSR, 36, 4, 0, 0, 0, 0}, + {AMOVW, C_RCON, C_NONE, C_PSR, 37, 4, 0, 0, 0, 0}, + {AMOVM, C_REGLIST, C_NONE, C_SOREG, 38, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVM, C_SOREG, C_NONE, C_REGLIST, 39, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {ASWPW, C_SOREG, C_REG, C_REG, 40, 4, 0, 0, 0, 0}, + {ARFE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0, 0}, + {AMOVF, C_FREG, C_NONE, C_FAUTO, 50, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FREG, C_NONE, C_FOREG, 50, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FAUTO, C_NONE, C_FREG, 51, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FOREG, C_NONE, C_FREG, 51, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FREG, C_NONE, C_LAUTO, 52, 12, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FREG, C_NONE, C_LOREG, 52, 12, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_LAUTO, C_NONE, C_FREG, 53, 12, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_LOREG, C_NONE, C_FREG, 53, 12, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FREG, C_NONE, C_ADDR, 68, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_ADDR, C_NONE, C_FREG, 69, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AADDF, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0, 
0}, + {AADDF, C_FREG, C_FREG, C_FREG, 54, 4, 0, 0, 0, 0}, + {AMOVF, C_FREG, C_NONE, C_FREG, 55, 4, 0, 0, 0, 0}, + {ANEGF, C_FREG, C_NONE, C_FREG, 55, 4, 0, 0, 0, 0}, + {AMOVW, C_REG, C_NONE, C_FCR, 56, 4, 0, 0, 0, 0}, + {AMOVW, C_FCR, C_NONE, C_REG, 57, 4, 0, 0, 0, 0}, + {AMOVW, C_SHIFTADDR, C_NONE, C_REG, 59, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_SHIFTADDR, C_NONE, C_REG, 59, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_SHIFTADDR, 62, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_SHIFTADDR, 62, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_SHIFTADDR, 62, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_LOREG, C_NONE, 
C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {ALDREX, C_SOREG, C_NONE, C_REG, 77, 4, 0, 0, 0, 0}, + {ASTREX, C_SOREG, C_REG, C_REG, 78, 4, 0, 0, 0, 0}, + {ADMB, C_NONE, C_NONE, C_NONE, 110, 4, 0, 0, 0, 0}, + {ADMB, C_LCON, C_NONE, C_NONE, 110, 4, 0, 0, 0, 0}, + {ADMB, C_SPR, C_NONE, C_NONE, 110, 4, 0, 0, 0, 0}, + {AMOVF, C_ZFCON, C_NONE, C_FREG, 80, 8, 0, 0, 0, 0}, + {AMOVF, C_SFCON, C_NONE, C_FREG, 81, 4, 0, 0, 0, 0}, + {ACMPF, C_FREG, C_FREG, C_NONE, 82, 8, 0, 0, 0, 0}, + {ACMPF, C_FREG, C_NONE, C_NONE, 83, 8, 0, 0, 0, 0}, + {AMOVFW, C_FREG, C_NONE, C_FREG, 84, 4, 0, 0, 0, C_UBIT}, + {AMOVWF, C_FREG, C_NONE, C_FREG, 85, 4, 0, 0, 0, C_UBIT}, + {AMOVFW, C_FREG, C_NONE, C_REG, 86, 8, 0, 0, 0, C_UBIT}, + {AMOVWF, C_REG, C_NONE, C_FREG, 87, 8, 0, 0, 0, C_UBIT}, + {AMOVW, C_REG, C_NONE, C_FREG, 88, 4, 0, 0, 0, 0}, + {AMOVW, C_FREG, C_NONE, C_REG, 89, 4, 0, 0, 0, 0}, + {ALDREXD, C_SOREG, C_NONE, C_REG, 91, 4, 0, 0, 0, 0}, + {ASTREXD, C_SOREG, C_REG, C_REG, 92, 4, 0, 0, 0, 0}, + {APLD, C_SOREG, C_NONE, C_NONE, 95, 4, 0, 0, 0, 0}, + {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 96, 4, 0, 0, 0, 0}, + {ACLZ, C_REG, C_NONE, C_REG, 97, 4, 0, 0, 0, 0}, + {AMULWT, C_REG, C_REG, C_REG, 98, 4, 0, 0, 0, 0}, + {AMULA, C_REG, C_REG, C_REGREG2, 99, 4, 0, 0, 0, C_SBIT}, + {AMULAWT, C_REG, C_REG, C_REGREG2, 99, 4, 0, 0, 0, 0}, + {obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0, 0}, + {obj.AFUNCDATA, C_LCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0, 0}, + {obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0, 0}, + {obj.ANOP, C_LCON, C_NONE, C_NONE, 0, 0, 0, 0, 0, 0}, // nop variants, see #40689 + {obj.ANOP, C_REG, C_NONE, C_NONE, 0, 0, 0, 0, 0, 0}, + {obj.ANOP, C_FREG, C_NONE, C_NONE, 0, 0, 0, 0, 0, 0}, + {obj.ADUFFZERO, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // same as ABL + {obj.ADUFFCOPY, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // same as ABL + {obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0, 0}, +} + +var mbOp = []struct { + reg int16 + enc uint32 +}{ + {REG_MB_SY, 15}, + {REG_MB_ST, 14}, + {REG_MB_ISH, 11}, + {REG_MB_ISHST, 10}, + {REG_MB_NSH, 7}, + {REG_MB_NSHST, 6}, + {REG_MB_OSH, 3}, + {REG_MB_OSHST, 2}, +} + +var oprange [ALAST & obj.AMask][]Optab + +var xcmp [C_GOK + 1][C_GOK + 1]bool + +var ( + deferreturn *obj.LSym + symdiv *obj.LSym + symdivu *obj.LSym + symmod *obj.LSym + symmodu *obj.LSym +) + +// Note about encoding: Prog.scond holds the condition encoding, +// but XOR'ed with C_SCOND_XOR, so that 
C_SCOND_NONE == 0. +// The code that shifts the value << 28 has the responsibility +// for XORing with C_SCOND_XOR too. + +func checkSuffix(c *ctxt5, p *obj.Prog, o *Optab) { + if p.Scond&C_SBIT != 0 && o.scond&C_SBIT == 0 { + c.ctxt.Diag("invalid .S suffix: %v", p) + } + if p.Scond&C_PBIT != 0 && o.scond&C_PBIT == 0 { + c.ctxt.Diag("invalid .P suffix: %v", p) + } + if p.Scond&C_WBIT != 0 && o.scond&C_WBIT == 0 { + c.ctxt.Diag("invalid .W suffix: %v", p) + } + if p.Scond&C_UBIT != 0 && o.scond&C_UBIT == 0 { + c.ctxt.Diag("invalid .U suffix: %v", p) + } +} + +func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { + if ctxt.Retpoline { + ctxt.Diag("-spectre=ret not supported on arm") + ctxt.Retpoline = false // don't keep printing + } + + var p *obj.Prog + var op *obj.Prog + + p = cursym.Func.Text + if p == nil || p.Link == nil { // handle external functions and ELF section symbols + return + } + + if oprange[AAND&obj.AMask] == nil { + ctxt.Diag("arm ops not initialized, call arm.buildop first") + } + + c := ctxt5{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: p.To.Offset + 4} + pc := int32(0) + + op = p + p = p.Link + var m int + var o *Optab + for ; p != nil || c.blitrl != nil; op, p = p, p.Link { + if p == nil { + if c.checkpool(op, pc) { + p = op + continue + } + + // can't happen: blitrl is not nil, but checkpool didn't flushpool + ctxt.Diag("internal inconsistency") + + break + } + + p.Pc = int64(pc) + o = c.oplook(p) + m = int(o.size) + + if m%4 != 0 || p.Pc%4 != 0 { + ctxt.Diag("!pc invalid: %v size=%d", p, m) + } + + // must check literal pool here in case p generates many instructions + if c.blitrl != nil { + // Emit the constant pool just before p if p + // would push us over the immediate size limit. + if c.checkpool(op, pc+int32(m)) { + // Back up to the instruction just + // before the pool and continue with + // the first instruction of the pool. + p = op + continue + } + } + + if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.ANOP) { + ctxt.Diag("zero-width instruction\n%v", p) + continue + } + + switch o.flag & (LFROM | LTO | LPOOL) { + case LFROM: + c.addpool(p, &p.From) + + case LTO: + c.addpool(p, &p.To) + + case LPOOL: + if p.Scond&C_SCOND == C_SCOND_NONE { + c.flushpool(p, 0, 0) + } + } + + if p.As == AMOVW && p.To.Type == obj.TYPE_REG && p.To.Reg == REGPC && p.Scond&C_SCOND == C_SCOND_NONE { + c.flushpool(p, 0, 0) + } + + pc += int32(m) + } + + c.cursym.Size = int64(pc) + + /* + * if any procedure is large enough to + * generate a large SBRA branch, then + * generate extra passes putting branches + * around jmps to fix. this is rare. + */ + times := 0 + + var bflag int + var opc int32 + var out [6 + 3]uint32 + for { + bflag = 0 + pc = 0 + times++ + c.cursym.Func.Text.Pc = 0 // force re-layout the code. + for p = c.cursym.Func.Text; p != nil; p = p.Link { + o = c.oplook(p) + if int64(pc) > p.Pc { + p.Pc = int64(pc) + } + + /* very large branches + if(o->type == 6 && p->pcond) { + otxt = p->pcond->pc - c; + if(otxt < 0) + otxt = -otxt; + if(otxt >= (1L<<17) - 10) { + q = emallocz(sizeof(Prog)); + q->link = p->link; + p->link = q; + q->as = AB; + q->to.type = TYPE_BRANCH; + q->pcond = p->pcond; + p->pcond = q; + q = emallocz(sizeof(Prog)); + q->link = p->link; + p->link = q; + q->as = AB; + q->to.type = TYPE_BRANCH; + q->pcond = q->link->link; + bflag = 1; + } + } + */ + opc = int32(p.Pc) + m = int(o.size) + if p.Pc != int64(opc) { + bflag = 1 + } + + //print("%v pc changed %d to %d in iter. 
%d\n", p, opc, (int32)p->pc, times); + pc = int32(p.Pc + int64(m)) + + if m%4 != 0 || p.Pc%4 != 0 { + ctxt.Diag("pc invalid: %v size=%d", p, m) + } + + if m/4 > len(out) { + ctxt.Diag("instruction size too large: %d > %d", m/4, len(out)) + } + if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != obj.ANOP) { + if p.As == obj.ATEXT { + c.autosize = p.To.Offset + 4 + continue + } + + ctxt.Diag("zero-width instruction\n%v", p) + continue + } + } + + c.cursym.Size = int64(pc) + if bflag == 0 { + break + } + } + + if pc%4 != 0 { + ctxt.Diag("sym->size=%d, invalid", pc) + } + + /* + * lay out the code. all the pc-relative code references, + * even cross-function, are resolved now; + * only data references need to be relocated. + * with more work we could leave cross-function + * code references to be relocated too, and then + * perhaps we'd be able to parallelize the span loop above. + */ + + p = c.cursym.Func.Text + c.autosize = p.To.Offset + 4 + c.cursym.Grow(c.cursym.Size) + + bp := c.cursym.P + pc = int32(p.Pc) // even p->link might need extra padding + var v int + for p = p.Link; p != nil; p = p.Link { + c.pc = p.Pc + o = c.oplook(p) + opc = int32(p.Pc) + c.asmout(p, o, out[:]) + m = int(o.size) + + if m%4 != 0 || p.Pc%4 != 0 { + ctxt.Diag("final stage: pc invalid: %v size=%d", p, m) + } + + if int64(pc) > p.Pc { + ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, pc, p) + } + for int64(pc) != p.Pc { + // emit 0xe1a00000 (MOVW R0, R0) + bp[0] = 0x00 + bp = bp[1:] + + bp[0] = 0x00 + bp = bp[1:] + bp[0] = 0xa0 + bp = bp[1:] + bp[0] = 0xe1 + bp = bp[1:] + pc += 4 + } + + for i := 0; i < m/4; i++ { + v = int(out[i]) + bp[0] = byte(v) + bp = bp[1:] + bp[0] = byte(v >> 8) + bp = bp[1:] + bp[0] = byte(v >> 16) + bp = bp[1:] + bp[0] = byte(v >> 24) + bp = bp[1:] + } + + pc += int32(m) + } +} + +// checkpool flushes the literal pool when the first reference to +// it threatens to go out of range of a 12-bit PC-relative offset. +// +// nextpc is the tentative next PC at which the pool could be emitted. +// checkpool should be called *before* emitting the instruction that +// would cause the PC to reach nextpc. +// If nextpc is too far from the first pool reference, checkpool will +// flush the pool immediately after p. +// The caller should resume processing a p.Link. +func (c *ctxt5) checkpool(p *obj.Prog, nextpc int32) bool { + poolLast := nextpc + poolLast += 4 // the AB instruction to jump around the pool + poolLast += int32(c.pool.size) - 4 // the offset of the last pool entry + + refPC := int32(c.pool.start) // PC of the first pool reference + + v := poolLast - refPC - 8 // 12-bit PC-relative offset (see omvl) + + if c.pool.size >= 0xff0 || immaddr(v) == 0 { + return c.flushpool(p, 1, 0) + } else if p.Link == nil { + return c.flushpool(p, 2, 0) + } + return false +} + +func (c *ctxt5) flushpool(p *obj.Prog, skip int, force int) bool { + if c.blitrl != nil { + if skip != 0 { + if false && skip == 1 { + fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), c.pool.size, c.pool.start) + } + q := c.newprog() + q.As = AB + q.To.Type = obj.TYPE_BRANCH + q.To.SetTarget(p.Link) + q.Link = c.blitrl + q.Pos = p.Pos + c.blitrl = q + } else if force == 0 && (p.Pc+int64(c.pool.size)-int64(c.pool.start) < 2048) { + return false + } + + // The line number for constant pool entries doesn't really matter. + // We set it to the line number of the preceding instruction so that + // there are no deltas to encode in the pc-line tables. 
+ for q := c.blitrl; q != nil; q = q.Link {
+ q.Pos = p.Pos
+ }
+
+ c.elitrl.Link = p.Link
+ p.Link = c.blitrl
+
+ c.blitrl = nil /* BUG: should refer back to values until out-of-range */
+ c.elitrl = nil
+ c.pool.size = 0
+ c.pool.start = 0
+ c.pool.extra = 0
+ return true
+ }
+
+ return false
+}
+
+func (c *ctxt5) addpool(p *obj.Prog, a *obj.Addr) {
+ t := c.newprog()
+ t.As = AWORD
+
+ switch c.aclass(a) {
+ default:
+ t.To.Offset = a.Offset
+ t.To.Sym = a.Sym
+ t.To.Type = a.Type
+ t.To.Name = a.Name
+
+ if c.ctxt.Flag_shared && t.To.Sym != nil {
+ t.Rel = p
+ }
+
+ case C_SROREG,
+ C_LOREG,
+ C_ROREG,
+ C_FOREG,
+ C_SOREG,
+ C_HOREG,
+ C_FAUTO,
+ C_SAUTO,
+ C_LAUTO,
+ C_LACON:
+ t.To.Type = obj.TYPE_CONST
+ t.To.Offset = c.instoffset
+ }
+
+ if t.Rel == nil {
+ for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
+ if q.Rel == nil && q.To == t.To {
+ p.Pool = q
+ return
+ }
+ }
+ }
+
+ q := c.newprog()
+ *q = *t
+ q.Pc = int64(c.pool.size)
+
+ if c.blitrl == nil {
+ c.blitrl = q
+ c.pool.start = uint32(p.Pc)
+ } else {
+ c.elitrl.Link = q
+ }
+ c.elitrl = q
+ c.pool.size += 4
+
+ // Store the link to the pool entry in Pool.
+ p.Pool = q
+}
+
+func (c *ctxt5) regoff(a *obj.Addr) int32 {
+ c.instoffset = 0
+ c.aclass(a)
+ return int32(c.instoffset)
+}
+
+func immrot(v uint32) int32 {
+ for i := 0; i < 16; i++ {
+ if v&^0xff == 0 {
+ return int32(uint32(int32(i)<<8) | v | 1<<25)
+ }
+ v = v<<2 | v>>30
+ }
+
+ return 0
+}
+
+// immrot2a returns bits encoding the immediate constant fields of two instructions,
+// such that the encoded constants x, y satisfy x|y==v, x&y==0.
+// Returns 0,0 if no such decomposition of v exists.
+func immrot2a(v uint32) (uint32, uint32) {
+ for i := uint(1); i < 32; i++ {
+ m := uint32(1<<i - 1)
+ if x, y := immrot(v&m), immrot(v&^m); x != 0 && y != 0 {
+ return uint32(x), uint32(y)
+ }
+ }
+ // TODO: handle some more cases, like where
+ // the wraparound from the rotate could help.
+ return 0, 0
+}
+
+// immrot2s returns bits encoding the immediate constant fields of two instructions,
+// such that the encoded constants y, x satisfy y-x==v.
+// Returns 0,0 if no such decomposition of v exists.
+func immrot2s(v uint32) (uint32, uint32) {
+ if immrot(v) != 0 {
+ return v, 0
+ }
+ // suppose v in the form of {leading 00, upper effective bits, lower 8 effective bits, trailing 00}
+ // omit trailing 00
+ var i uint32
+ for i = 2; i < 32; i += 2 {
+ if v&(1<<i-1) != 0 {
+ break
+ }
+ }
+ // i must be <= 24, then adjust i just above lower 8 effective bits of v
+ i += 6
+ // let x = {the complement of lower 8 effective bits, trailing 00}, y = x + v
+ x := 1<<i - v&(1<<i-1)
+ y := v + x
+ if y, x = uint32(immrot(y)), uint32(immrot(x)); y != 0 && x != 0 {
+ return y, x
+ }
+ return 0, 0
+}
+
+func immaddr(v int32) int32 {
+ if v >= 0 && v <= 0xfff {
+ return v&0xfff | 1<<24 | 1<<23 /* pre indexing */ /* pre indexing, up */
+ }
+ if v >= -0xfff && v < 0 {
+ return -v&0xfff | 1<<24 /* pre indexing */
+ }
+ return 0
+}
+
+func immfloat(v int32) bool {
+ return v&0xC03 == 0 /* offset will fit in floating-point load/store */
+}
+
+func immhalf(v int32) bool {
+ if v >= 0 && v <= 0xff {
+ return v|1<<24|1<<23 != 0 /* pre indexing */ /* pre indexing, up */
+ }
+ if v >= -0xff && v < 0 {
+ return -v&0xff|1<<24 != 0 /* pre indexing */
+ }
+ return false
+}
+
+func (c *ctxt5) aclass(a *obj.Addr) int {
+ switch a.Type {
+ case obj.TYPE_NONE:
+ return C_NONE
+
+ case obj.TYPE_REG:
+ c.instoffset = 0
+ if REG_R0 <= a.Reg && a.Reg <= REG_R15 {
+ return C_REG
+ }
+ if REG_F0 <= a.Reg && a.Reg <= REG_F15 {
+ return C_FREG
+ }
+ if a.Reg == REG_FPSR || a.Reg == REG_FPCR {
+ return C_FCR
+ }
+ if a.Reg == REG_CPSR || a.Reg == REG_SPSR {
+ return C_PSR
+ }
+ if a.Reg >= REG_SPECIAL {
+ return C_SPR
+ }
+ return C_GOK
+
+ case obj.TYPE_REGREG:
+ return C_REGREG
+
+ case obj.TYPE_REGREG2:
+ return C_REGREG2
+
+ case obj.TYPE_REGLIST:
+ return C_REGLIST
+
+ case obj.TYPE_SHIFT:
+ if a.Reg == 0 {
+ // register shift R>>i
+ return C_SHIFT
+ } else {
+ // memory address with shifted offset R>>i(R)
+ return C_SHIFTADDR
+ }
+
+ case obj.TYPE_MEM:
+ switch a.Name {
+ case obj.NAME_EXTERN,
+ obj.NAME_GOTREF,
+ obj.NAME_STATIC:
+ if a.Sym == nil || a.Sym.Name == "" {
+ fmt.Printf("null sym external\n")
+ return C_GOK
+ }
+
+ c.instoffset = 0 // s.b.
unused but just in case + if a.Sym.Type == objabi.STLSBSS { + if c.ctxt.Flag_shared { + return C_TLS_IE + } else { + return C_TLS_LE + } + } + + return C_ADDR + + case obj.NAME_AUTO: + if a.Reg == REGSP { + // unset base register for better printing, since + // a.Offset is still relative to pseudo-SP. + a.Reg = obj.REG_NONE + } + c.instoffset = c.autosize + a.Offset + if t := immaddr(int32(c.instoffset)); t != 0 { + if immhalf(int32(c.instoffset)) { + if immfloat(t) { + return C_HFAUTO + } + return C_HAUTO + } + + if immfloat(t) { + return C_FAUTO + } + return C_SAUTO + } + + return C_LAUTO + + case obj.NAME_PARAM: + if a.Reg == REGSP { + // unset base register for better printing, since + // a.Offset is still relative to pseudo-FP. + a.Reg = obj.REG_NONE + } + c.instoffset = c.autosize + a.Offset + 4 + if t := immaddr(int32(c.instoffset)); t != 0 { + if immhalf(int32(c.instoffset)) { + if immfloat(t) { + return C_HFAUTO + } + return C_HAUTO + } + + if immfloat(t) { + return C_FAUTO + } + return C_SAUTO + } + + return C_LAUTO + + case obj.NAME_NONE: + c.instoffset = a.Offset + if t := immaddr(int32(c.instoffset)); t != 0 { + if immhalf(int32(c.instoffset)) { /* n.b. that it will also satisfy immrot */ + if immfloat(t) { + return C_HFOREG + } + return C_HOREG + } + + if immfloat(t) { + return C_FOREG /* n.b. that it will also satisfy immrot */ + } + if immrot(uint32(c.instoffset)) != 0 { + return C_SROREG + } + if immhalf(int32(c.instoffset)) { + return C_HOREG + } + return C_SOREG + } + + if immrot(uint32(c.instoffset)) != 0 { + return C_ROREG + } + return C_LOREG + } + + return C_GOK + + case obj.TYPE_FCONST: + if c.chipzero5(a.Val.(float64)) >= 0 { + return C_ZFCON + } + if c.chipfloat5(a.Val.(float64)) >= 0 { + return C_SFCON + } + return C_LFCON + + case obj.TYPE_TEXTSIZE: + return C_TEXTSIZE + + case obj.TYPE_CONST, + obj.TYPE_ADDR: + switch a.Name { + case obj.NAME_NONE: + c.instoffset = a.Offset + if a.Reg != 0 { + return c.aconsize() + } + + if immrot(uint32(c.instoffset)) != 0 { + return C_RCON + } + if immrot(^uint32(c.instoffset)) != 0 { + return C_NCON + } + if uint32(c.instoffset) <= 0xffff && objabi.GOARM == 7 { + return C_SCON + } + if x, y := immrot2a(uint32(c.instoffset)); x != 0 && y != 0 { + return C_RCON2A + } + if y, x := immrot2s(uint32(c.instoffset)); x != 0 && y != 0 { + return C_RCON2S + } + return C_LCON + + case obj.NAME_EXTERN, + obj.NAME_GOTREF, + obj.NAME_STATIC: + s := a.Sym + if s == nil { + break + } + c.instoffset = 0 // s.b. unused but just in case + return C_LCONADDR + + case obj.NAME_AUTO: + if a.Reg == REGSP { + // unset base register for better printing, since + // a.Offset is still relative to pseudo-SP. + a.Reg = obj.REG_NONE + } + c.instoffset = c.autosize + a.Offset + return c.aconsize() + + case obj.NAME_PARAM: + if a.Reg == REGSP { + // unset base register for better printing, since + // a.Offset is still relative to pseudo-FP. 
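+ // (This mirrors the obj.NAME_PARAM handling under obj.TYPE_MEM above,
+ // including the extra +4 added to c.autosize.)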
+ a.Reg = obj.REG_NONE + } + c.instoffset = c.autosize + a.Offset + 4 + return c.aconsize() + } + + return C_GOK + + case obj.TYPE_BRANCH: + return C_SBRA + } + + return C_GOK +} + +func (c *ctxt5) aconsize() int { + if immrot(uint32(c.instoffset)) != 0 { + return C_RACON + } + if immrot(uint32(-c.instoffset)) != 0 { + return C_RACON + } + return C_LACON +} + +func (c *ctxt5) oplook(p *obj.Prog) *Optab { + a1 := int(p.Optab) + if a1 != 0 { + return &optab[a1-1] + } + a1 = int(p.From.Class) + if a1 == 0 { + a1 = c.aclass(&p.From) + 1 + p.From.Class = int8(a1) + } + + a1-- + a3 := int(p.To.Class) + if a3 == 0 { + a3 = c.aclass(&p.To) + 1 + p.To.Class = int8(a3) + } + + a3-- + a2 := C_NONE + if p.Reg != 0 { + switch { + case REG_F0 <= p.Reg && p.Reg <= REG_F15: + a2 = C_FREG + case REG_R0 <= p.Reg && p.Reg <= REG_R15: + a2 = C_REG + default: + c.ctxt.Diag("invalid register in %v", p) + } + } + + // check illegal base register + switch a1 { + case C_SOREG, C_LOREG, C_HOREG, C_FOREG, C_ROREG, C_HFOREG, C_SROREG, C_SHIFTADDR: + if p.From.Reg < REG_R0 || REG_R15 < p.From.Reg { + c.ctxt.Diag("illegal base register: %v", p) + } + default: + } + switch a3 { + case C_SOREG, C_LOREG, C_HOREG, C_FOREG, C_ROREG, C_HFOREG, C_SROREG, C_SHIFTADDR: + if p.To.Reg < REG_R0 || REG_R15 < p.To.Reg { + c.ctxt.Diag("illegal base register: %v", p) + } + default: + } + + // If current instruction has a .S suffix (flags update), + // we must use the constant pool instead of splitting it. + if (a1 == C_RCON2A || a1 == C_RCON2S) && p.Scond&C_SBIT != 0 { + a1 = C_LCON + } + if (a3 == C_RCON2A || a3 == C_RCON2S) && p.Scond&C_SBIT != 0 { + a3 = C_LCON + } + + if false { /*debug['O']*/ + fmt.Printf("oplook %v %v %v %v\n", p.As, DRconv(a1), DRconv(a2), DRconv(a3)) + fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type) + } + + ops := oprange[p.As&obj.AMask] + c1 := &xcmp[a1] + c3 := &xcmp[a3] + for i := range ops { + op := &ops[i] + if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] { + p.Optab = uint16(cap(optab) - cap(ops) + i + 1) + checkSuffix(c, p, op) + return op + } + } + + c.ctxt.Diag("illegal combination %v; %v %v %v; from %d %d; to %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), p.From.Type, p.From.Name, p.To.Type, p.To.Name) + if ops == nil { + ops = optab + } + return &ops[0] +} + +func cmp(a int, b int) bool { + if a == b { + return true + } + switch a { + case C_LCON: + if b == C_RCON || b == C_NCON || b == C_SCON || b == C_RCON2A || b == C_RCON2S { + return true + } + + case C_LACON: + if b == C_RACON { + return true + } + + case C_LFCON: + if b == C_ZFCON || b == C_SFCON { + return true + } + + case C_HFAUTO: + return b == C_HAUTO || b == C_FAUTO + + case C_FAUTO, C_HAUTO: + return b == C_HFAUTO + + case C_SAUTO: + return cmp(C_HFAUTO, b) + + case C_LAUTO: + return cmp(C_SAUTO, b) + + case C_HFOREG: + return b == C_HOREG || b == C_FOREG + + case C_FOREG, C_HOREG: + return b == C_HFOREG + + case C_SROREG: + return cmp(C_SOREG, b) || cmp(C_ROREG, b) + + case C_SOREG, C_ROREG: + return b == C_SROREG || cmp(C_HFOREG, b) + + case C_LOREG: + return cmp(C_SROREG, b) + + case C_LBRA: + if b == C_SBRA { + return true + } + + case C_HREG: + return cmp(C_SP, b) || cmp(C_PC, b) + } + + return false +} + +type ocmp []Optab + +func (x ocmp) Len() int { + return len(x) +} + +func (x ocmp) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} + +func (x ocmp) Less(i, j int) bool { + p1 := &x[i] + p2 := &x[j] + n := int(p1.as) - int(p2.as) + if n != 0 { + return n < 0 + } + n = int(p1.a1) - int(p2.a1) + if n != 0 { + return n < 0 + } + n = 
int(p1.a2) - int(p2.a2) + if n != 0 { + return n < 0 + } + n = int(p1.a3) - int(p2.a3) + if n != 0 { + return n < 0 + } + return false +} + +func opset(a, b0 obj.As) { + oprange[a&obj.AMask] = oprange[b0] +} + +func buildop(ctxt *obj.Link) { + if oprange[AAND&obj.AMask] != nil { + // Already initialized; stop now. + // This happens in the cmd/asm tests, + // each of which re-initializes the arch. + return + } + + deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal) + + symdiv = ctxt.Lookup("runtime._div") + symdivu = ctxt.Lookup("runtime._divu") + symmod = ctxt.Lookup("runtime._mod") + symmodu = ctxt.Lookup("runtime._modu") + + var n int + + for i := 0; i < C_GOK; i++ { + for n = 0; n < C_GOK; n++ { + if cmp(n, i) { + xcmp[i][n] = true + } + } + } + for n = 0; optab[n].as != obj.AXXX; n++ { + if optab[n].flag&LPCREL != 0 { + if ctxt.Flag_shared { + optab[n].size += int8(optab[n].pcrelsiz) + } else { + optab[n].flag &^= LPCREL + } + } + } + + sort.Sort(ocmp(optab[:n])) + for i := 0; i < n; i++ { + r := optab[i].as + r0 := r & obj.AMask + start := i + for optab[i].as == r { + i++ + } + oprange[r0] = optab[start:i] + i-- + + switch r { + default: + ctxt.Diag("unknown op in build: %v", r) + ctxt.DiagFlush() + log.Fatalf("bad code") + + case AADD: + opset(ASUB, r0) + opset(ARSB, r0) + opset(AADC, r0) + opset(ASBC, r0) + opset(ARSC, r0) + + case AORR: + opset(AEOR, r0) + opset(ABIC, r0) + + case ACMP: + opset(ATEQ, r0) + opset(ACMN, r0) + opset(ATST, r0) + + case AMVN: + break + + case ABEQ: + opset(ABNE, r0) + opset(ABCS, r0) + opset(ABHS, r0) + opset(ABCC, r0) + opset(ABLO, r0) + opset(ABMI, r0) + opset(ABPL, r0) + opset(ABVS, r0) + opset(ABVC, r0) + opset(ABHI, r0) + opset(ABLS, r0) + opset(ABGE, r0) + opset(ABLT, r0) + opset(ABGT, r0) + opset(ABLE, r0) + + case ASLL: + opset(ASRL, r0) + opset(ASRA, r0) + + case AMUL: + opset(AMULU, r0) + + case ADIV: + opset(AMOD, r0) + opset(AMODU, r0) + opset(ADIVU, r0) + + case ADIVHW: + opset(ADIVUHW, r0) + + case AMOVW, + AMOVB, + AMOVBS, + AMOVBU, + AMOVH, + AMOVHS, + AMOVHU: + break + + case ASWPW: + opset(ASWPBU, r0) + + case AB, + ABL, + ABX, + ABXRET, + obj.ADUFFZERO, + obj.ADUFFCOPY, + ASWI, + AWORD, + AMOVM, + ARFE, + obj.ATEXT: + break + + case AADDF: + opset(AADDD, r0) + opset(ASUBF, r0) + opset(ASUBD, r0) + opset(AMULF, r0) + opset(AMULD, r0) + opset(ANMULF, r0) + opset(ANMULD, r0) + opset(AMULAF, r0) + opset(AMULAD, r0) + opset(AMULSF, r0) + opset(AMULSD, r0) + opset(ANMULAF, r0) + opset(ANMULAD, r0) + opset(ANMULSF, r0) + opset(ANMULSD, r0) + opset(AFMULAF, r0) + opset(AFMULAD, r0) + opset(AFMULSF, r0) + opset(AFMULSD, r0) + opset(AFNMULAF, r0) + opset(AFNMULAD, r0) + opset(AFNMULSF, r0) + opset(AFNMULSD, r0) + opset(ADIVF, r0) + opset(ADIVD, r0) + + case ANEGF: + opset(ANEGD, r0) + opset(ASQRTF, r0) + opset(ASQRTD, r0) + opset(AMOVFD, r0) + opset(AMOVDF, r0) + opset(AABSF, r0) + opset(AABSD, r0) + + case ACMPF: + opset(ACMPD, r0) + + case AMOVF: + opset(AMOVD, r0) + + case AMOVFW: + opset(AMOVDW, r0) + + case AMOVWF: + opset(AMOVWD, r0) + + case AMULL: + opset(AMULAL, r0) + opset(AMULLU, r0) + opset(AMULALU, r0) + + case AMULWT: + opset(AMULWB, r0) + opset(AMULBB, r0) + opset(AMMUL, r0) + + case AMULAWT: + opset(AMULAWB, r0) + opset(AMULABB, r0) + opset(AMULS, r0) + opset(AMMULA, r0) + opset(AMMULS, r0) + + case ABFX: + opset(ABFXU, r0) + opset(ABFC, r0) + opset(ABFI, r0) + + case ACLZ: + opset(AREV, r0) + opset(AREV16, r0) + opset(AREVSH, r0) + opset(ARBIT, r0) + + case AXTAB: + opset(AXTAH, r0) + opset(AXTABU, r0) + 
opset(AXTAHU, r0) + + case ALDREX, + ASTREX, + ALDREXD, + ASTREXD, + ADMB, + APLD, + AAND, + AMULA, + obj.AUNDEF, + obj.AFUNCDATA, + obj.APCDATA, + obj.ANOP: + break + } + } +} + +func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { + c.printp = p + o1 := uint32(0) + o2 := uint32(0) + o3 := uint32(0) + o4 := uint32(0) + o5 := uint32(0) + o6 := uint32(0) + if false { /*debug['P']*/ + fmt.Printf("%x: %v\ttype %d\n", uint32(p.Pc), p, o.type_) + } + switch o.type_ { + default: + c.ctxt.Diag("%v: unknown asm %d", p, o.type_) + + case 0: /* pseudo ops */ + if false { /*debug['G']*/ + fmt.Printf("%x: %s: arm\n", uint32(p.Pc), p.From.Sym.Name) + } + + case 1: /* op R,[R],R */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + rf := int(p.From.Reg) + rt := int(p.To.Reg) + r := int(p.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = 0 + } + if p.As == AMOVB || p.As == AMOVH || p.As == AMOVW || p.As == AMVN { + r = 0 + } else if r == 0 { + r = rt + } + o1 |= (uint32(rf)&15)<<0 | (uint32(r)&15)<<16 | (uint32(rt)&15)<<12 + + case 2: /* movbu $I,[R],R */ + c.aclass(&p.From) + + o1 = c.oprrr(p, p.As, int(p.Scond)) + o1 |= uint32(immrot(uint32(c.instoffset))) + rt := int(p.To.Reg) + r := int(p.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = 0 + } + if p.As == AMOVW || p.As == AMVN { + r = 0 + } else if r == 0 { + r = rt + } + o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12 + + case 106: /* op $I,R,R where I can be decomposed into 2 immediates */ + c.aclass(&p.From) + r := int(p.Reg) + rt := int(p.To.Reg) + if r == 0 { + r = rt + } + x, y := immrot2a(uint32(c.instoffset)) + var as2 obj.As + switch p.As { + case AADD, ASUB, AORR, AEOR, ABIC: + as2 = p.As // ADD, SUB, ORR, EOR, BIC + case ARSB: + as2 = AADD // RSB -> RSB/ADD pair + case AADC: + as2 = AADD // ADC -> ADC/ADD pair + case ASBC: + as2 = ASUB // SBC -> SBC/SUB pair + case ARSC: + as2 = AADD // RSC -> RSC/ADD pair + default: + c.ctxt.Diag("unknown second op for %v", p) + } + o1 = c.oprrr(p, p.As, int(p.Scond)) + o2 = c.oprrr(p, as2, int(p.Scond)) + o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12 + o2 |= (uint32(rt)&15)<<16 | (uint32(rt)&15)<<12 + o1 |= x + o2 |= y + + case 107: /* op $I,R,R where I can be decomposed into 2 immediates */ + c.aclass(&p.From) + r := int(p.Reg) + rt := int(p.To.Reg) + if r == 0 { + r = rt + } + y, x := immrot2s(uint32(c.instoffset)) + var as2 obj.As + switch p.As { + case AADD: + as2 = ASUB // ADD -> ADD/SUB pair + case ASUB: + as2 = AADD // SUB -> SUB/ADD pair + case ARSB: + as2 = ASUB // RSB -> RSB/SUB pair + case AADC: + as2 = ASUB // ADC -> ADC/SUB pair + case ASBC: + as2 = AADD // SBC -> SBC/ADD pair + case ARSC: + as2 = ASUB // RSC -> RSC/SUB pair + default: + c.ctxt.Diag("unknown second op for %v", p) + } + o1 = c.oprrr(p, p.As, int(p.Scond)) + o2 = c.oprrr(p, as2, int(p.Scond)) + o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12 + o2 |= (uint32(rt)&15)<<16 | (uint32(rt)&15)<<12 + o1 |= y + o2 |= x + + case 3: /* add R<<[IR],[R],R */ + o1 = c.mov(p) + + case 4: /* MOVW $off(R), R -> add $off,[R],R */ + c.aclass(&p.From) + if c.instoffset < 0 { + o1 = c.oprrr(p, ASUB, int(p.Scond)) + o1 |= uint32(immrot(uint32(-c.instoffset))) + } else { + o1 = c.oprrr(p, AADD, int(p.Scond)) + o1 |= uint32(immrot(uint32(c.instoffset))) + } + r := int(p.From.Reg) + if r == 0 { + r = int(o.param) + } + o1 |= (uint32(r) & 15) << 16 + o1 |= (uint32(p.To.Reg) & 15) << 12 + + case 5: /* bra s */ + o1 = c.opbra(p, p.As, int(p.Scond)) + + v := int32(-8) + if p.To.Sym != nil { + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 4 + 
rel.Sym = p.To.Sym
+ v += int32(p.To.Offset)
+ rel.Add = int64(o1) | (int64(v)>>2)&0xffffff
+ rel.Type = objabi.R_CALLARM
+ break
+ }
+
+ if p.To.Target() != nil {
+ v = int32((p.To.Target().Pc - c.pc) - 8)
+ }
+ o1 |= (uint32(v) >> 2) & 0xffffff
+
+ case 6: /* b ,O(R) -> add $O,R,PC */
+ c.aclass(&p.To)
+
+ o1 = c.oprrr(p, AADD, int(p.Scond))
+ o1 |= uint32(immrot(uint32(c.instoffset)))
+ o1 |= (uint32(p.To.Reg) & 15) << 16
+ o1 |= (REGPC & 15) << 12
+
+ case 7: /* bl (R) -> blx R */
+ c.aclass(&p.To)
+
+ if c.instoffset != 0 {
+ c.ctxt.Diag("%v: doesn't support BL offset(REG) with non-zero offset %d", p, c.instoffset)
+ }
+ o1 = c.oprrr(p, ABL, int(p.Scond))
+ o1 |= (uint32(p.To.Reg) & 15) << 0
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 0
+ rel.Type = objabi.R_CALLIND
+
+ case 8: /* sll $c,[R],R -> mov (R<<$c),R */
+ c.aclass(&p.From)
+
+ o1 = c.oprrr(p, p.As, int(p.Scond))
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 |= (uint32(r) & 15) << 0
+ o1 |= uint32((c.instoffset & 31) << 7)
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+
+ case 9: /* sll R,[R],R -> mov (R<<R),R */
+ o1 = c.oprrr(p, p.As, int(p.Scond))
+
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 |= (uint32(r)&15)<<0 | (uint32(p.From.Reg)&15)<<8 | 1<<4
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+
+ case 10: /* swi [$con] */
+ o1 = c.oprrr(p, p.As, int(p.Scond))
+
+ if p.To.Type != obj.TYPE_NONE {
+ c.aclass(&p.To)
+ o1 |= uint32(c.instoffset & 0xffffff)
+ }
+
+ case 11: /* word */
+ c.aclass(&p.To)
+
+ o1 = uint32(c.instoffset)
+ if p.To.Sym != nil {
+ // This case happens with words generated
+ // in the PC stream as part of the literal pool.
+ rel := obj.Addrel(c.cursym)
+
+ rel.Off = int32(c.pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ rel.Add = p.To.Offset
+
+ if c.ctxt.Flag_shared {
+ if p.To.Name == obj.NAME_GOTREF {
+ rel.Type = objabi.R_GOTPCREL
+ } else {
+ rel.Type = objabi.R_PCREL
+ }
+ rel.Add += c.pc - p.Rel.Pc - 8
+ } else {
+ rel.Type = objabi.R_ADDR
+ }
+ o1 = 0
+ }
+
+ case 12: /* movw $lcon, reg */
+ if o.a1 == C_SCON {
+ o1 = c.omvs(p, &p.From, int(p.To.Reg))
+ } else if p.As == AMVN {
+ o1 = c.omvr(p, &p.From, int(p.To.Reg))
+ } else {
+ o1 = c.omvl(p, &p.From, int(p.To.Reg))
+ }
+
+ if o.flag&LPCREL != 0 {
+ o2 = c.oprrr(p, AADD, int(p.Scond)) | (uint32(p.To.Reg)&15)<<0 | (REGPC&15)<<16 | (uint32(p.To.Reg)&15)<<12
+ }
+
+ case 13: /* op $lcon, [R], R */
+ o1 = c.omvl(p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ o2 = c.oprrr(p, p.As, int(p.Scond))
+ o2 |= REGTMP & 15
+ r := int(p.Reg)
+ if p.As == AMOVW || p.As == AMVN {
+ r = 0
+ } else if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o2 |= (uint32(r) & 15) << 16
+ if p.To.Type != obj.TYPE_NONE {
+ o2 |= (uint32(p.To.Reg) & 15) << 12
+ }
+
+ case 14: /* movb/movbu/movh/movhu R,R */
+ o1 = c.oprrr(p, ASLL, int(p.Scond))
+
+ if p.As == AMOVBU || p.As == AMOVHU {
+ o2 = c.oprrr(p, ASRL, int(p.Scond))
+ } else {
+ o2 = c.oprrr(p, ASRA, int(p.Scond))
+ }
+
+ r := int(p.To.Reg)
+ o1 |= (uint32(p.From.Reg)&15)<<0 | (uint32(r)&15)<<12
+ o2 |= uint32(r)&15 | (uint32(r)&15)<<12
+ if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
+ o1 |= 24 << 7
+ o2 |= 24 << 7
+ } else {
+ o1 |= 16 << 7
+ o2 |= 16 << 7
+ }
+
+ case 15: /* mul r,[r,]r */
+ o1 = c.oprrr(p, p.As, int(p.Scond))
+
+ rf := int(p.From.Reg)
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
+ if r == 0 {
+ r = rt
+ }
+
+ o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16
+
+ case 16: /* div r,[r,]r */
+ o1 = 0xf << 28
+
+ o2 = 0
+
+ case 17:
+ o1 = c.oprrr(p, p.As, int(p.Scond))
+ rf := int(p.From.Reg)
+ rt := int(p.To.Reg)
+ rt2 := int(p.To.Offset)
+ r := int(p.Reg)
+ o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16 | (uint32(rt2)&15)<<12
+
+ case 18: /* BFX/BFXU/BFC/BFI */
+ o1 = c.oprrr(p, p.As, int(p.Scond))
+ rt := int(p.To.Reg)
+ r := int(p.Reg)
+ if r == 0 {
+ r = rt
+ } else if p.As == ABFC { // only "BFC $width, $lsb, Reg" is accepted, p.Reg must be 0
+ c.ctxt.Diag("illegal combination: %v", p)
+ }
+ if p.GetFrom3() == nil || p.GetFrom3().Type != obj.TYPE_CONST {
+ c.ctxt.Diag("%v: missing or wrong LSB", p)
+ break
+ }
+ lsb := p.GetFrom3().Offset
+ width := p.From.Offset
+ if lsb < 0 || lsb > 31 || width <= 0 || (lsb+width) > 32 {
+ c.ctxt.Diag("%v: wrong width or LSB", p)
+ }
+ switch p.As {
+ case ABFX, ABFXU: // (width-1) is encoded
+ o1 |= (uint32(r)&15)<<0 | (uint32(rt)&15)<<12 | uint32(lsb)<<7 | uint32(width-1)<<16
+ case ABFC, ABFI: // MSB is encoded
+ o1 |= (uint32(r)&15)<<0 | (uint32(rt)&15)<<12 | uint32(lsb)<<7 | uint32(lsb+width-1)<<16
+ default:
+ c.ctxt.Diag("illegal combination: %v", p)
+ }
+
+ case 20: /* mov/movb/movbu R,O(R) */
+ c.aclass(&p.To)
+
+ r := int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = c.osr(p.As, int(p.From.Reg), int32(c.instoffset), r, int(p.Scond))
+
+ case 21: /* mov/movbu O(R),R -> lr */
+ c.aclass(&p.From)
+
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = c.olr(int32(c.instoffset), r, int(p.To.Reg), int(p.Scond))
+ if p.As != AMOVW {
+ o1 |= 1 << 22
+ }
+
+ case 22: /* XTAB R@>i, [R], R */
+ o1 = c.oprrr(p, p.As, int(p.Scond))
+ switch p.From.Offset &^ 0xf {
+ // only 0/8/16/24 bits rotation is accepted
+ case SHIFT_RR, SHIFT_RR | 8<<7, SHIFT_RR | 16<<7, SHIFT_RR | 24<<7:
+ o1 |= uint32(p.From.Offset) & 0xc0f
+ default:
+ c.ctxt.Diag("illegal shift: %v", p)
+ }
+ rt := p.To.Reg
+ r := p.Reg
+ if r == 0 {
+ r = rt
+ }
+ o1 |= (uint32(rt)&15)<<12 | (uint32(r)&15)<<16
+
+ case 23: /* MOVW/MOVB/MOVH R@>i, R */
+ switch p.As {
+ case AMOVW:
+ o1 = c.mov(p)
+ case AMOVBU, AMOVBS, AMOVB, AMOVHU, AMOVHS, AMOVH:
+ o1 = c.movxt(p)
+ default:
+ c.ctxt.Diag("illegal combination: %v", p)
+ }
+
+ case 30: /* mov/movb/movbu R,L(R) */
+ o1 = c.omvl(p, &p.To, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ r := int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o2 = c.osrr(int(p.From.Reg), REGTMP&15, r, int(p.Scond))
+ if p.As != AMOVW {
+ o2 |= 1 << 22
+ }
+
+ case 31: /* mov/movbu L(R),R -> lr[b] */
+ o1 = c.omvl(p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o2 = c.olrr(REGTMP&15, r, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVBU || p.As == AMOVBS || p.As == AMOVB {
+ o2 |= 1 << 22
+ }
+
+ case 34: /* mov $lacon,R */
+ o1 = c.omvl(p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+
+ o2 = c.oprrr(p, AADD, int(p.Scond))
+ o2 |= REGTMP & 15
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o2 |= (uint32(r) & 15) << 16
+ if p.To.Type != obj.TYPE_NONE {
+ o2 |= (uint32(p.To.Reg) & 15) << 12
+ }
+
+ case 35: /* mov PSR,R */
+ o1 = 2<<23 | 0xf<<16 | 0<<0
+
+ o1 |=
((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + o1 |= (uint32(p.From.Reg) & 1) << 22 + o1 |= (uint32(p.To.Reg) & 15) << 12 + + case 36: /* mov R,PSR */ + o1 = 2<<23 | 0x2cf<<12 | 0<<4 + + if p.Scond&C_FBIT != 0 { + o1 ^= 0x010 << 12 + } + o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + o1 |= (uint32(p.To.Reg) & 1) << 22 + o1 |= (uint32(p.From.Reg) & 15) << 0 + + case 37: /* mov $con,PSR */ + c.aclass(&p.From) + + o1 = 2<<23 | 0x2cf<<12 | 0<<4 + if p.Scond&C_FBIT != 0 { + o1 ^= 0x010 << 12 + } + o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + o1 |= uint32(immrot(uint32(c.instoffset))) + o1 |= (uint32(p.To.Reg) & 1) << 22 + o1 |= (uint32(p.From.Reg) & 15) << 0 + + case 38, 39: + switch o.type_ { + case 38: /* movm $con,oreg -> stm */ + o1 = 0x4 << 25 + + o1 |= uint32(p.From.Offset & 0xffff) + o1 |= (uint32(p.To.Reg) & 15) << 16 + c.aclass(&p.To) + + case 39: /* movm oreg,$con -> ldm */ + o1 = 0x4<<25 | 1<<20 + + o1 |= uint32(p.To.Offset & 0xffff) + o1 |= (uint32(p.From.Reg) & 15) << 16 + c.aclass(&p.From) + } + + if c.instoffset != 0 { + c.ctxt.Diag("offset must be zero in MOVM; %v", p) + } + o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + if p.Scond&C_PBIT != 0 { + o1 |= 1 << 24 + } + if p.Scond&C_UBIT != 0 { + o1 |= 1 << 23 + } + if p.Scond&C_WBIT != 0 { + o1 |= 1 << 21 + } + + case 40: /* swp oreg,reg,reg */ + c.aclass(&p.From) + + if c.instoffset != 0 { + c.ctxt.Diag("offset must be zero in SWP") + } + o1 = 0x2<<23 | 0x9<<4 + if p.As != ASWPW { + o1 |= 1 << 22 + } + o1 |= (uint32(p.From.Reg) & 15) << 16 + o1 |= (uint32(p.Reg) & 15) << 0 + o1 |= (uint32(p.To.Reg) & 15) << 12 + o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + + case 41: /* rfe -> movm.s.w.u 0(r13),[r15] */ + o1 = 0xe8fd8000 + + case 50: /* floating point store */ + v := c.regoff(&p.To) + + r := int(p.To.Reg) + if r == 0 { + r = int(o.param) + } + o1 = c.ofsr(p.As, int(p.From.Reg), v, r, int(p.Scond), p) + + case 51: /* floating point load */ + v := c.regoff(&p.From) + + r := int(p.From.Reg) + if r == 0 { + r = int(o.param) + } + o1 = c.ofsr(p.As, int(p.To.Reg), v, r, int(p.Scond), p) | 1<<20 + + case 52: /* floating point store, int32 offset UGLY */ + o1 = c.omvl(p, &p.To, REGTMP) + + if o1 == 0 { + break + } + r := int(p.To.Reg) + if r == 0 { + r = int(o.param) + } + o2 = c.oprrr(p, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0 + o3 = c.ofsr(p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p) + + case 53: /* floating point load, int32 offset UGLY */ + o1 = c.omvl(p, &p.From, REGTMP) + + if o1 == 0 { + break + } + r := int(p.From.Reg) + if r == 0 { + r = int(o.param) + } + o2 = c.oprrr(p, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0 + o3 = c.ofsr(p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20 + + case 54: /* floating point arith */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + rf := int(p.From.Reg) + rt := int(p.To.Reg) + r := int(p.Reg) + if r == 0 { + switch p.As { + case AMULAD, AMULAF, AMULSF, AMULSD, ANMULAF, ANMULAD, ANMULSF, ANMULSD, + AFMULAD, AFMULAF, AFMULSF, AFMULSD, AFNMULAF, AFNMULAD, AFNMULSF, AFNMULSD: + c.ctxt.Diag("illegal combination: %v", p) + default: + r = rt + } + } + + o1 |= (uint32(rf)&15)<<0 | (uint32(r)&15)<<16 | (uint32(rt)&15)<<12 + + case 55: /* negf freg, freg */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + rf := int(p.From.Reg) + rt := int(p.To.Reg) + + o1 |= (uint32(rf)&15)<<0 | (uint32(rt)&15)<<12 + + case 56: /* move to FP[CS]R */ + o1 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 
| 0xee1<<16 | 0xa1<<4
+
+		o1 |= (uint32(p.From.Reg) & 15) << 12
+
+	case 57: /* move from FP[CS]R */
+		o1 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0xef1<<16 | 0xa1<<4
+
+		o1 |= (uint32(p.To.Reg) & 15) << 12
+
+	case 58: /* movbu R,R */
+		o1 = c.oprrr(p, AAND, int(p.Scond))
+
+		o1 |= uint32(immrot(0xff))
+		rt := int(p.To.Reg)
+		r := int(p.From.Reg)
+		if p.To.Type == obj.TYPE_NONE {
+			rt = 0
+		}
+		if r == 0 {
+			r = rt
+		}
+		o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
+
+	case 59: /* movw/bu R<<I(R),R -> ldr indexed */
+		if p.From.Reg == 0 {
+			c.ctxt.Diag("source operand is not a memory address: %v", p)
+			break
+		}
+		if p.From.Offset&(1<<4) != 0 {
+			c.ctxt.Diag("bad shift in LDR")
+			break
+		}
+		o1 = c.olrr(int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
+		if p.As == AMOVBU {
+			o1 |= 1 << 22
+		}
+
+	case 60: /* movb R(R),R -> ldrsb indexed */
+		if p.From.Reg == 0 {
+			c.ctxt.Diag("source operand is not a memory address: %v", p)
+			break
+		}
+		if p.From.Offset&(^0xf) != 0 {
+			c.ctxt.Diag("bad shift: %v", p)
+			break
+		}
+		o1 = c.olhrr(int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
+		switch p.As {
+		case AMOVB, AMOVBS:
+			o1 ^= 1<<5 | 1<<6
+		case AMOVH, AMOVHS:
+			o1 ^= 1 << 6
+		default:
+		}
+		if p.Scond&C_UBIT != 0 {
+			o1 &^= 1 << 23
+		}
+
+	case 61: /* movw/b/bu R,R<<[IR](R) -> str indexed */
+		if p.To.Reg == 0 {
+			c.ctxt.Diag("MOV to shifter operand")
+		}
+		o1 = c.osrr(int(p.From.Reg), int(p.To.Offset), int(p.To.Reg), int(p.Scond))
+		if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
+			o1 |= 1 << 22
+		}
+
+	case 62: /* MOVH/MOVHS/MOVHU Reg, Reg<<0(Reg) -> strh */
+		if p.To.Reg == 0 {
+			c.ctxt.Diag("MOV to shifter operand")
+		}
+		if p.To.Offset&(^0xf) != 0 {
+			c.ctxt.Diag("bad shift: %v", p)
+		}
+		o1 = c.olhrr(int(p.To.Offset), int(p.To.Reg), int(p.From.Reg), int(p.Scond))
+		o1 ^= 1 << 20
+		if p.Scond&C_UBIT != 0 {
+			o1 &^= 1 << 23
+		}
+
+	/* reloc ops */
+	case 64: /* mov/movb/movbu R,addr */
+		o1 = c.omvl(p, &p.To, REGTMP)
+
+		if o1 == 0 {
+			break
+		}
+		o2 = c.osr(p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond))
+		if o.flag&LPCREL != 0 {
+			o3 = o2
+			o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
+		}
+
+	case 65: /* mov/movbu addr,R */
+		o1 = c.omvl(p, &p.From, REGTMP)
+
+		if o1 == 0 {
+			break
+		}
+		o2 = c.olr(0, REGTMP, int(p.To.Reg), int(p.Scond))
+		if p.As == AMOVBU || p.As == AMOVBS || p.As == AMOVB {
+			o2 |= 1 << 22
+		}
+		if o.flag&LPCREL != 0 {
+			o3 = o2
+			o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
+		}
+
+	case 101: /* movw tlsvar,R, local exec*/
+		o1 = c.omvl(p, &p.From, int(p.To.Reg))
+
+	case 102: /* movw tlsvar,R, initial exec*/
+		o1 = c.omvl(p, &p.From, int(p.To.Reg))
+		o2 = c.olrr(int(p.To.Reg)&15, (REGPC & 15), int(p.To.Reg), int(p.Scond))
+
+	case 103: /* word tlsvar, local exec */
+		if p.To.Sym == nil {
+			c.ctxt.Diag("nil sym in tls %v", p)
+		}
+		if p.To.Offset != 0 {
+			c.ctxt.Diag("offset against tls var in %v", p)
+		}
+		// This case happens with words generated in the PC stream as part of
+		// the literal c.pool.
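+		// A note on the mechanism (sketch): the word itself is emitted as
+		// zero (o1 = 0 below), and the R_TLS_LE relocation recorded here
+		// makes the linker patch in the variable's offset from the
+		// thread-local base, so a later load of this pool word yields
+		// that offset.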
+ rel := obj.Addrel(c.cursym) + + rel.Off = int32(c.pc) + rel.Siz = 4 + rel.Sym = p.To.Sym + rel.Type = objabi.R_TLS_LE + o1 = 0 + + case 104: /* word tlsvar, initial exec */ + if p.To.Sym == nil { + c.ctxt.Diag("nil sym in tls %v", p) + } + if p.To.Offset != 0 { + c.ctxt.Diag("offset against tls var in %v", p) + } + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 4 + rel.Sym = p.To.Sym + rel.Type = objabi.R_TLS_IE + rel.Add = c.pc - p.Rel.Pc - 8 - int64(rel.Siz) + + case 68: /* floating point store -> ADDR */ + o1 = c.omvl(p, &p.To, REGTMP) + + if o1 == 0 { + break + } + o2 = c.ofsr(p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p) + if o.flag&LPCREL != 0 { + o3 = o2 + o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12 + } + + case 69: /* floating point load <- ADDR */ + o1 = c.omvl(p, &p.From, REGTMP) + + if o1 == 0 { + break + } + o2 = c.ofsr(p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20 + if o.flag&LPCREL != 0 { + o3 = o2 + o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12 + } + + /* ArmV4 ops: */ + case 70: /* movh/movhu R,O(R) -> strh */ + c.aclass(&p.To) + + r := int(p.To.Reg) + if r == 0 { + r = int(o.param) + } + o1 = c.oshr(int(p.From.Reg), int32(c.instoffset), r, int(p.Scond)) + + case 71: /* movb/movh/movhu O(R),R -> ldrsb/ldrsh/ldrh */ + c.aclass(&p.From) + + r := int(p.From.Reg) + if r == 0 { + r = int(o.param) + } + o1 = c.olhr(int32(c.instoffset), r, int(p.To.Reg), int(p.Scond)) + if p.As == AMOVB || p.As == AMOVBS { + o1 ^= 1<<5 | 1<<6 + } else if p.As == AMOVH || p.As == AMOVHS { + o1 ^= (1 << 6) + } + + case 72: /* movh/movhu R,L(R) -> strh */ + o1 = c.omvl(p, &p.To, REGTMP) + + if o1 == 0 { + break + } + r := int(p.To.Reg) + if r == 0 { + r = int(o.param) + } + o2 = c.oshrr(int(p.From.Reg), REGTMP&15, r, int(p.Scond)) + + case 73: /* movb/movh/movhu L(R),R -> ldrsb/ldrsh/ldrh */ + o1 = c.omvl(p, &p.From, REGTMP) + + if o1 == 0 { + break + } + r := int(p.From.Reg) + if r == 0 { + r = int(o.param) + } + o2 = c.olhrr(REGTMP&15, r, int(p.To.Reg), int(p.Scond)) + if p.As == AMOVB || p.As == AMOVBS { + o2 ^= 1<<5 | 1<<6 + } else if p.As == AMOVH || p.As == AMOVHS { + o2 ^= (1 << 6) + } + + case 74: /* bx $I */ + c.ctxt.Diag("ABX $I") + + case 75: /* bx O(R) */ + c.aclass(&p.To) + + if c.instoffset != 0 { + c.ctxt.Diag("non-zero offset in ABX") + } + + /* + o1 = c.oprrr(p, AADD, p->scond) | immrot(0) | ((REGPC&15)<<16) | ((REGLINK&15)<<12); // mov PC, LR + o2 = (((p->scond&C_SCOND) ^ C_SCOND_XOR)<<28) | (0x12fff<<8) | (1<<4) | ((p->to.reg&15) << 0); // BX R + */ + // p->to.reg may be REGLINK + o1 = c.oprrr(p, AADD, int(p.Scond)) + + o1 |= uint32(immrot(uint32(c.instoffset))) + o1 |= (uint32(p.To.Reg) & 15) << 16 + o1 |= (REGTMP & 15) << 12 + o2 = c.oprrr(p, AADD, int(p.Scond)) | uint32(immrot(0)) | (REGPC&15)<<16 | (REGLINK&15)<<12 // mov PC, LR + o3 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x12fff<<8 | 1<<4 | REGTMP&15 // BX Rtmp + + case 76: /* bx O(R) when returning from fn*/ + c.ctxt.Diag("ABXRET") + + case 77: /* ldrex oreg,reg */ + c.aclass(&p.From) + + if c.instoffset != 0 { + c.ctxt.Diag("offset must be zero in LDREX") + } + o1 = 0x19<<20 | 0xf9f + o1 |= (uint32(p.From.Reg) & 15) << 16 + o1 |= (uint32(p.To.Reg) & 15) << 12 + o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + + case 78: /* strex reg,oreg,reg */ + c.aclass(&p.From) + + if c.instoffset != 0 { + c.ctxt.Diag("offset must be zero in STREX") + } + if p.To.Reg == p.From.Reg || p.To.Reg == p.Reg { + 
c.ctxt.Diag("cannot use same register as both source and destination: %v", p) + } + o1 = 0x18<<20 | 0xf90 + o1 |= (uint32(p.From.Reg) & 15) << 16 + o1 |= (uint32(p.Reg) & 15) << 0 + o1 |= (uint32(p.To.Reg) & 15) << 12 + o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + + case 80: /* fmov zfcon,freg */ + if p.As == AMOVD { + o1 = 0xeeb00b00 // VMOV imm 64 + o2 = c.oprrr(p, ASUBD, int(p.Scond)) + } else { + o1 = 0x0eb00a00 // VMOV imm 32 + o2 = c.oprrr(p, ASUBF, int(p.Scond)) + } + + v := int32(0x70) // 1.0 + r := (int(p.To.Reg) & 15) << 0 + + // movf $1.0, r + o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + + o1 |= (uint32(r) & 15) << 12 + o1 |= (uint32(v) & 0xf) << 0 + o1 |= (uint32(v) & 0xf0) << 12 + + // subf r,r,r + o2 |= (uint32(r)&15)<<0 | (uint32(r)&15)<<16 | (uint32(r)&15)<<12 + + case 81: /* fmov sfcon,freg */ + o1 = 0x0eb00a00 // VMOV imm 32 + if p.As == AMOVD { + o1 = 0xeeb00b00 // VMOV imm 64 + } + o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + o1 |= (uint32(p.To.Reg) & 15) << 12 + v := int32(c.chipfloat5(p.From.Val.(float64))) + o1 |= (uint32(v) & 0xf) << 0 + o1 |= (uint32(v) & 0xf0) << 12 + + case 82: /* fcmp freg,freg, */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + o1 |= (uint32(p.Reg)&15)<<12 | (uint32(p.From.Reg)&15)<<0 + o2 = 0x0ef1fa10 // VMRS R15 + o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + + case 83: /* fcmp freg,, */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + o1 |= (uint32(p.From.Reg)&15)<<12 | 1<<16 + o2 = 0x0ef1fa10 // VMRS R15 + o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + + case 84: /* movfw freg,freg - truncate float-to-fix */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + o1 |= (uint32(p.From.Reg) & 15) << 0 + o1 |= (uint32(p.To.Reg) & 15) << 12 + + case 85: /* movwf freg,freg - fix-to-float */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + o1 |= (uint32(p.From.Reg) & 15) << 0 + o1 |= (uint32(p.To.Reg) & 15) << 12 + + // macro for movfw freg,FTMP; movw FTMP,reg + case 86: /* movfw freg,reg - truncate float-to-fix */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + o1 |= (uint32(p.From.Reg) & 15) << 0 + o1 |= (FREGTMP & 15) << 12 + o2 = c.oprrr(p, -AMOVFW, int(p.Scond)) + o2 |= (FREGTMP & 15) << 16 + o2 |= (uint32(p.To.Reg) & 15) << 12 + + // macro for movw reg,FTMP; movwf FTMP,freg + case 87: /* movwf reg,freg - fix-to-float */ + o1 = c.oprrr(p, -AMOVWF, int(p.Scond)) + + o1 |= (uint32(p.From.Reg) & 15) << 12 + o1 |= (FREGTMP & 15) << 16 + o2 = c.oprrr(p, p.As, int(p.Scond)) + o2 |= (FREGTMP & 15) << 0 + o2 |= (uint32(p.To.Reg) & 15) << 12 + + case 88: /* movw reg,freg */ + o1 = c.oprrr(p, -AMOVWF, int(p.Scond)) + + o1 |= (uint32(p.From.Reg) & 15) << 12 + o1 |= (uint32(p.To.Reg) & 15) << 16 + + case 89: /* movw freg,reg */ + o1 = c.oprrr(p, -AMOVFW, int(p.Scond)) + + o1 |= (uint32(p.From.Reg) & 15) << 16 + o1 |= (uint32(p.To.Reg) & 15) << 12 + + case 91: /* ldrexd oreg,reg */ + c.aclass(&p.From) + + if c.instoffset != 0 { + c.ctxt.Diag("offset must be zero in LDREX") + } + o1 = 0x1b<<20 | 0xf9f + o1 |= (uint32(p.From.Reg) & 15) << 16 + o1 |= (uint32(p.To.Reg) & 15) << 12 + o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + + case 92: /* strexd reg,oreg,reg */ + c.aclass(&p.From) + + if c.instoffset != 0 { + c.ctxt.Diag("offset must be zero in STREX") + } + if p.Reg&1 != 0 { + c.ctxt.Diag("source register must be even in STREXD: %v", p) + } + if p.To.Reg == p.From.Reg || p.To.Reg == p.Reg || p.To.Reg == p.Reg+1 { + c.ctxt.Diag("cannot use same register as both source and destination: %v", p) + } + o1 = 0x1a<<20 | 0xf90 + 
o1 |= (uint32(p.From.Reg) & 15) << 16 + o1 |= (uint32(p.Reg) & 15) << 0 + o1 |= (uint32(p.To.Reg) & 15) << 12 + o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + + case 93: /* movb/movh/movhu addr,R -> ldrsb/ldrsh/ldrh */ + o1 = c.omvl(p, &p.From, REGTMP) + + if o1 == 0 { + break + } + o2 = c.olhr(0, REGTMP, int(p.To.Reg), int(p.Scond)) + if p.As == AMOVB || p.As == AMOVBS { + o2 ^= 1<<5 | 1<<6 + } else if p.As == AMOVH || p.As == AMOVHS { + o2 ^= (1 << 6) + } + if o.flag&LPCREL != 0 { + o3 = o2 + o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12 + } + + case 94: /* movh/movhu R,addr -> strh */ + o1 = c.omvl(p, &p.To, REGTMP) + + if o1 == 0 { + break + } + o2 = c.oshr(int(p.From.Reg), 0, REGTMP, int(p.Scond)) + if o.flag&LPCREL != 0 { + o3 = o2 + o2 = c.oprrr(p, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12 + } + + case 95: /* PLD off(reg) */ + o1 = 0xf5d0f000 + + o1 |= (uint32(p.From.Reg) & 15) << 16 + if p.From.Offset < 0 { + o1 &^= (1 << 23) + o1 |= uint32((-p.From.Offset) & 0xfff) + } else { + o1 |= uint32(p.From.Offset & 0xfff) + } + + // This is supposed to be something that stops execution. + // It's not supposed to be reached, ever, but if it is, we'd + // like to be able to tell how we got there. Assemble as + // 0xf7fabcfd which is guaranteed to raise undefined instruction + // exception. + case 96: /* UNDEF */ + o1 = 0xf7fabcfd + + case 97: /* CLZ Rm, Rd */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + o1 |= (uint32(p.To.Reg) & 15) << 12 + o1 |= (uint32(p.From.Reg) & 15) << 0 + + case 98: /* MULW{T,B} Rs, Rm, Rd */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + o1 |= (uint32(p.To.Reg) & 15) << 16 + o1 |= (uint32(p.From.Reg) & 15) << 8 + o1 |= (uint32(p.Reg) & 15) << 0 + + case 99: /* MULAW{T,B} Rs, Rm, Rn, Rd */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + + o1 |= (uint32(p.To.Reg) & 15) << 16 + o1 |= (uint32(p.From.Reg) & 15) << 8 + o1 |= (uint32(p.Reg) & 15) << 0 + o1 |= uint32((p.To.Offset & 15) << 12) + + case 105: /* divhw r,[r,]r */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + rf := int(p.From.Reg) + rt := int(p.To.Reg) + r := int(p.Reg) + if r == 0 { + r = rt + } + o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16 + + case 110: /* dmb [mbop | $con] */ + o1 = 0xf57ff050 + mbop := uint32(0) + + switch c.aclass(&p.From) { + case C_SPR: + for _, f := range mbOp { + if f.reg == p.From.Reg { + mbop = f.enc + break + } + } + case C_RCON: + for _, f := range mbOp { + enc := uint32(c.instoffset) + if f.enc == enc { + mbop = enc + break + } + } + case C_NONE: + mbop = 0xf + } + + if mbop == 0 { + c.ctxt.Diag("illegal mb option:\n%v", p) + } + o1 |= mbop + } + + out[0] = o1 + out[1] = o2 + out[2] = o3 + out[3] = o4 + out[4] = o5 + out[5] = o6 +} + +func (c *ctxt5) movxt(p *obj.Prog) uint32 { + o1 := ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + switch p.As { + case AMOVB, AMOVBS: + o1 |= 0x6af<<16 | 0x7<<4 + case AMOVH, AMOVHS: + o1 |= 0x6bf<<16 | 0x7<<4 + case AMOVBU: + o1 |= 0x6ef<<16 | 0x7<<4 + case AMOVHU: + o1 |= 0x6ff<<16 | 0x7<<4 + default: + c.ctxt.Diag("illegal combination: %v", p) + } + switch p.From.Offset &^ 0xf { + // only 0/8/16/24 bits rotation is accepted + case SHIFT_RR, SHIFT_RR | 8<<7, SHIFT_RR | 16<<7, SHIFT_RR | 24<<7: + o1 |= uint32(p.From.Offset) & 0xc0f + default: + c.ctxt.Diag("illegal shift: %v", p) + } + o1 |= (uint32(p.To.Reg) & 15) << 12 + return o1 +} + +func (c *ctxt5) mov(p *obj.Prog) uint32 { + c.aclass(&p.From) + o1 := c.oprrr(p, p.As, int(p.Scond)) + o1 |= uint32(p.From.Offset) + rt 
:= int(p.To.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = 0 + } + r := int(p.Reg) + if p.As == AMOVW || p.As == AMVN { + r = 0 + } else if r == 0 { + r = rt + } + o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12 + return o1 +} + +func (c *ctxt5) oprrr(p *obj.Prog, a obj.As, sc int) uint32 { + o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28 + if sc&C_SBIT != 0 { + o |= 1 << 20 + } + switch a { + case ADIVHW: + return o | 0x71<<20 | 0xf<<12 | 0x1<<4 + case ADIVUHW: + return o | 0x73<<20 | 0xf<<12 | 0x1<<4 + case AMMUL: + return o | 0x75<<20 | 0xf<<12 | 0x1<<4 + case AMULS: + return o | 0x6<<20 | 0x9<<4 + case AMMULA: + return o | 0x75<<20 | 0x1<<4 + case AMMULS: + return o | 0x75<<20 | 0xd<<4 + case AMULU, AMUL: + return o | 0x0<<21 | 0x9<<4 + case AMULA: + return o | 0x1<<21 | 0x9<<4 + case AMULLU: + return o | 0x4<<21 | 0x9<<4 + case AMULL: + return o | 0x6<<21 | 0x9<<4 + case AMULALU: + return o | 0x5<<21 | 0x9<<4 + case AMULAL: + return o | 0x7<<21 | 0x9<<4 + case AAND: + return o | 0x0<<21 + case AEOR: + return o | 0x1<<21 + case ASUB: + return o | 0x2<<21 + case ARSB: + return o | 0x3<<21 + case AADD: + return o | 0x4<<21 + case AADC: + return o | 0x5<<21 + case ASBC: + return o | 0x6<<21 + case ARSC: + return o | 0x7<<21 + case ATST: + return o | 0x8<<21 | 1<<20 + case ATEQ: + return o | 0x9<<21 | 1<<20 + case ACMP: + return o | 0xa<<21 | 1<<20 + case ACMN: + return o | 0xb<<21 | 1<<20 + case AORR: + return o | 0xc<<21 + + case AMOVB, AMOVH, AMOVW: + if sc&(C_PBIT|C_WBIT) != 0 { + c.ctxt.Diag("invalid .P/.W suffix: %v", p) + } + return o | 0xd<<21 + case ABIC: + return o | 0xe<<21 + case AMVN: + return o | 0xf<<21 + case ASLL: + return o | 0xd<<21 | 0<<5 + case ASRL: + return o | 0xd<<21 | 1<<5 + case ASRA: + return o | 0xd<<21 | 2<<5 + case ASWI: + return o | 0xf<<24 + + case AADDD: + return o | 0xe<<24 | 0x3<<20 | 0xb<<8 | 0<<4 + case AADDF: + return o | 0xe<<24 | 0x3<<20 | 0xa<<8 | 0<<4 + case ASUBD: + return o | 0xe<<24 | 0x3<<20 | 0xb<<8 | 4<<4 + case ASUBF: + return o | 0xe<<24 | 0x3<<20 | 0xa<<8 | 4<<4 + case AMULD: + return o | 0xe<<24 | 0x2<<20 | 0xb<<8 | 0<<4 + case AMULF: + return o | 0xe<<24 | 0x2<<20 | 0xa<<8 | 0<<4 + case ANMULD: + return o | 0xe<<24 | 0x2<<20 | 0xb<<8 | 0x4<<4 + case ANMULF: + return o | 0xe<<24 | 0x2<<20 | 0xa<<8 | 0x4<<4 + case AMULAD: + return o | 0xe<<24 | 0xb<<8 + case AMULAF: + return o | 0xe<<24 | 0xa<<8 + case AMULSD: + return o | 0xe<<24 | 0xb<<8 | 0x4<<4 + case AMULSF: + return o | 0xe<<24 | 0xa<<8 | 0x4<<4 + case ANMULAD: + return o | 0xe<<24 | 0x1<<20 | 0xb<<8 | 0x4<<4 + case ANMULAF: + return o | 0xe<<24 | 0x1<<20 | 0xa<<8 | 0x4<<4 + case ANMULSD: + return o | 0xe<<24 | 0x1<<20 | 0xb<<8 + case ANMULSF: + return o | 0xe<<24 | 0x1<<20 | 0xa<<8 + case AFMULAD: + return o | 0xe<<24 | 0xa<<20 | 0xb<<8 + case AFMULAF: + return o | 0xe<<24 | 0xa<<20 | 0xa<<8 + case AFMULSD: + return o | 0xe<<24 | 0xa<<20 | 0xb<<8 | 0x4<<4 + case AFMULSF: + return o | 0xe<<24 | 0xa<<20 | 0xa<<8 | 0x4<<4 + case AFNMULAD: + return o | 0xe<<24 | 0x9<<20 | 0xb<<8 | 0x4<<4 + case AFNMULAF: + return o | 0xe<<24 | 0x9<<20 | 0xa<<8 | 0x4<<4 + case AFNMULSD: + return o | 0xe<<24 | 0x9<<20 | 0xb<<8 + case AFNMULSF: + return o | 0xe<<24 | 0x9<<20 | 0xa<<8 + case ADIVD: + return o | 0xe<<24 | 0x8<<20 | 0xb<<8 | 0<<4 + case ADIVF: + return o | 0xe<<24 | 0x8<<20 | 0xa<<8 | 0<<4 + case ASQRTD: + return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xb<<8 | 0xc<<4 + case ASQRTF: + return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xa<<8 | 0xc<<4 + case AABSD: + return o | 0xe<<24 | 0xb<<20 | 0<<16 | 
0xb<<8 | 0xc<<4 + case AABSF: + return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xa<<8 | 0xc<<4 + case ANEGD: + return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xb<<8 | 0x4<<4 + case ANEGF: + return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xa<<8 | 0x4<<4 + case ACMPD: + return o | 0xe<<24 | 0xb<<20 | 4<<16 | 0xb<<8 | 0xc<<4 + case ACMPF: + return o | 0xe<<24 | 0xb<<20 | 4<<16 | 0xa<<8 | 0xc<<4 + + case AMOVF: + return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xa<<8 | 4<<4 + case AMOVD: + return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xb<<8 | 4<<4 + + case AMOVDF: + return o | 0xe<<24 | 0xb<<20 | 7<<16 | 0xa<<8 | 0xc<<4 | 1<<8 // dtof + case AMOVFD: + return o | 0xe<<24 | 0xb<<20 | 7<<16 | 0xa<<8 | 0xc<<4 | 0<<8 // dtof + + case AMOVWF: + if sc&C_UBIT == 0 { + o |= 1 << 7 /* signed */ + } + return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 0<<18 | 0<<8 // toint, double + + case AMOVWD: + if sc&C_UBIT == 0 { + o |= 1 << 7 /* signed */ + } + return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 0<<18 | 1<<8 // toint, double + + case AMOVFW: + if sc&C_UBIT == 0 { + o |= 1 << 16 /* signed */ + } + return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 1<<18 | 0<<8 | 1<<7 // toint, double, trunc + + case AMOVDW: + if sc&C_UBIT == 0 { + o |= 1 << 16 /* signed */ + } + return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 1<<18 | 1<<8 | 1<<7 // toint, double, trunc + + case -AMOVWF: // copy WtoF + return o | 0xe<<24 | 0x0<<20 | 0xb<<8 | 1<<4 + + case -AMOVFW: // copy FtoW + return o | 0xe<<24 | 0x1<<20 | 0xb<<8 | 1<<4 + + case -ACMP: // cmp imm + return o | 0x3<<24 | 0x5<<20 + + case ABFX: + return o | 0x3d<<21 | 0x5<<4 + + case ABFXU: + return o | 0x3f<<21 | 0x5<<4 + + case ABFC: + return o | 0x3e<<21 | 0x1f + + case ABFI: + return o | 0x3e<<21 | 0x1<<4 + + case AXTAB: + return o | 0x6a<<20 | 0x7<<4 + + case AXTAH: + return o | 0x6b<<20 | 0x7<<4 + + case AXTABU: + return o | 0x6e<<20 | 0x7<<4 + + case AXTAHU: + return o | 0x6f<<20 | 0x7<<4 + + // CLZ doesn't support .nil + case ACLZ: + return o&(0xf<<28) | 0x16f<<16 | 0xf1<<4 + + case AREV: + return o&(0xf<<28) | 0x6bf<<16 | 0xf3<<4 + + case AREV16: + return o&(0xf<<28) | 0x6bf<<16 | 0xfb<<4 + + case AREVSH: + return o&(0xf<<28) | 0x6ff<<16 | 0xfb<<4 + + case ARBIT: + return o&(0xf<<28) | 0x6ff<<16 | 0xf3<<4 + + case AMULWT: + return o&(0xf<<28) | 0x12<<20 | 0xe<<4 + + case AMULWB: + return o&(0xf<<28) | 0x12<<20 | 0xa<<4 + + case AMULBB: + return o&(0xf<<28) | 0x16<<20 | 0x8<<4 + + case AMULAWT: + return o&(0xf<<28) | 0x12<<20 | 0xc<<4 + + case AMULAWB: + return o&(0xf<<28) | 0x12<<20 | 0x8<<4 + + case AMULABB: + return o&(0xf<<28) | 0x10<<20 | 0x8<<4 + + case ABL: // BLX REG + return o&(0xf<<28) | 0x12fff3<<4 + } + + c.ctxt.Diag("%v: bad rrr %d", p, a) + return 0 +} + +func (c *ctxt5) opbra(p *obj.Prog, a obj.As, sc int) uint32 { + sc &= C_SCOND + sc ^= C_SCOND_XOR + if a == ABL || a == obj.ADUFFZERO || a == obj.ADUFFCOPY { + return uint32(sc)<<28 | 0x5<<25 | 0x1<<24 + } + if sc != 0xe { + c.ctxt.Diag("%v: .COND on bcond instruction", p) + } + switch a { + case ABEQ: + return 0x0<<28 | 0x5<<25 + case ABNE: + return 0x1<<28 | 0x5<<25 + case ABCS: + return 0x2<<28 | 0x5<<25 + case ABHS: + return 0x2<<28 | 0x5<<25 + case ABCC: + return 0x3<<28 | 0x5<<25 + case ABLO: + return 0x3<<28 | 0x5<<25 + case ABMI: + return 0x4<<28 | 0x5<<25 + case ABPL: + return 0x5<<28 | 0x5<<25 + case ABVS: + return 0x6<<28 | 0x5<<25 + case ABVC: + return 0x7<<28 | 0x5<<25 + case ABHI: + return 0x8<<28 | 0x5<<25 + case ABLS: + return 0x9<<28 | 0x5<<25 + case ABGE: + return 0xa<<28 | 0x5<<25 + case ABLT: + 
return 0xb<<28 | 0x5<<25 + case ABGT: + return 0xc<<28 | 0x5<<25 + case ABLE: + return 0xd<<28 | 0x5<<25 + case AB: + return 0xe<<28 | 0x5<<25 + } + + c.ctxt.Diag("%v: bad bra %v", p, a) + return 0 +} + +func (c *ctxt5) olr(v int32, b int, r int, sc int) uint32 { + o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28 + if sc&C_PBIT == 0 { + o |= 1 << 24 + } + if sc&C_UBIT == 0 { + o |= 1 << 23 + } + if sc&C_WBIT != 0 { + o |= 1 << 21 + } + o |= 1<<26 | 1<<20 + if v < 0 { + if sc&C_UBIT != 0 { + c.ctxt.Diag(".U on neg offset") + } + v = -v + o ^= 1 << 23 + } + + if v >= 1<<12 || v < 0 { + c.ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, c.printp) + } + o |= uint32(v) + o |= (uint32(b) & 15) << 16 + o |= (uint32(r) & 15) << 12 + return o +} + +func (c *ctxt5) olhr(v int32, b int, r int, sc int) uint32 { + o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28 + if sc&C_PBIT == 0 { + o |= 1 << 24 + } + if sc&C_WBIT != 0 { + o |= 1 << 21 + } + o |= 1<<23 | 1<<20 | 0xb<<4 + if v < 0 { + v = -v + o ^= 1 << 23 + } + + if v >= 1<<8 || v < 0 { + c.ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, c.printp) + } + o |= uint32(v)&0xf | (uint32(v)>>4)<<8 | 1<<22 + o |= (uint32(b) & 15) << 16 + o |= (uint32(r) & 15) << 12 + return o +} + +func (c *ctxt5) osr(a obj.As, r int, v int32, b int, sc int) uint32 { + o := c.olr(v, b, r, sc) ^ (1 << 20) + if a != AMOVW { + o |= 1 << 22 + } + return o +} + +func (c *ctxt5) oshr(r int, v int32, b int, sc int) uint32 { + o := c.olhr(v, b, r, sc) ^ (1 << 20) + return o +} + +func (c *ctxt5) osrr(r int, i int, b int, sc int) uint32 { + return c.olr(int32(i), b, r, sc) ^ (1<<25 | 1<<20) +} + +func (c *ctxt5) oshrr(r int, i int, b int, sc int) uint32 { + return c.olhr(int32(i), b, r, sc) ^ (1<<22 | 1<<20) +} + +func (c *ctxt5) olrr(i int, b int, r int, sc int) uint32 { + return c.olr(int32(i), b, r, sc) ^ (1 << 25) +} + +func (c *ctxt5) olhrr(i int, b int, r int, sc int) uint32 { + return c.olhr(int32(i), b, r, sc) ^ (1 << 22) +} + +func (c *ctxt5) ofsr(a obj.As, r int, v int32, b int, sc int, p *obj.Prog) uint32 { + o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28 + if sc&C_PBIT == 0 { + o |= 1 << 24 + } + if sc&C_WBIT != 0 { + o |= 1 << 21 + } + o |= 6<<25 | 1<<24 | 1<<23 | 10<<8 + if v < 0 { + v = -v + o ^= 1 << 23 + } + + if v&3 != 0 { + c.ctxt.Diag("odd offset for floating point op: %d\n%v", v, p) + } else if v >= 1<<10 || v < 0 { + c.ctxt.Diag("literal span too large: %d\n%v", v, p) + } + o |= (uint32(v) >> 2) & 0xFF + o |= (uint32(b) & 15) << 16 + o |= (uint32(r) & 15) << 12 + + switch a { + default: + c.ctxt.Diag("bad fst %v", a) + fallthrough + + case AMOVD: + o |= 1 << 8 + fallthrough + + case AMOVF: + break + } + + return o +} + +// MOVW $"lower 16-bit", Reg +func (c *ctxt5) omvs(p *obj.Prog, a *obj.Addr, dr int) uint32 { + o1 := ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + o1 |= 0x30 << 20 + o1 |= (uint32(dr) & 15) << 12 + o1 |= uint32(a.Offset) & 0x0fff + o1 |= (uint32(a.Offset) & 0xf000) << 4 + return o1 +} + +// MVN $C_NCON, Reg -> MOVW $C_RCON, Reg +func (c *ctxt5) omvr(p *obj.Prog, a *obj.Addr, dr int) uint32 { + o1 := c.oprrr(p, AMOVW, int(p.Scond)) + o1 |= (uint32(dr) & 15) << 12 + v := immrot(^uint32(a.Offset)) + if v == 0 { + c.ctxt.Diag("%v: missing literal", p) + return 0 + } + o1 |= uint32(v) + return o1 +} + +func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { + var o1 uint32 + if p.Pool == nil { + c.aclass(a) + v := immrot(^uint32(c.instoffset)) + if v == 0 { + c.ctxt.Diag("%v: missing literal", p) + return 0 + } + + 
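+		// No literal pool entry is needed on this path: the complement of
+		// the constant fits immrot's rotated 8-bit immediate form, so a
+		// single MVN materializes it, e.g. $0xffffff00 becomes MVN $0xff, Rd.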
o1 = c.oprrr(p, AMVN, int(p.Scond)&C_SCOND) + o1 |= uint32(v) + o1 |= (uint32(dr) & 15) << 12 + } else { + v := int32(p.Pool.Pc - p.Pc - 8) + o1 = c.olr(v, REGPC, dr, int(p.Scond)&C_SCOND) + } + + return o1 +} + +func (c *ctxt5) chipzero5(e float64) int { + // We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions. + if objabi.GOARM < 7 || math.Float64bits(e) != 0 { + return -1 + } + return 0 +} + +func (c *ctxt5) chipfloat5(e float64) int { + // We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions. + if objabi.GOARM < 7 { + return -1 + } + + ei := math.Float64bits(e) + l := uint32(ei) + h := uint32(ei >> 32) + + if l != 0 || h&0xffff != 0 { + return -1 + } + h1 := h & 0x7fc00000 + if h1 != 0x40000000 && h1 != 0x3fc00000 { + return -1 + } + n := 0 + + // sign bit (a) + if h&0x80000000 != 0 { + n |= 1 << 7 + } + + // exp sign bit (b) + if h1 == 0x3fc00000 { + n |= 1 << 6 + } + + // rest of exp and mantissa (cd-efgh) + n |= int((h >> 16) & 0x3f) + + //print("match %.8lux %.8lux %d\n", l, h, n); + return n +} + +func nocache(p *obj.Prog) { + p.Optab = 0 + p.From.Class = 0 + if p.GetFrom3() != nil { + p.GetFrom3().Class = 0 + } + p.To.Class = 0 +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/list5.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/list5.go new file mode 100644 index 0000000..30e0300 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm/list5.go @@ -0,0 +1,124 @@ +// Inferno utils/5c/list.c +// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/5c/list.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+
+package arm
+
+import (
+	"github.com/twitchyliquid64/golang-asm/obj"
+	"fmt"
+)
+
+func init() {
+	obj.RegisterRegister(obj.RBaseARM, MAXREG, rconv)
+	obj.RegisterOpcode(obj.ABaseARM, Anames)
+	obj.RegisterRegisterList(obj.RegListARMLo, obj.RegListARMHi, rlconv)
+	obj.RegisterOpSuffix("arm", obj.CConvARM)
+}
+
+func rconv(r int) string {
+	if r == 0 {
+		return "NONE"
+	}
+	if r == REGG {
+		// Special case.
+		return "g"
+	}
+	if REG_R0 <= r && r <= REG_R15 {
+		return fmt.Sprintf("R%d", r-REG_R0)
+	}
+	if REG_F0 <= r && r <= REG_F15 {
+		return fmt.Sprintf("F%d", r-REG_F0)
+	}
+
+	switch r {
+	case REG_FPSR:
+		return "FPSR"
+
+	case REG_FPCR:
+		return "FPCR"
+
+	case REG_CPSR:
+		return "CPSR"
+
+	case REG_SPSR:
+		return "SPSR"
+
+	case REG_MB_SY:
+		return "MB_SY"
+	case REG_MB_ST:
+		return "MB_ST"
+	case REG_MB_ISH:
+		return "MB_ISH"
+	case REG_MB_ISHST:
+		return "MB_ISHST"
+	case REG_MB_NSH:
+		return "MB_NSH"
+	case REG_MB_NSHST:
+		return "MB_NSHST"
+	case REG_MB_OSH:
+		return "MB_OSH"
+	case REG_MB_OSHST:
+		return "MB_OSHST"
+	}
+
+	return fmt.Sprintf("Rgok(%d)", r-obj.RBaseARM)
+}
+
+func DRconv(a int) string {
+	s := "C_??"
+	if a >= C_NONE && a <= C_NCLASS {
+		s = cnames5[a]
+	}
+	var fp string
+	fp += s
+	return fp
+}
+
+func rlconv(list int64) string {
+	str := ""
+	for i := 0; i < 16; i++ {
+		if list&(1<<uint(i)) != 0 {
+			if str == "" {
+				str += "["
+			} else {
+				str += ","
+			}
+			str += fmt.Sprintf("R%d", i)
+		}
+	}
+
+	str += "]"
+	return str
+}
+	// Treat MRC 15, 0, <reg>, C13, C0, 3 specially.
+	case AMRC:
+		if p.To.Offset&0xffff0fff == 0xee1d0f70 {
+			// Because the instruction might be rewritten to a BL which returns in R0
+			// the register must be zero.
+			if p.To.Offset&0xf000 != 0 {
+				ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line())
+			}
+
+			if objabi.GOARM < 7 {
+				// Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension.
+				if progedit_tlsfallback == nil {
+					progedit_tlsfallback = ctxt.Lookup("runtime.read_tls_fallback")
+				}
+
+				// MOVW LR, R11
+				p.As = AMOVW
+
+				p.From.Type = obj.TYPE_REG
+				p.From.Reg = REGLINK
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = REGTMP
+
+				// BL runtime.read_tls_fallback(SB)
+				p = obj.Appendp(p, newprog)
+
+				p.As = ABL
+				p.To.Type = obj.TYPE_BRANCH
+				p.To.Sym = progedit_tlsfallback
+				p.To.Offset = 0
+
+				// MOVW R11, LR
+				p = obj.Appendp(p, newprog)
+
+				p.As = AMOVW
+				p.From.Type = obj.TYPE_REG
+				p.From.Reg = REGTMP
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = REGLINK
+				break
+			}
+		}
+
+		// Otherwise, MRC/MCR instructions need no further treatment.
+		p.As = AWORD
+	}
+
+	// Rewrite float constants to values stored in memory.
+	switch p.As {
+	case AMOVF:
+		if p.From.Type == obj.TYPE_FCONST && c.chipfloat5(p.From.Val.(float64)) < 0 && (c.chipzero5(p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
+			f32 := float32(p.From.Val.(float64))
+			p.From.Type = obj.TYPE_MEM
+			p.From.Sym = ctxt.Float32Sym(f32)
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Offset = 0
+		}
+
+	case AMOVD:
+		if p.From.Type == obj.TYPE_FCONST && c.chipfloat5(p.From.Val.(float64)) < 0 && (c.chipzero5(p.From.Val.(float64)) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
+			p.From.Type = obj.TYPE_MEM
+			p.From.Sym = ctxt.Float64Sym(p.From.Val.(float64))
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Offset = 0
+		}
+	}
+
+	if ctxt.Flag_dynlink {
+		c.rewriteToUseGot(p)
+	}
+}
+
+// Rewrite p, if necessary, to access global data via the global offset table.
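+// In outline (a sketch of the rewrites performed below; R9 is the scratch
+// register used):
+//
+//	MOVW $sym(SB), Rx   ->   MOVW sym@GOT(SB), Rx
+//	MOVW sym(SB), Rx    ->   MOVW sym@GOT(SB), R9; MOVW (R9), Rx
+//	MOVW Rx, sym(SB)    ->   MOVW sym@GOT(SB), R9; MOVW Rx, (R9)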
+func (c *ctxt5) rewriteToUseGot(p *obj.Prog) {
+	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
+		//     ADUFFxxx $offset
+		// becomes
+		//     MOVW runtime.duffxxx@GOT, R9
+		//     ADD $offset, R9
+		//     CALL (R9)
+		var sym *obj.LSym
+		if p.As == obj.ADUFFZERO {
+			sym = c.ctxt.Lookup("runtime.duffzero")
+		} else {
+			sym = c.ctxt.Lookup("runtime.duffcopy")
+		}
+		offset := p.To.Offset
+		p.As = AMOVW
+		p.From.Type = obj.TYPE_MEM
+		p.From.Name = obj.NAME_GOTREF
+		p.From.Sym = sym
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_R9
+		p.To.Name = obj.NAME_NONE
+		p.To.Offset = 0
+		p.To.Sym = nil
+		p1 := obj.Appendp(p, c.newprog)
+		p1.As = AADD
+		p1.From.Type = obj.TYPE_CONST
+		p1.From.Offset = offset
+		p1.To.Type = obj.TYPE_REG
+		p1.To.Reg = REG_R9
+		p2 := obj.Appendp(p1, c.newprog)
+		p2.As = obj.ACALL
+		p2.To.Type = obj.TYPE_MEM
+		p2.To.Reg = REG_R9
+		return
+	}
+
+	// We only care about global data: NAME_EXTERN means a global
+	// symbol in the Go sense, and p.Sym.Local is true for a few
+	// internally defined symbols.
+	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
+		// MOVW $sym, Rx becomes MOVW sym@GOT, Rx
+		// MOVW $sym+<off>, Rx becomes MOVW sym@GOT, Rx; ADD <off>, Rx
+		if p.As != AMOVW {
+			c.ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p)
+		}
+		if p.To.Type != obj.TYPE_REG {
+			c.ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p)
+		}
+		p.From.Type = obj.TYPE_MEM
+		p.From.Name = obj.NAME_GOTREF
+		if p.From.Offset != 0 {
+			q := obj.Appendp(p, c.newprog)
+			q.As = AADD
+			q.From.Type = obj.TYPE_CONST
+			q.From.Offset = p.From.Offset
+			q.To = p.To
+			p.From.Offset = 0
+		}
+	}
+	if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN {
+		c.ctxt.Diag("don't know how to handle %v with -dynlink", p)
+	}
+	var source *obj.Addr
+	// MOVx sym, Ry becomes MOVW sym@GOT, R9; MOVx (R9), Ry
+	// MOVx Ry, sym becomes MOVW sym@GOT, R9; MOVx Ry, (R9)
+	// An addition may be inserted between the two MOVs if there is an offset.
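+	// For example (sketch): with -dynlink, MOVW foo(SB), R2 becomes
+	//	MOVW foo@GOT(SB), R9
+	//	MOVW (R9), R2
+	// and in the store direction the second move writes through (R9).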
+ if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { + if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { + c.ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p) + } + source = &p.From + } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { + source = &p.To + } else { + return + } + if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP { + return + } + if source.Sym.Type == objabi.STLSBSS { + return + } + if source.Type != obj.TYPE_MEM { + c.ctxt.Diag("don't know how to handle %v with -dynlink", p) + } + p1 := obj.Appendp(p, c.newprog) + p2 := obj.Appendp(p1, c.newprog) + + p1.As = AMOVW + p1.From.Type = obj.TYPE_MEM + p1.From.Sym = source.Sym + p1.From.Name = obj.NAME_GOTREF + p1.To.Type = obj.TYPE_REG + p1.To.Reg = REG_R9 + + p2.As = p.As + p2.From = p.From + p2.To = p.To + if p.From.Name == obj.NAME_EXTERN { + p2.From.Reg = REG_R9 + p2.From.Name = obj.NAME_NONE + p2.From.Sym = nil + } else if p.To.Name == obj.NAME_EXTERN { + p2.To.Reg = REG_R9 + p2.To.Name = obj.NAME_NONE + p2.To.Sym = nil + } else { + return + } + obj.Nopout(p) +} + +// Prog.mark +const ( + FOLL = 1 << 0 + LABEL = 1 << 1 + LEAF = 1 << 2 +) + +func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { + autosize := int32(0) + + if cursym.Func.Text == nil || cursym.Func.Text.Link == nil { + return + } + + c := ctxt5{ctxt: ctxt, cursym: cursym, newprog: newprog} + + p := c.cursym.Func.Text + autoffset := int32(p.To.Offset) + if autoffset == -4 { + // Historical way to mark NOFRAME. + p.From.Sym.Set(obj.AttrNoFrame, true) + autoffset = 0 + } + if autoffset < 0 || autoffset%4 != 0 { + c.ctxt.Diag("frame size %d not 0 or a positive multiple of 4", autoffset) + } + if p.From.Sym.NoFrame() { + if autoffset != 0 { + c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", autoffset) + } + } + + cursym.Func.Locals = autoffset + cursym.Func.Args = p.To.Val.(int32) + + /* + * find leaf subroutines + */ + for p := cursym.Func.Text; p != nil; p = p.Link { + switch p.As { + case obj.ATEXT: + p.Mark |= LEAF + + case ADIV, ADIVU, AMOD, AMODU: + cursym.Func.Text.Mark &^= LEAF + + case ABL, + ABX, + obj.ADUFFZERO, + obj.ADUFFCOPY: + cursym.Func.Text.Mark &^= LEAF + } + } + + var q2 *obj.Prog + for p := cursym.Func.Text; p != nil; p = p.Link { + o := p.As + switch o { + case obj.ATEXT: + autosize = autoffset + + if p.Mark&LEAF != 0 && autosize == 0 { + // A leaf function with no locals has no frame. + p.From.Sym.Set(obj.AttrNoFrame, true) + } + + if !p.From.Sym.NoFrame() { + // If there is a stack frame at all, it includes + // space to save the LR. + autosize += 4 + } + + if autosize == 0 && cursym.Func.Text.Mark&LEAF == 0 { + // A very few functions that do not return to their caller + // are not identified as leaves but still have no frame. + if ctxt.Debugvlog { + ctxt.Logf("save suppressed in: %s\n", cursym.Name) + } + + cursym.Func.Text.Mark |= LEAF + } + + // FP offsets need an updated p.To.Offset. 
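+			// autosize now includes the 4 bytes reserved above for the
+			// saved LR whenever the function has a frame, so the frame
+			// size recorded for FP offsets is autosize-4.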
+ p.To.Offset = int64(autosize) - 4 + + if cursym.Func.Text.Mark&LEAF != 0 { + cursym.Set(obj.AttrLeaf, true) + if p.From.Sym.NoFrame() { + break + } + } + + if !p.From.Sym.NoSplit() { + p = c.stacksplit(p, autosize) // emit split check + } + + // MOVW.W R14,$-autosize(SP) + p = obj.Appendp(p, c.newprog) + + p.As = AMOVW + p.Scond |= C_WBIT + p.From.Type = obj.TYPE_REG + p.From.Reg = REGLINK + p.To.Type = obj.TYPE_MEM + p.To.Offset = int64(-autosize) + p.To.Reg = REGSP + p.Spadj = autosize + + if cursym.Func.Text.From.Sym.Wrapper() { + // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame + // + // MOVW g_panic(g), R1 + // CMP $0, R1 + // B.NE checkargp + // end: + // NOP + // ... function ... + // checkargp: + // MOVW panic_argp(R1), R2 + // ADD $(autosize+4), R13, R3 + // CMP R2, R3 + // B.NE end + // ADD $4, R13, R4 + // MOVW R4, panic_argp(R1) + // B end + // + // The NOP is needed to give the jumps somewhere to land. + // It is a liblink NOP, not an ARM NOP: it encodes to 0 instruction bytes. + + p = obj.Appendp(p, newprog) + p.As = AMOVW + p.From.Type = obj.TYPE_MEM + p.From.Reg = REGG + p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R1 + + p = obj.Appendp(p, newprog) + p.As = ACMP + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0 + p.Reg = REG_R1 + + // B.NE checkargp + bne := obj.Appendp(p, newprog) + bne.As = ABNE + bne.To.Type = obj.TYPE_BRANCH + + // end: NOP + end := obj.Appendp(bne, newprog) + end.As = obj.ANOP + + // find end of function + var last *obj.Prog + for last = end; last.Link != nil; last = last.Link { + } + + // MOVW panic_argp(R1), R2 + mov := obj.Appendp(last, newprog) + mov.As = AMOVW + mov.From.Type = obj.TYPE_MEM + mov.From.Reg = REG_R1 + mov.From.Offset = 0 // Panic.argp + mov.To.Type = obj.TYPE_REG + mov.To.Reg = REG_R2 + + // B.NE branch target is MOVW above + bne.To.SetTarget(mov) + + // ADD $(autosize+4), R13, R3 + p = obj.Appendp(mov, newprog) + p.As = AADD + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(autosize) + 4 + p.Reg = REG_R13 + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R3 + + // CMP R2, R3 + p = obj.Appendp(p, newprog) + p.As = ACMP + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_R2 + p.Reg = REG_R3 + + // B.NE end + p = obj.Appendp(p, newprog) + p.As = ABNE + p.To.Type = obj.TYPE_BRANCH + p.To.SetTarget(end) + + // ADD $4, R13, R4 + p = obj.Appendp(p, newprog) + p.As = AADD + p.From.Type = obj.TYPE_CONST + p.From.Offset = 4 + p.Reg = REG_R13 + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R4 + + // MOVW R4, panic_argp(R1) + p = obj.Appendp(p, newprog) + p.As = AMOVW + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_R4 + p.To.Type = obj.TYPE_MEM + p.To.Reg = REG_R1 + p.To.Offset = 0 // Panic.argp + + // B end + p = obj.Appendp(p, newprog) + p.As = AB + p.To.Type = obj.TYPE_BRANCH + p.To.SetTarget(end) + + // reset for subsequent passes + p = end + } + + case obj.ARET: + nocache(p) + if cursym.Func.Text.Mark&LEAF != 0 { + if autosize == 0 { + p.As = AB + p.From = obj.Addr{} + if p.To.Sym != nil { // retjmp + p.To.Type = obj.TYPE_BRANCH + } else { + p.To.Type = obj.TYPE_MEM + p.To.Offset = 0 + p.To.Reg = REGLINK + } + + break + } + } + + p.As = AMOVW + p.Scond |= C_PBIT + p.From.Type = obj.TYPE_MEM + p.From.Offset = int64(autosize) + p.From.Reg = REGSP + p.To.Type = obj.TYPE_REG + p.To.Reg = REGPC + + // If there are instructions following + // this ARET, they come from a branch + // with the same stackframe, so no spadj. 
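+			// A RET carrying a target symbol is a tail call ("retjmp"):
+			// the epilogue restores LR instead of PC and then branches to
+			// the symbol, roughly (sketch):
+			//	MOVW.P autosize(R13), R14
+			//	B      target(SB)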
+ if p.To.Sym != nil { // retjmp + p.To.Reg = REGLINK + q2 = obj.Appendp(p, newprog) + q2.As = AB + q2.To.Type = obj.TYPE_BRANCH + q2.To.Sym = p.To.Sym + p.To.Sym = nil + p = q2 + } + + case AADD: + if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP { + p.Spadj = int32(-p.From.Offset) + } + + case ASUB: + if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP { + p.Spadj = int32(p.From.Offset) + } + + case ADIV, ADIVU, AMOD, AMODU: + if cursym.Func.Text.From.Sym.NoSplit() { + ctxt.Diag("cannot divide in NOSPLIT function") + } + const debugdivmod = false + if debugdivmod { + break + } + if p.From.Type != obj.TYPE_REG { + break + } + if p.To.Type != obj.TYPE_REG { + break + } + + // Make copy because we overwrite p below. + q1 := *p + if q1.Reg == REGTMP || q1.Reg == 0 && q1.To.Reg == REGTMP { + ctxt.Diag("div already using REGTMP: %v", p) + } + + /* MOV m(g),REGTMP */ + p.As = AMOVW + p.Pos = q1.Pos + p.From.Type = obj.TYPE_MEM + p.From.Reg = REGG + p.From.Offset = 6 * 4 // offset of g.m + p.Reg = 0 + p.To.Type = obj.TYPE_REG + p.To.Reg = REGTMP + + /* MOV a,m_divmod(REGTMP) */ + p = obj.Appendp(p, newprog) + p.As = AMOVW + p.Pos = q1.Pos + p.From.Type = obj.TYPE_REG + p.From.Reg = q1.From.Reg + p.To.Type = obj.TYPE_MEM + p.To.Reg = REGTMP + p.To.Offset = 8 * 4 // offset of m.divmod + + /* MOV b, R8 */ + p = obj.Appendp(p, newprog) + p.As = AMOVW + p.Pos = q1.Pos + p.From.Type = obj.TYPE_REG + p.From.Reg = q1.Reg + if q1.Reg == 0 { + p.From.Reg = q1.To.Reg + } + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R8 + p.To.Offset = 0 + + /* CALL appropriate */ + p = obj.Appendp(p, newprog) + p.As = ABL + p.Pos = q1.Pos + p.To.Type = obj.TYPE_BRANCH + switch o { + case ADIV: + p.To.Sym = symdiv + case ADIVU: + p.To.Sym = symdivu + case AMOD: + p.To.Sym = symmod + case AMODU: + p.To.Sym = symmodu + } + + /* MOV REGTMP, b */ + p = obj.Appendp(p, newprog) + p.As = AMOVW + p.Pos = q1.Pos + p.From.Type = obj.TYPE_REG + p.From.Reg = REGTMP + p.From.Offset = 0 + p.To.Type = obj.TYPE_REG + p.To.Reg = q1.To.Reg + + case AMOVW: + if (p.Scond&C_WBIT != 0) && p.To.Type == obj.TYPE_MEM && p.To.Reg == REGSP { + p.Spadj = int32(-p.To.Offset) + } + if (p.Scond&C_PBIT != 0) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REGSP && p.To.Reg != REGPC { + p.Spadj = int32(-p.From.Offset) + } + if p.From.Type == obj.TYPE_ADDR && p.From.Reg == REGSP && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP { + p.Spadj = int32(-p.From.Offset) + } + + case obj.AGETCALLERPC: + if cursym.Leaf() { + /* MOVW LR, Rd */ + p.As = AMOVW + p.From.Type = obj.TYPE_REG + p.From.Reg = REGLINK + } else { + /* MOVW (RSP), Rd */ + p.As = AMOVW + p.From.Type = obj.TYPE_MEM + p.From.Reg = REGSP + } + } + } +} + +func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { + // MOVW g_stackguard(g), R1 + p = obj.Appendp(p, c.newprog) + + p.As = AMOVW + p.From.Type = obj.TYPE_MEM + p.From.Reg = REGG + p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0 + if c.cursym.CFunc() { + p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1 + } + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R1 + + // Mark the stack bound check and morestack call async nonpreemptible. + // If we get preempted here, when resumed the preemption request is + // cleared, but we'll still call morestack, which will double the stack + // unnecessarily. See issue #35470. 
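+	// In outline, the check emitted below is (sketch; see the three size
+	// classes that follow):
+	//	small stacks: SP < stackguard
+	//	large stacks: SP - (framesize - StackSmall) < stackguard
+	//	huge stacks:  the same comparison, with extra arithmetic guarding
+	//	              against wraparound when SP is near zero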
+ p = c.ctxt.StartUnsafePoint(p, c.newprog) + + if framesize <= objabi.StackSmall { + // small stack: SP < stackguard + // CMP stackguard, SP + p = obj.Appendp(p, c.newprog) + + p.As = ACMP + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_R1 + p.Reg = REGSP + } else if framesize <= objabi.StackBig { + // large stack: SP-framesize < stackguard-StackSmall + // MOVW $-(framesize-StackSmall)(SP), R2 + // CMP stackguard, R2 + p = obj.Appendp(p, c.newprog) + + p.As = AMOVW + p.From.Type = obj.TYPE_ADDR + p.From.Reg = REGSP + p.From.Offset = -(int64(framesize) - objabi.StackSmall) + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R2 + + p = obj.Appendp(p, c.newprog) + p.As = ACMP + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_R1 + p.Reg = REG_R2 + } else { + // Such a large stack we need to protect against wraparound + // if SP is close to zero. + // SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall) + // The +StackGuard on both sides is required to keep the left side positive: + // SP is allowed to be slightly below stackguard. See stack.h. + // CMP $StackPreempt, R1 + // MOVW.NE $StackGuard(SP), R2 + // SUB.NE R1, R2 + // MOVW.NE $(framesize+(StackGuard-StackSmall)), R3 + // CMP.NE R3, R2 + p = obj.Appendp(p, c.newprog) + + p.As = ACMP + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(uint32(objabi.StackPreempt & (1<<32 - 1))) + p.Reg = REG_R1 + + p = obj.Appendp(p, c.newprog) + p.As = AMOVW + p.From.Type = obj.TYPE_ADDR + p.From.Reg = REGSP + p.From.Offset = int64(objabi.StackGuard) + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R2 + p.Scond = C_SCOND_NE + + p = obj.Appendp(p, c.newprog) + p.As = ASUB + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_R1 + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R2 + p.Scond = C_SCOND_NE + + p = obj.Appendp(p, c.newprog) + p.As = AMOVW + p.From.Type = obj.TYPE_ADDR + p.From.Offset = int64(framesize) + (int64(objabi.StackGuard) - objabi.StackSmall) + p.To.Type = obj.TYPE_REG + p.To.Reg = REG_R3 + p.Scond = C_SCOND_NE + + p = obj.Appendp(p, c.newprog) + p.As = ACMP + p.From.Type = obj.TYPE_REG + p.From.Reg = REG_R3 + p.Reg = REG_R2 + p.Scond = C_SCOND_NE + } + + // BLS call-to-morestack + bls := obj.Appendp(p, c.newprog) + bls.As = ABLS + bls.To.Type = obj.TYPE_BRANCH + + end := c.ctxt.EndUnsafePoint(bls, c.newprog, -1) + + var last *obj.Prog + for last = c.cursym.Func.Text; last.Link != nil; last = last.Link { + } + + // Now we are at the end of the function, but logically + // we are still in function prologue. We need to fix the + // SP data and PCDATA. 
+ spfix := obj.Appendp(last, c.newprog) + spfix.As = obj.ANOP + spfix.Spadj = -framesize + + pcdata := c.ctxt.EmitEntryStackMap(c.cursym, spfix, c.newprog) + pcdata = c.ctxt.StartUnsafePoint(pcdata, c.newprog) + + // MOVW LR, R3 + movw := obj.Appendp(pcdata, c.newprog) + movw.As = AMOVW + movw.From.Type = obj.TYPE_REG + movw.From.Reg = REGLINK + movw.To.Type = obj.TYPE_REG + movw.To.Reg = REG_R3 + + bls.To.SetTarget(movw) + + // BL runtime.morestack + call := obj.Appendp(movw, c.newprog) + call.As = obj.ACALL + call.To.Type = obj.TYPE_BRANCH + morestack := "runtime.morestack" + switch { + case c.cursym.CFunc(): + morestack = "runtime.morestackc" + case !c.cursym.Func.Text.From.Sym.NeedCtxt(): + morestack = "runtime.morestack_noctxt" + } + call.To.Sym = c.ctxt.Lookup(morestack) + + pcdata = c.ctxt.EndUnsafePoint(call, c.newprog, -1) + + // B start + b := obj.Appendp(pcdata, c.newprog) + b.As = obj.AJMP + b.To.Type = obj.TYPE_BRANCH + b.To.SetTarget(c.cursym.Func.Text.Link) + b.Spadj = +framesize + + return end +} + +var unaryDst = map[obj.As]bool{ + ASWI: true, + AWORD: true, +} + +var Linkarm = obj.LinkArch{ + Arch: sys.ArchARM, + Init: buildop, + Preprocess: preprocess, + Assemble: span5, + Progedit: progedit, + UnaryDst: unaryDst, + DWARFRegisters: ARMDWARFRegisters, +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/a.out.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/a.out.go new file mode 100644 index 0000000..04c084e --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/a.out.go @@ -0,0 +1,1033 @@ +// cmd/7c/7.out.h from Vita Nuova. +// https://code.google.com/p/ken-cc/source/browse/src/cmd/7c/7.out.h +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+
+package arm64
+
+import "github.com/twitchyliquid64/golang-asm/obj"
+
+const (
+	NSNAME = 8
+	NSYM   = 50
+	NREG   = 32 /* number of general registers */
+	NFREG  = 32 /* number of floating point registers */
+)
+
+// General purpose registers, kept in the low bits of Prog.Reg.
+const (
+	// integer
+	REG_R0 = obj.RBaseARM64 + iota
+	REG_R1
+	REG_R2
+	REG_R3
+	REG_R4
+	REG_R5
+	REG_R6
+	REG_R7
+	REG_R8
+	REG_R9
+	REG_R10
+	REG_R11
+	REG_R12
+	REG_R13
+	REG_R14
+	REG_R15
+	REG_R16
+	REG_R17
+	REG_R18
+	REG_R19
+	REG_R20
+	REG_R21
+	REG_R22
+	REG_R23
+	REG_R24
+	REG_R25
+	REG_R26
+	REG_R27
+	REG_R28
+	REG_R29
+	REG_R30
+	REG_R31
+
+	// scalar floating point
+	REG_F0
+	REG_F1
+	REG_F2
+	REG_F3
+	REG_F4
+	REG_F5
+	REG_F6
+	REG_F7
+	REG_F8
+	REG_F9
+	REG_F10
+	REG_F11
+	REG_F12
+	REG_F13
+	REG_F14
+	REG_F15
+	REG_F16
+	REG_F17
+	REG_F18
+	REG_F19
+	REG_F20
+	REG_F21
+	REG_F22
+	REG_F23
+	REG_F24
+	REG_F25
+	REG_F26
+	REG_F27
+	REG_F28
+	REG_F29
+	REG_F30
+	REG_F31
+
+	// SIMD
+	REG_V0
+	REG_V1
+	REG_V2
+	REG_V3
+	REG_V4
+	REG_V5
+	REG_V6
+	REG_V7
+	REG_V8
+	REG_V9
+	REG_V10
+	REG_V11
+	REG_V12
+	REG_V13
+	REG_V14
+	REG_V15
+	REG_V16
+	REG_V17
+	REG_V18
+	REG_V19
+	REG_V20
+	REG_V21
+	REG_V22
+	REG_V23
+	REG_V24
+	REG_V25
+	REG_V26
+	REG_V27
+	REG_V28
+	REG_V29
+	REG_V30
+	REG_V31
+
+	// The EQ in
+	//	CSET EQ, R0
+	// is encoded as TYPE_REG, even though it's not really a register.
+	COND_EQ
+	COND_NE
+	COND_HS
+	COND_LO
+	COND_MI
+	COND_PL
+	COND_VS
+	COND_VC
+	COND_HI
+	COND_LS
+	COND_GE
+	COND_LT
+	COND_GT
+	COND_LE
+	COND_AL
+	COND_NV
+
+	REG_RSP = REG_V31 + 32 // to differentiate ZR/SP, REG_RSP&0x1f = 31
+)
+
+// bits 0-4 indicates register: Vn
+// bits 5-8 indicates arrangement: <T>
+const (
+	REG_ARNG = obj.RBaseARM64 + 1<<10 + iota<<9 // Vn.<T>
+	REG_ELEM                                    // Vn.<T>[index]
+	REG_ELEM_END
+)
+
+// Not registers, but flags that can be combined with regular register
+// constants to indicate extended register conversion. When checking,
+// you should subtract obj.RBaseARM64 first. From this difference, bit 11
+// indicates extended register, bits 8-10 select the conversion mode.
+// REG_LSL is the index shift specifier, bit 9 indicates shifted offset register.
+const REG_LSL = obj.RBaseARM64 + 1<<9
+const REG_EXT = obj.RBaseARM64 + 1<<11
+
+const (
+	REG_UXTB = REG_EXT + iota<<8
+	REG_UXTH
+	REG_UXTW
+	REG_UXTX
+	REG_SXTB
+	REG_SXTH
+	REG_SXTW
+	REG_SXTX
+)
+
+// Special registers, after subtracting obj.RBaseARM64, bit 12 indicates
+// a special register and the low bits select the register.
+// SYSREG_END is the last item in the automatically generated system register
+// declaration, and it is defined in the sysRegEnc.go file.
+const ( + REG_SPECIAL = obj.RBaseARM64 + 1<<12 + REG_DAIFSet = SYSREG_END + iota + REG_DAIFClr + REG_PLDL1KEEP + REG_PLDL1STRM + REG_PLDL2KEEP + REG_PLDL2STRM + REG_PLDL3KEEP + REG_PLDL3STRM + REG_PLIL1KEEP + REG_PLIL1STRM + REG_PLIL2KEEP + REG_PLIL2STRM + REG_PLIL3KEEP + REG_PLIL3STRM + REG_PSTL1KEEP + REG_PSTL1STRM + REG_PSTL2KEEP + REG_PSTL2STRM + REG_PSTL3KEEP + REG_PSTL3STRM +) + +// Register assignments: +// +// compiler allocates R0 up as temps +// compiler allocates register variables R7-R25 +// compiler allocates external registers R26 down +// +// compiler allocates register variables F7-F26 +// compiler allocates external registers F26 down +const ( + REGMIN = REG_R7 // register variables allocated from here to REGMAX + REGRT1 = REG_R16 // ARM64 IP0, external linker may use as a scrach register in trampoline + REGRT2 = REG_R17 // ARM64 IP1, external linker may use as a scrach register in trampoline + REGPR = REG_R18 // ARM64 platform register, unused in the Go toolchain + REGMAX = REG_R25 + + REGCTXT = REG_R26 // environment for closures + REGTMP = REG_R27 // reserved for liblink + REGG = REG_R28 // G + REGFP = REG_R29 // frame pointer, unused in the Go toolchain + REGLINK = REG_R30 + + // ARM64 uses R31 as both stack pointer and zero register, + // depending on the instruction. To differentiate RSP from ZR, + // we use a different numeric value for REGZERO and REGSP. + REGZERO = REG_R31 + REGSP = REG_RSP + + FREGRET = REG_F0 + FREGMIN = REG_F7 // first register variable + FREGMAX = REG_F26 // last register variable for 7g only + FREGEXT = REG_F26 // first external register +) + +// http://infocenter.arm.com/help/topic/com.arm.doc.ecm0665627/abi_sve_aadwarf_100985_0000_00_en.pdf +var ARM64DWARFRegisters = map[int16]int16{ + REG_R0: 0, + REG_R1: 1, + REG_R2: 2, + REG_R3: 3, + REG_R4: 4, + REG_R5: 5, + REG_R6: 6, + REG_R7: 7, + REG_R8: 8, + REG_R9: 9, + REG_R10: 10, + REG_R11: 11, + REG_R12: 12, + REG_R13: 13, + REG_R14: 14, + REG_R15: 15, + REG_R16: 16, + REG_R17: 17, + REG_R18: 18, + REG_R19: 19, + REG_R20: 20, + REG_R21: 21, + REG_R22: 22, + REG_R23: 23, + REG_R24: 24, + REG_R25: 25, + REG_R26: 26, + REG_R27: 27, + REG_R28: 28, + REG_R29: 29, + REG_R30: 30, + + // floating point + REG_F0: 64, + REG_F1: 65, + REG_F2: 66, + REG_F3: 67, + REG_F4: 68, + REG_F5: 69, + REG_F6: 70, + REG_F7: 71, + REG_F8: 72, + REG_F9: 73, + REG_F10: 74, + REG_F11: 75, + REG_F12: 76, + REG_F13: 77, + REG_F14: 78, + REG_F15: 79, + REG_F16: 80, + REG_F17: 81, + REG_F18: 82, + REG_F19: 83, + REG_F20: 84, + REG_F21: 85, + REG_F22: 86, + REG_F23: 87, + REG_F24: 88, + REG_F25: 89, + REG_F26: 90, + REG_F27: 91, + REG_F28: 92, + REG_F29: 93, + REG_F30: 94, + REG_F31: 95, + + // SIMD + REG_V0: 64, + REG_V1: 65, + REG_V2: 66, + REG_V3: 67, + REG_V4: 68, + REG_V5: 69, + REG_V6: 70, + REG_V7: 71, + REG_V8: 72, + REG_V9: 73, + REG_V10: 74, + REG_V11: 75, + REG_V12: 76, + REG_V13: 77, + REG_V14: 78, + REG_V15: 79, + REG_V16: 80, + REG_V17: 81, + REG_V18: 82, + REG_V19: 83, + REG_V20: 84, + REG_V21: 85, + REG_V22: 86, + REG_V23: 87, + REG_V24: 88, + REG_V25: 89, + REG_V26: 90, + REG_V27: 91, + REG_V28: 92, + REG_V29: 93, + REG_V30: 94, + REG_V31: 95, +} + +const ( + BIG = 2048 - 8 +) + +const ( + /* mark flags */ + LABEL = 1 << iota + LEAF + FLOAT + BRANCH + LOAD + FCMP + SYNC + LIST + FOLL + NOSCHED +) + +const ( + // optab is sorted based on the order of these constants + // and the first match is chosen. + // The more specific class needs to come earlier. 
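+	// For example, $0 classifies as C_ZCON and a plain 12-bit unsigned
+	// immediate as C_ADDCON0; both precede the broader constant classes
+	// (C_LCON, C_VCON) below, so the tightest encoding is chosen first.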
+ C_NONE = iota
+ C_REG // R0..R30
+ C_RSP // R0..R30, RSP
+ C_FREG // F0..F31
+ C_VREG // V0..V31
+ C_PAIR // (Rn, Rm)
+ C_SHIFT // Rn<<2
+ C_EXTREG // Rn.UXTB[<<3]
+ C_SPR // REG_NZCV
+ C_COND // EQ, NE, etc
+ C_ARNG // Vn.<T>
+ C_ELEM // Vn.<T>[index]
+ C_LIST // [V1, V2, V3]
+
+ C_ZCON // $0 or ZR
+ C_ABCON0 // could be C_ADDCON0 or C_BITCON
+ C_ADDCON0 // 12-bit unsigned, unshifted
+ C_ABCON // could be C_ADDCON or C_BITCON
+ C_AMCON // could be C_ADDCON or C_MOVCON
+ C_ADDCON // 12-bit unsigned, shifted left by 0 or 12
+ C_MBCON // could be C_MOVCON or C_BITCON
+ C_MOVCON // generated by a 16-bit constant, optionally inverted and/or shifted by multiple of 16
+ C_BITCON // bitfield and logical immediate masks
+ C_ADDCON2 // 24-bit constant
+ C_LCON // 32-bit constant
+ C_MOVCON2 // a constant that can be loaded with one MOVZ/MOVN and one MOVK
+ C_MOVCON3 // a constant that can be loaded with one MOVZ/MOVN and two MOVKs
+ C_VCON // 64-bit constant
+ C_FCON // floating-point constant
+ C_VCONADDR // 64-bit memory address
+
+ C_AACON // ADDCON offset in auto constant $a(FP)
+ C_AACON2 // 24-bit offset in auto constant $a(FP)
+ C_LACON // 32-bit offset in auto constant $a(FP)
+ C_AECON // ADDCON offset in extern constant $e(SB)
+
+ // TODO(aram): only one branch class should be enough
+ C_SBRA // for TYPE_BRANCH
+ C_LBRA
+
+ C_ZAUTO // 0(RSP)
+ C_NSAUTO_8 // -256 <= x < 0, 0 mod 8
+ C_NSAUTO_4 // -256 <= x < 0, 0 mod 4
+ C_NSAUTO // -256 <= x < 0
+ C_NPAUTO // -512 <= x < 0, 0 mod 8
+ C_NAUTO4K // -4095 <= x < 0
+ C_PSAUTO_8 // 0 to 255, 0 mod 8
+ C_PSAUTO_4 // 0 to 255, 0 mod 4
+ C_PSAUTO // 0 to 255
+ C_PPAUTO // 0 to 504, 0 mod 8
+ C_UAUTO4K_8 // 0 to 4095, 0 mod 8
+ C_UAUTO4K_4 // 0 to 4095, 0 mod 4
+ C_UAUTO4K_2 // 0 to 4095, 0 mod 2
+ C_UAUTO4K // 0 to 4095
+ C_UAUTO8K_8 // 0 to 8190, 0 mod 8
+ C_UAUTO8K_4 // 0 to 8190, 0 mod 4
+ C_UAUTO8K // 0 to 8190, 0 mod 2
+ C_UAUTO16K_8 // 0 to 16380, 0 mod 8
+ C_UAUTO16K // 0 to 16380, 0 mod 4
+ C_UAUTO32K // 0 to 32760, 0 mod 8
+ C_LAUTO // any other 32-bit constant
+
+ C_SEXT1 // 0 to 4095, direct
+ C_SEXT2 // 0 to 8190
+ C_SEXT4 // 0 to 16380
+ C_SEXT8 // 0 to 32760
+ C_SEXT16 // 0 to 65520
+ C_LEXT
+
+ C_ZOREG // 0(R)
+ C_NSOREG_8 // must mirror C_NSAUTO_8, etc
+ C_NSOREG_4
+ C_NSOREG
+ C_NPOREG
+ C_NOREG4K
+ C_PSOREG_8
+ C_PSOREG_4
+ C_PSOREG
+ C_PPOREG
+ C_UOREG4K_8
+ C_UOREG4K_4
+ C_UOREG4K_2
+ C_UOREG4K
+ C_UOREG8K_8
+ C_UOREG8K_4
+ C_UOREG8K
+ C_UOREG16K_8
+ C_UOREG16K
+ C_UOREG32K
+ C_LOREG
+
+ C_ADDR // TODO(aram): explain difference from C_VCONADDR
+
+ // The GOT slot for a symbol in -dynlink mode.
+ C_GOTADDR
+
+ // TLS "var" in local exec mode: will become a constant offset from
+ // thread local base that is ultimately chosen by the program linker.
+ C_TLS_LE
+
+ // TLS "var" in initial exec mode: will become a memory address (chosen
+ // by the program linker) that the dynamic linker will fill with the
+ // offset from the thread local base.
+ C_TLS_IE + + C_ROFF // register offset (including register extended) + + C_GOK + C_TEXTSIZE + C_NCLASS // must be last +) + +const ( + C_XPRE = 1 << 6 // match arm.C_WBIT, so Prog.String know how to print it + C_XPOST = 1 << 5 // match arm.C_PBIT, so Prog.String know how to print it +) + +//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p arm64 + +const ( + AADC = obj.ABaseARM64 + obj.A_ARCHSPECIFIC + iota + AADCS + AADCSW + AADCW + AADD + AADDS + AADDSW + AADDW + AADR + AADRP + AAND + AANDS + AANDSW + AANDW + AASR + AASRW + AAT + ABFI + ABFIW + ABFM + ABFMW + ABFXIL + ABFXILW + ABIC + ABICS + ABICSW + ABICW + ABRK + ACBNZ + ACBNZW + ACBZ + ACBZW + ACCMN + ACCMNW + ACCMP + ACCMPW + ACINC + ACINCW + ACINV + ACINVW + ACLREX + ACLS + ACLSW + ACLZ + ACLZW + ACMN + ACMNW + ACMP + ACMPW + ACNEG + ACNEGW + ACRC32B + ACRC32CB + ACRC32CH + ACRC32CW + ACRC32CX + ACRC32H + ACRC32W + ACRC32X + ACSEL + ACSELW + ACSET + ACSETM + ACSETMW + ACSETW + ACSINC + ACSINCW + ACSINV + ACSINVW + ACSNEG + ACSNEGW + ADC + ADCPS1 + ADCPS2 + ADCPS3 + ADMB + ADRPS + ADSB + AEON + AEONW + AEOR + AEORW + AERET + AEXTR + AEXTRW + AHINT + AHLT + AHVC + AIC + AISB + ALDADDAB + ALDADDAD + ALDADDAH + ALDADDAW + ALDADDALB + ALDADDALD + ALDADDALH + ALDADDALW + ALDADDB + ALDADDD + ALDADDH + ALDADDW + ALDADDLB + ALDADDLD + ALDADDLH + ALDADDLW + ALDANDAB + ALDANDAD + ALDANDAH + ALDANDAW + ALDANDALB + ALDANDALD + ALDANDALH + ALDANDALW + ALDANDB + ALDANDD + ALDANDH + ALDANDW + ALDANDLB + ALDANDLD + ALDANDLH + ALDANDLW + ALDAR + ALDARB + ALDARH + ALDARW + ALDAXP + ALDAXPW + ALDAXR + ALDAXRB + ALDAXRH + ALDAXRW + ALDEORAB + ALDEORAD + ALDEORAH + ALDEORAW + ALDEORALB + ALDEORALD + ALDEORALH + ALDEORALW + ALDEORB + ALDEORD + ALDEORH + ALDEORW + ALDEORLB + ALDEORLD + ALDEORLH + ALDEORLW + ALDORAB + ALDORAD + ALDORAH + ALDORAW + ALDORALB + ALDORALD + ALDORALH + ALDORALW + ALDORB + ALDORD + ALDORH + ALDORW + ALDORLB + ALDORLD + ALDORLH + ALDORLW + ALDP + ALDPW + ALDPSW + ALDXR + ALDXRB + ALDXRH + ALDXRW + ALDXP + ALDXPW + ALSL + ALSLW + ALSR + ALSRW + AMADD + AMADDW + AMNEG + AMNEGW + AMOVK + AMOVKW + AMOVN + AMOVNW + AMOVZ + AMOVZW + AMRS + AMSR + AMSUB + AMSUBW + AMUL + AMULW + AMVN + AMVNW + ANEG + ANEGS + ANEGSW + ANEGW + ANGC + ANGCS + ANGCSW + ANGCW + ANOOP + AORN + AORNW + AORR + AORRW + APRFM + APRFUM + ARBIT + ARBITW + AREM + AREMW + AREV + AREV16 + AREV16W + AREV32 + AREVW + AROR + ARORW + ASBC + ASBCS + ASBCSW + ASBCW + ASBFIZ + ASBFIZW + ASBFM + ASBFMW + ASBFX + ASBFXW + ASDIV + ASDIVW + ASEV + ASEVL + ASMADDL + ASMC + ASMNEGL + ASMSUBL + ASMULH + ASMULL + ASTXR + ASTXRB + ASTXRH + ASTXP + ASTXPW + ASTXRW + ASTLP + ASTLPW + ASTLR + ASTLRB + ASTLRH + ASTLRW + ASTLXP + ASTLXPW + ASTLXR + ASTLXRB + ASTLXRH + ASTLXRW + ASTP + ASTPW + ASUB + ASUBS + ASUBSW + ASUBW + ASVC + ASXTB + ASXTBW + ASXTH + ASXTHW + ASXTW + ASYS + ASYSL + ATBNZ + ATBZ + ATLBI + ATST + ATSTW + AUBFIZ + AUBFIZW + AUBFM + AUBFMW + AUBFX + AUBFXW + AUDIV + AUDIVW + AUMADDL + AUMNEGL + AUMSUBL + AUMULH + AUMULL + AUREM + AUREMW + AUXTB + AUXTH + AUXTW + AUXTBW + AUXTHW + AWFE + AWFI + AYIELD + AMOVB + AMOVBU + AMOVH + AMOVHU + AMOVW + AMOVWU + AMOVD + AMOVNP + AMOVNPW + AMOVP + AMOVPD + AMOVPQ + AMOVPS + AMOVPSW + AMOVPW + ASWPAD + ASWPAW + ASWPAH + ASWPAB + ASWPALD + ASWPALW + ASWPALH + ASWPALB + ASWPD + ASWPW + ASWPH + ASWPB + ASWPLD + ASWPLW + ASWPLH + ASWPLB + ABEQ + ABNE + ABCS + ABHS + ABCC + ABLO + ABMI + ABPL + ABVS + ABVC + ABHI + ABLS + ABGE + ABLT + ABGT + ABLE + AFABSD + AFABSS + AFADDD + AFADDS + AFCCMPD + AFCCMPED + AFCCMPS + AFCCMPES + 
AFCMPD + AFCMPED + AFCMPES + AFCMPS + AFCVTSD + AFCVTDS + AFCVTZSD + AFCVTZSDW + AFCVTZSS + AFCVTZSSW + AFCVTZUD + AFCVTZUDW + AFCVTZUS + AFCVTZUSW + AFDIVD + AFDIVS + AFLDPD + AFLDPS + AFMOVD + AFMOVS + AFMOVQ + AFMULD + AFMULS + AFNEGD + AFNEGS + AFSQRTD + AFSQRTS + AFSTPD + AFSTPS + AFSUBD + AFSUBS + ASCVTFD + ASCVTFS + ASCVTFWD + ASCVTFWS + AUCVTFD + AUCVTFS + AUCVTFWD + AUCVTFWS + AWORD + ADWORD + AFCSELS + AFCSELD + AFMAXS + AFMINS + AFMAXD + AFMIND + AFMAXNMS + AFMAXNMD + AFNMULS + AFNMULD + AFRINTNS + AFRINTND + AFRINTPS + AFRINTPD + AFRINTMS + AFRINTMD + AFRINTZS + AFRINTZD + AFRINTAS + AFRINTAD + AFRINTXS + AFRINTXD + AFRINTIS + AFRINTID + AFMADDS + AFMADDD + AFMSUBS + AFMSUBD + AFNMADDS + AFNMADDD + AFNMSUBS + AFNMSUBD + AFMINNMS + AFMINNMD + AFCVTDH + AFCVTHS + AFCVTHD + AFCVTSH + AAESD + AAESE + AAESIMC + AAESMC + ASHA1C + ASHA1H + ASHA1M + ASHA1P + ASHA1SU0 + ASHA1SU1 + ASHA256H + ASHA256H2 + ASHA256SU0 + ASHA256SU1 + ASHA512H + ASHA512H2 + ASHA512SU0 + ASHA512SU1 + AVADD + AVADDP + AVAND + AVBIF + AVCMEQ + AVCNT + AVEOR + AVMOV + AVLD1 + AVLD2 + AVLD3 + AVLD4 + AVLD1R + AVLD2R + AVLD3R + AVLD4R + AVORR + AVREV16 + AVREV32 + AVREV64 + AVST1 + AVST2 + AVST3 + AVST4 + AVDUP + AVADDV + AVMOVI + AVUADDLV + AVSUB + AVFMLA + AVFMLS + AVPMULL + AVPMULL2 + AVEXT + AVRBIT + AVUSHR + AVUSHLL + AVUSHLL2 + AVUXTL + AVUXTL2 + AVUZP1 + AVUZP2 + AVSHL + AVSRI + AVBSL + AVBIT + AVTBL + AVZIP1 + AVZIP2 + AVCMTST + ALAST + AB = obj.AJMP + ABL = obj.ACALL +) + +const ( + // shift types + SHIFT_LL = 0 << 22 + SHIFT_LR = 1 << 22 + SHIFT_AR = 2 << 22 +) + +// Arrangement for ARM64 SIMD instructions +const ( + // arrangement types + ARNG_8B = iota + ARNG_16B + ARNG_1D + ARNG_4H + ARNG_8H + ARNG_2S + ARNG_4S + ARNG_2D + ARNG_1Q + ARNG_B + ARNG_H + ARNG_S + ARNG_D +) diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/anames.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/anames.go new file mode 100644 index 0000000..0ce620a --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/anames.go @@ -0,0 +1,512 @@ +// Code generated by stringer -i a.out.go -o anames.go -p arm64; DO NOT EDIT. 
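+//
+// (Hedged usage note added for illustration, not original text: the strings
+// below line up with the opcode constants in a.out.go, so a typical lookup
+// is Anames[as&obj.AMask], e.g. Anames[AADC&obj.AMask] == "ADC".)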
+ +package arm64 + +import "github.com/twitchyliquid64/golang-asm/obj" + +var Anames = []string{ + obj.A_ARCHSPECIFIC: "ADC", + "ADCS", + "ADCSW", + "ADCW", + "ADD", + "ADDS", + "ADDSW", + "ADDW", + "ADR", + "ADRP", + "AND", + "ANDS", + "ANDSW", + "ANDW", + "ASR", + "ASRW", + "AT", + "BFI", + "BFIW", + "BFM", + "BFMW", + "BFXIL", + "BFXILW", + "BIC", + "BICS", + "BICSW", + "BICW", + "BRK", + "CBNZ", + "CBNZW", + "CBZ", + "CBZW", + "CCMN", + "CCMNW", + "CCMP", + "CCMPW", + "CINC", + "CINCW", + "CINV", + "CINVW", + "CLREX", + "CLS", + "CLSW", + "CLZ", + "CLZW", + "CMN", + "CMNW", + "CMP", + "CMPW", + "CNEG", + "CNEGW", + "CRC32B", + "CRC32CB", + "CRC32CH", + "CRC32CW", + "CRC32CX", + "CRC32H", + "CRC32W", + "CRC32X", + "CSEL", + "CSELW", + "CSET", + "CSETM", + "CSETMW", + "CSETW", + "CSINC", + "CSINCW", + "CSINV", + "CSINVW", + "CSNEG", + "CSNEGW", + "DC", + "DCPS1", + "DCPS2", + "DCPS3", + "DMB", + "DRPS", + "DSB", + "EON", + "EONW", + "EOR", + "EORW", + "ERET", + "EXTR", + "EXTRW", + "HINT", + "HLT", + "HVC", + "IC", + "ISB", + "LDADDAB", + "LDADDAD", + "LDADDAH", + "LDADDAW", + "LDADDALB", + "LDADDALD", + "LDADDALH", + "LDADDALW", + "LDADDB", + "LDADDD", + "LDADDH", + "LDADDW", + "LDADDLB", + "LDADDLD", + "LDADDLH", + "LDADDLW", + "LDANDAB", + "LDANDAD", + "LDANDAH", + "LDANDAW", + "LDANDALB", + "LDANDALD", + "LDANDALH", + "LDANDALW", + "LDANDB", + "LDANDD", + "LDANDH", + "LDANDW", + "LDANDLB", + "LDANDLD", + "LDANDLH", + "LDANDLW", + "LDAR", + "LDARB", + "LDARH", + "LDARW", + "LDAXP", + "LDAXPW", + "LDAXR", + "LDAXRB", + "LDAXRH", + "LDAXRW", + "LDEORAB", + "LDEORAD", + "LDEORAH", + "LDEORAW", + "LDEORALB", + "LDEORALD", + "LDEORALH", + "LDEORALW", + "LDEORB", + "LDEORD", + "LDEORH", + "LDEORW", + "LDEORLB", + "LDEORLD", + "LDEORLH", + "LDEORLW", + "LDORAB", + "LDORAD", + "LDORAH", + "LDORAW", + "LDORALB", + "LDORALD", + "LDORALH", + "LDORALW", + "LDORB", + "LDORD", + "LDORH", + "LDORW", + "LDORLB", + "LDORLD", + "LDORLH", + "LDORLW", + "LDP", + "LDPW", + "LDPSW", + "LDXR", + "LDXRB", + "LDXRH", + "LDXRW", + "LDXP", + "LDXPW", + "LSL", + "LSLW", + "LSR", + "LSRW", + "MADD", + "MADDW", + "MNEG", + "MNEGW", + "MOVK", + "MOVKW", + "MOVN", + "MOVNW", + "MOVZ", + "MOVZW", + "MRS", + "MSR", + "MSUB", + "MSUBW", + "MUL", + "MULW", + "MVN", + "MVNW", + "NEG", + "NEGS", + "NEGSW", + "NEGW", + "NGC", + "NGCS", + "NGCSW", + "NGCW", + "NOOP", + "ORN", + "ORNW", + "ORR", + "ORRW", + "PRFM", + "PRFUM", + "RBIT", + "RBITW", + "REM", + "REMW", + "REV", + "REV16", + "REV16W", + "REV32", + "REVW", + "ROR", + "RORW", + "SBC", + "SBCS", + "SBCSW", + "SBCW", + "SBFIZ", + "SBFIZW", + "SBFM", + "SBFMW", + "SBFX", + "SBFXW", + "SDIV", + "SDIVW", + "SEV", + "SEVL", + "SMADDL", + "SMC", + "SMNEGL", + "SMSUBL", + "SMULH", + "SMULL", + "STXR", + "STXRB", + "STXRH", + "STXP", + "STXPW", + "STXRW", + "STLP", + "STLPW", + "STLR", + "STLRB", + "STLRH", + "STLRW", + "STLXP", + "STLXPW", + "STLXR", + "STLXRB", + "STLXRH", + "STLXRW", + "STP", + "STPW", + "SUB", + "SUBS", + "SUBSW", + "SUBW", + "SVC", + "SXTB", + "SXTBW", + "SXTH", + "SXTHW", + "SXTW", + "SYS", + "SYSL", + "TBNZ", + "TBZ", + "TLBI", + "TST", + "TSTW", + "UBFIZ", + "UBFIZW", + "UBFM", + "UBFMW", + "UBFX", + "UBFXW", + "UDIV", + "UDIVW", + "UMADDL", + "UMNEGL", + "UMSUBL", + "UMULH", + "UMULL", + "UREM", + "UREMW", + "UXTB", + "UXTH", + "UXTW", + "UXTBW", + "UXTHW", + "WFE", + "WFI", + "YIELD", + "MOVB", + "MOVBU", + "MOVH", + "MOVHU", + "MOVW", + "MOVWU", + "MOVD", + "MOVNP", + "MOVNPW", + "MOVP", + "MOVPD", + "MOVPQ", + "MOVPS", + "MOVPSW", + "MOVPW", + 
"SWPAD", + "SWPAW", + "SWPAH", + "SWPAB", + "SWPALD", + "SWPALW", + "SWPALH", + "SWPALB", + "SWPD", + "SWPW", + "SWPH", + "SWPB", + "SWPLD", + "SWPLW", + "SWPLH", + "SWPLB", + "BEQ", + "BNE", + "BCS", + "BHS", + "BCC", + "BLO", + "BMI", + "BPL", + "BVS", + "BVC", + "BHI", + "BLS", + "BGE", + "BLT", + "BGT", + "BLE", + "FABSD", + "FABSS", + "FADDD", + "FADDS", + "FCCMPD", + "FCCMPED", + "FCCMPS", + "FCCMPES", + "FCMPD", + "FCMPED", + "FCMPES", + "FCMPS", + "FCVTSD", + "FCVTDS", + "FCVTZSD", + "FCVTZSDW", + "FCVTZSS", + "FCVTZSSW", + "FCVTZUD", + "FCVTZUDW", + "FCVTZUS", + "FCVTZUSW", + "FDIVD", + "FDIVS", + "FLDPD", + "FLDPS", + "FMOVD", + "FMOVS", + "FMOVQ", + "FMULD", + "FMULS", + "FNEGD", + "FNEGS", + "FSQRTD", + "FSQRTS", + "FSTPD", + "FSTPS", + "FSUBD", + "FSUBS", + "SCVTFD", + "SCVTFS", + "SCVTFWD", + "SCVTFWS", + "UCVTFD", + "UCVTFS", + "UCVTFWD", + "UCVTFWS", + "WORD", + "DWORD", + "FCSELS", + "FCSELD", + "FMAXS", + "FMINS", + "FMAXD", + "FMIND", + "FMAXNMS", + "FMAXNMD", + "FNMULS", + "FNMULD", + "FRINTNS", + "FRINTND", + "FRINTPS", + "FRINTPD", + "FRINTMS", + "FRINTMD", + "FRINTZS", + "FRINTZD", + "FRINTAS", + "FRINTAD", + "FRINTXS", + "FRINTXD", + "FRINTIS", + "FRINTID", + "FMADDS", + "FMADDD", + "FMSUBS", + "FMSUBD", + "FNMADDS", + "FNMADDD", + "FNMSUBS", + "FNMSUBD", + "FMINNMS", + "FMINNMD", + "FCVTDH", + "FCVTHS", + "FCVTHD", + "FCVTSH", + "AESD", + "AESE", + "AESIMC", + "AESMC", + "SHA1C", + "SHA1H", + "SHA1M", + "SHA1P", + "SHA1SU0", + "SHA1SU1", + "SHA256H", + "SHA256H2", + "SHA256SU0", + "SHA256SU1", + "SHA512H", + "SHA512H2", + "SHA512SU0", + "SHA512SU1", + "VADD", + "VADDP", + "VAND", + "VBIF", + "VCMEQ", + "VCNT", + "VEOR", + "VMOV", + "VLD1", + "VLD2", + "VLD3", + "VLD4", + "VLD1R", + "VLD2R", + "VLD3R", + "VLD4R", + "VORR", + "VREV16", + "VREV32", + "VREV64", + "VST1", + "VST2", + "VST3", + "VST4", + "VDUP", + "VADDV", + "VMOVI", + "VUADDLV", + "VSUB", + "VFMLA", + "VFMLS", + "VPMULL", + "VPMULL2", + "VEXT", + "VRBIT", + "VUSHR", + "VUSHLL", + "VUSHLL2", + "VUXTL", + "VUXTL2", + "VUZP1", + "VUZP2", + "VSHL", + "VSRI", + "VBSL", + "VBIT", + "VTBL", + "VZIP1", + "VZIP2", + "VCMTST", + "LAST", +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/anames7.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/anames7.go new file mode 100644 index 0000000..96c9f78 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/anames7.go @@ -0,0 +1,100 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package arm64
+
+// This order should be strictly consistent with that in a.out.go.
+var cnames7 = []string{
+ "NONE",
+ "REG",
+ "RSP",
+ "FREG",
+ "VREG",
+ "PAIR",
+ "SHIFT",
+ "EXTREG",
+ "SPR",
+ "COND",
+ "ARNG",
+ "ELEM",
+ "LIST",
+ "ZCON",
+ "ABCON0",
+ "ADDCON0",
+ "ABCON",
+ "AMCON",
+ "ADDCON",
+ "MBCON",
+ "MOVCON",
+ "BITCON",
+ "ADDCON2",
+ "LCON",
+ "MOVCON2",
+ "MOVCON3",
+ "VCON",
+ "FCON",
+ "VCONADDR",
+ "AACON",
+ "AACON2",
+ "LACON",
+ "AECON",
+ "SBRA",
+ "LBRA",
+ "ZAUTO",
+ "NSAUTO_8",
+ "NSAUTO_4",
+ "NSAUTO",
+ "NPAUTO",
+ "NAUTO4K",
+ "PSAUTO_8",
+ "PSAUTO_4",
+ "PSAUTO",
+ "PPAUTO",
+ "UAUTO4K_8",
+ "UAUTO4K_4",
+ "UAUTO4K_2",
+ "UAUTO4K",
+ "UAUTO8K_8",
+ "UAUTO8K_4",
+ "UAUTO8K",
+ "UAUTO16K_8",
+ "UAUTO16K",
+ "UAUTO32K",
+ "LAUTO",
+ "SEXT1",
+ "SEXT2",
+ "SEXT4",
+ "SEXT8",
+ "SEXT16",
+ "LEXT",
+ "ZOREG",
+ "NSOREG_8",
+ "NSOREG_4",
+ "NSOREG",
+ "NPOREG",
+ "NOREG4K",
+ "PSOREG_8",
+ "PSOREG_4",
+ "PSOREG",
+ "PPOREG",
+ "UOREG4K_8",
+ "UOREG4K_4",
+ "UOREG4K_2",
+ "UOREG4K",
+ "UOREG8K_8",
+ "UOREG8K_4",
+ "UOREG8K",
+ "UOREG16K_8",
+ "UOREG16K",
+ "UOREG32K",
+ "LOREG",
+ "ADDR",
+ "GOTADDR",
+ "TLS_LE",
+ "TLS_IE",
+ "ROFF",
+ "GOK",
+ "TEXTSIZE",
+ "NCLASS",
+}
diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/asm7.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/asm7.go
new file mode 100644
index 0000000..2bbb64b
--- /dev/null
+++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/asm7.go
@@ -0,0 +1,7140 @@
+// cmd/7l/asm.c, cmd/7l/asmout.c, cmd/7l/optab.c, cmd/7l/span.c, cmd/ld/sub.c, cmd/ld/mod.c, from Vita Nuova.
+// https://code.google.com/p/ken-cc/source/browse/
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm64
+
+import (
+ "github.com/twitchyliquid64/golang-asm/obj"
+ "github.com/twitchyliquid64/golang-asm/objabi"
+ "fmt"
+ "log"
+ "math"
+ "sort"
+)
+
+// ctxt7 holds state while assembling a single function.
+// Each function gets a fresh ctxt7. +// This allows for multiple functions to be safely concurrently assembled. +type ctxt7 struct { + ctxt *obj.Link + newprog obj.ProgAlloc + cursym *obj.LSym + blitrl *obj.Prog + elitrl *obj.Prog + autosize int32 + extrasize int32 + instoffset int64 + pc int64 + pool struct { + start uint32 + size uint32 + } +} + +const ( + funcAlign = 16 +) + +const ( + REGFROM = 1 +) + +type Optab struct { + as obj.As + a1 uint8 + a2 uint8 + a3 uint8 + a4 uint8 + type_ int8 + size int8 + param int16 + flag int8 + scond uint16 +} + +func IsAtomicInstruction(as obj.As) bool { + _, ok := atomicInstructions[as] + return ok +} + +// known field values of an instruction. +var atomicInstructions = map[obj.As]uint32{ + ALDADDAD: 3<<30 | 0x1c5<<21 | 0x00<<10, + ALDADDAW: 2<<30 | 0x1c5<<21 | 0x00<<10, + ALDADDAH: 1<<30 | 0x1c5<<21 | 0x00<<10, + ALDADDAB: 0<<30 | 0x1c5<<21 | 0x00<<10, + ALDADDALD: 3<<30 | 0x1c7<<21 | 0x00<<10, + ALDADDALW: 2<<30 | 0x1c7<<21 | 0x00<<10, + ALDADDALH: 1<<30 | 0x1c7<<21 | 0x00<<10, + ALDADDALB: 0<<30 | 0x1c7<<21 | 0x00<<10, + ALDADDD: 3<<30 | 0x1c1<<21 | 0x00<<10, + ALDADDW: 2<<30 | 0x1c1<<21 | 0x00<<10, + ALDADDH: 1<<30 | 0x1c1<<21 | 0x00<<10, + ALDADDB: 0<<30 | 0x1c1<<21 | 0x00<<10, + ALDADDLD: 3<<30 | 0x1c3<<21 | 0x00<<10, + ALDADDLW: 2<<30 | 0x1c3<<21 | 0x00<<10, + ALDADDLH: 1<<30 | 0x1c3<<21 | 0x00<<10, + ALDADDLB: 0<<30 | 0x1c3<<21 | 0x00<<10, + ALDANDAD: 3<<30 | 0x1c5<<21 | 0x04<<10, + ALDANDAW: 2<<30 | 0x1c5<<21 | 0x04<<10, + ALDANDAH: 1<<30 | 0x1c5<<21 | 0x04<<10, + ALDANDAB: 0<<30 | 0x1c5<<21 | 0x04<<10, + ALDANDALD: 3<<30 | 0x1c7<<21 | 0x04<<10, + ALDANDALW: 2<<30 | 0x1c7<<21 | 0x04<<10, + ALDANDALH: 1<<30 | 0x1c7<<21 | 0x04<<10, + ALDANDALB: 0<<30 | 0x1c7<<21 | 0x04<<10, + ALDANDD: 3<<30 | 0x1c1<<21 | 0x04<<10, + ALDANDW: 2<<30 | 0x1c1<<21 | 0x04<<10, + ALDANDH: 1<<30 | 0x1c1<<21 | 0x04<<10, + ALDANDB: 0<<30 | 0x1c1<<21 | 0x04<<10, + ALDANDLD: 3<<30 | 0x1c3<<21 | 0x04<<10, + ALDANDLW: 2<<30 | 0x1c3<<21 | 0x04<<10, + ALDANDLH: 1<<30 | 0x1c3<<21 | 0x04<<10, + ALDANDLB: 0<<30 | 0x1c3<<21 | 0x04<<10, + ALDEORAD: 3<<30 | 0x1c5<<21 | 0x08<<10, + ALDEORAW: 2<<30 | 0x1c5<<21 | 0x08<<10, + ALDEORAH: 1<<30 | 0x1c5<<21 | 0x08<<10, + ALDEORAB: 0<<30 | 0x1c5<<21 | 0x08<<10, + ALDEORALD: 3<<30 | 0x1c7<<21 | 0x08<<10, + ALDEORALW: 2<<30 | 0x1c7<<21 | 0x08<<10, + ALDEORALH: 1<<30 | 0x1c7<<21 | 0x08<<10, + ALDEORALB: 0<<30 | 0x1c7<<21 | 0x08<<10, + ALDEORD: 3<<30 | 0x1c1<<21 | 0x08<<10, + ALDEORW: 2<<30 | 0x1c1<<21 | 0x08<<10, + ALDEORH: 1<<30 | 0x1c1<<21 | 0x08<<10, + ALDEORB: 0<<30 | 0x1c1<<21 | 0x08<<10, + ALDEORLD: 3<<30 | 0x1c3<<21 | 0x08<<10, + ALDEORLW: 2<<30 | 0x1c3<<21 | 0x08<<10, + ALDEORLH: 1<<30 | 0x1c3<<21 | 0x08<<10, + ALDEORLB: 0<<30 | 0x1c3<<21 | 0x08<<10, + ALDORAD: 3<<30 | 0x1c5<<21 | 0x0c<<10, + ALDORAW: 2<<30 | 0x1c5<<21 | 0x0c<<10, + ALDORAH: 1<<30 | 0x1c5<<21 | 0x0c<<10, + ALDORAB: 0<<30 | 0x1c5<<21 | 0x0c<<10, + ALDORALD: 3<<30 | 0x1c7<<21 | 0x0c<<10, + ALDORALW: 2<<30 | 0x1c7<<21 | 0x0c<<10, + ALDORALH: 1<<30 | 0x1c7<<21 | 0x0c<<10, + ALDORALB: 0<<30 | 0x1c7<<21 | 0x0c<<10, + ALDORD: 3<<30 | 0x1c1<<21 | 0x0c<<10, + ALDORW: 2<<30 | 0x1c1<<21 | 0x0c<<10, + ALDORH: 1<<30 | 0x1c1<<21 | 0x0c<<10, + ALDORB: 0<<30 | 0x1c1<<21 | 0x0c<<10, + ALDORLD: 3<<30 | 0x1c3<<21 | 0x0c<<10, + ALDORLW: 2<<30 | 0x1c3<<21 | 0x0c<<10, + ALDORLH: 1<<30 | 0x1c3<<21 | 0x0c<<10, + ALDORLB: 0<<30 | 0x1c3<<21 | 0x0c<<10, + ASWPAD: 3<<30 | 0x1c5<<21 | 0x20<<10, + ASWPAW: 2<<30 | 0x1c5<<21 | 0x20<<10, + ASWPAH: 1<<30 | 0x1c5<<21 | 0x20<<10, + ASWPAB: 0<<30 | 
0x1c5<<21 | 0x20<<10, + ASWPALD: 3<<30 | 0x1c7<<21 | 0x20<<10, + ASWPALW: 2<<30 | 0x1c7<<21 | 0x20<<10, + ASWPALH: 1<<30 | 0x1c7<<21 | 0x20<<10, + ASWPALB: 0<<30 | 0x1c7<<21 | 0x20<<10, + ASWPD: 3<<30 | 0x1c1<<21 | 0x20<<10, + ASWPW: 2<<30 | 0x1c1<<21 | 0x20<<10, + ASWPH: 1<<30 | 0x1c1<<21 | 0x20<<10, + ASWPB: 0<<30 | 0x1c1<<21 | 0x20<<10, + ASWPLD: 3<<30 | 0x1c3<<21 | 0x20<<10, + ASWPLW: 2<<30 | 0x1c3<<21 | 0x20<<10, + ASWPLH: 1<<30 | 0x1c3<<21 | 0x20<<10, + ASWPLB: 0<<30 | 0x1c3<<21 | 0x20<<10, +} + +var oprange [ALAST & obj.AMask][]Optab + +var xcmp [C_NCLASS][C_NCLASS]bool + +const ( + S32 = 0 << 31 + S64 = 1 << 31 + Sbit = 1 << 29 + LSL0_32 = 2 << 13 + LSL0_64 = 3 << 13 +) + +func OPDP2(x uint32) uint32 { + return 0<<30 | 0<<29 | 0xd6<<21 | x<<10 +} + +func OPDP3(sf uint32, op54 uint32, op31 uint32, o0 uint32) uint32 { + return sf<<31 | op54<<29 | 0x1B<<24 | op31<<21 | o0<<15 +} + +func OPBcc(x uint32) uint32 { + return 0x2A<<25 | 0<<24 | 0<<4 | x&15 +} + +func OPBLR(x uint32) uint32 { + /* x=0, JMP; 1, CALL; 2, RET */ + return 0x6B<<25 | 0<<23 | x<<21 | 0x1F<<16 | 0<<10 +} + +func SYSOP(l uint32, op0 uint32, op1 uint32, crn uint32, crm uint32, op2 uint32, rt uint32) uint32 { + return 0x354<<22 | l<<21 | op0<<19 | op1<<16 | crn&15<<12 | crm&15<<8 | op2<<5 | rt +} + +func SYSHINT(x uint32) uint32 { + return SYSOP(0, 0, 3, 2, 0, x, 0x1F) +} + +func LDSTR12U(sz uint32, v uint32, opc uint32) uint32 { + return sz<<30 | 7<<27 | v<<26 | 1<<24 | opc<<22 +} + +func LDSTR9S(sz uint32, v uint32, opc uint32) uint32 { + return sz<<30 | 7<<27 | v<<26 | 0<<24 | opc<<22 +} + +func LD2STR(o uint32) uint32 { + return o &^ (3 << 22) +} + +func LDSTX(sz uint32, o2 uint32, l uint32, o1 uint32, o0 uint32) uint32 { + return sz<<30 | 0x8<<24 | o2<<23 | l<<22 | o1<<21 | o0<<15 +} + +func FPCMP(m uint32, s uint32, type_ uint32, op uint32, op2 uint32) uint32 { + return m<<31 | s<<29 | 0x1E<<24 | type_<<22 | 1<<21 | op<<14 | 8<<10 | op2 +} + +func FPCCMP(m uint32, s uint32, type_ uint32, op uint32) uint32 { + return m<<31 | s<<29 | 0x1E<<24 | type_<<22 | 1<<21 | 1<<10 | op<<4 +} + +func FPOP1S(m uint32, s uint32, type_ uint32, op uint32) uint32 { + return m<<31 | s<<29 | 0x1E<<24 | type_<<22 | 1<<21 | op<<15 | 0x10<<10 +} + +func FPOP2S(m uint32, s uint32, type_ uint32, op uint32) uint32 { + return m<<31 | s<<29 | 0x1E<<24 | type_<<22 | 1<<21 | op<<12 | 2<<10 +} + +func FPOP3S(m uint32, s uint32, type_ uint32, op uint32, op2 uint32) uint32 { + return m<<31 | s<<29 | 0x1F<<24 | type_<<22 | op<<21 | op2<<15 +} + +func FPCVTI(sf uint32, s uint32, type_ uint32, rmode uint32, op uint32) uint32 { + return sf<<31 | s<<29 | 0x1E<<24 | type_<<22 | 1<<21 | rmode<<19 | op<<16 | 0<<10 +} + +func ADR(p uint32, o uint32, rt uint32) uint32 { + return p<<31 | (o&3)<<29 | 0x10<<24 | ((o>>2)&0x7FFFF)<<5 | rt&31 +} + +func OPBIT(x uint32) uint32 { + return 1<<30 | 0<<29 | 0xD6<<21 | 0<<16 | x<<10 +} + +func MOVCONST(d int64, s int, rt int) uint32 { + return uint32(((d>>uint(s*16))&0xFFFF)<<5) | uint32(s)&3<<21 | uint32(rt&31) +} + +const ( + // Optab.flag + LFROM = 1 << 0 // p.From uses constant pool + LTO = 1 << 1 // p.To uses constant pool + NOTUSETMP = 1 << 2 // p expands to multiple instructions, but does NOT use REGTMP +) + +var optab = []Optab{ + /* struct Optab: + OPCODE, from, prog->reg, from3, to, type,size,param,flag,scond */ + {obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0}, + + /* arithmetic operations */ + {AADD, C_REG, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, + {AADD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 
0, 0, 0}, + {AADC, C_REG, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, + {AADC, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0, 0, 0}, + {ANEG, C_REG, C_NONE, C_NONE, C_REG, 25, 4, 0, 0, 0}, + {ANEG, C_NONE, C_NONE, C_NONE, C_REG, 25, 4, 0, 0, 0}, + {ANGC, C_REG, C_NONE, C_NONE, C_REG, 17, 4, 0, 0, 0}, + {ACMP, C_REG, C_REG, C_NONE, C_NONE, 1, 4, 0, 0, 0}, + {AADD, C_ADDCON, C_RSP, C_NONE, C_RSP, 2, 4, 0, 0, 0}, + {AADD, C_ADDCON, C_NONE, C_NONE, C_RSP, 2, 4, 0, 0, 0}, + {ACMP, C_ADDCON, C_RSP, C_NONE, C_NONE, 2, 4, 0, 0, 0}, + {AADD, C_MOVCON, C_RSP, C_NONE, C_RSP, 62, 8, 0, 0, 0}, + {AADD, C_MOVCON, C_NONE, C_NONE, C_RSP, 62, 8, 0, 0, 0}, + {ACMP, C_MOVCON, C_RSP, C_NONE, C_NONE, 62, 8, 0, 0, 0}, + {AADD, C_BITCON, C_RSP, C_NONE, C_RSP, 62, 8, 0, 0, 0}, + {AADD, C_BITCON, C_NONE, C_NONE, C_RSP, 62, 8, 0, 0, 0}, + {ACMP, C_BITCON, C_RSP, C_NONE, C_NONE, 62, 8, 0, 0, 0}, + {AADD, C_ADDCON2, C_RSP, C_NONE, C_RSP, 48, 8, 0, NOTUSETMP, 0}, + {AADD, C_ADDCON2, C_NONE, C_NONE, C_RSP, 48, 8, 0, NOTUSETMP, 0}, + {AADD, C_MOVCON2, C_RSP, C_NONE, C_RSP, 13, 12, 0, 0, 0}, + {AADD, C_MOVCON2, C_NONE, C_NONE, C_RSP, 13, 12, 0, 0, 0}, + {AADD, C_MOVCON3, C_RSP, C_NONE, C_RSP, 13, 16, 0, 0, 0}, + {AADD, C_MOVCON3, C_NONE, C_NONE, C_RSP, 13, 16, 0, 0, 0}, + {AADD, C_VCON, C_RSP, C_NONE, C_RSP, 13, 20, 0, 0, 0}, + {AADD, C_VCON, C_NONE, C_NONE, C_RSP, 13, 20, 0, 0, 0}, + {ACMP, C_MOVCON2, C_REG, C_NONE, C_NONE, 13, 12, 0, 0, 0}, + {ACMP, C_MOVCON3, C_REG, C_NONE, C_NONE, 13, 16, 0, 0, 0}, + {ACMP, C_VCON, C_REG, C_NONE, C_NONE, 13, 20, 0, 0, 0}, + {AADD, C_SHIFT, C_REG, C_NONE, C_REG, 3, 4, 0, 0, 0}, + {AADD, C_SHIFT, C_NONE, C_NONE, C_REG, 3, 4, 0, 0, 0}, + {AMVN, C_SHIFT, C_NONE, C_NONE, C_REG, 3, 4, 0, 0, 0}, + {ACMP, C_SHIFT, C_REG, C_NONE, C_NONE, 3, 4, 0, 0, 0}, + {ANEG, C_SHIFT, C_NONE, C_NONE, C_REG, 26, 4, 0, 0, 0}, + {AADD, C_REG, C_RSP, C_NONE, C_RSP, 27, 4, 0, 0, 0}, + {AADD, C_REG, C_NONE, C_NONE, C_RSP, 27, 4, 0, 0, 0}, + {ACMP, C_REG, C_RSP, C_NONE, C_NONE, 27, 4, 0, 0, 0}, + {AADD, C_EXTREG, C_RSP, C_NONE, C_RSP, 27, 4, 0, 0, 0}, + {AADD, C_EXTREG, C_NONE, C_NONE, C_RSP, 27, 4, 0, 0, 0}, + {AMVN, C_EXTREG, C_NONE, C_NONE, C_RSP, 27, 4, 0, 0, 0}, + {ACMP, C_EXTREG, C_RSP, C_NONE, C_NONE, 27, 4, 0, 0, 0}, + {AADD, C_REG, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, + {AADD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0, 0, 0}, + {AMUL, C_REG, C_REG, C_NONE, C_REG, 15, 4, 0, 0, 0}, + {AMUL, C_REG, C_NONE, C_NONE, C_REG, 15, 4, 0, 0, 0}, + {AMADD, C_REG, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0}, + {AREM, C_REG, C_REG, C_NONE, C_REG, 16, 8, 0, 0, 0}, + {AREM, C_REG, C_NONE, C_NONE, C_REG, 16, 8, 0, 0, 0}, + {ASDIV, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0, 0, 0}, + {ASDIV, C_REG, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, + + {AFADDS, C_FREG, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0}, + {AFADDS, C_FREG, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0}, + {AFMSUBD, C_FREG, C_FREG, C_FREG, C_FREG, 15, 4, 0, 0, 0}, + {AFCMPS, C_FREG, C_FREG, C_NONE, C_NONE, 56, 4, 0, 0, 0}, + {AFCMPS, C_FCON, C_FREG, C_NONE, C_NONE, 56, 4, 0, 0, 0}, + {AVADDP, C_ARNG, C_ARNG, C_NONE, C_ARNG, 72, 4, 0, 0, 0}, + {AVADD, C_ARNG, C_ARNG, C_NONE, C_ARNG, 72, 4, 0, 0, 0}, + {AVADD, C_VREG, C_VREG, C_NONE, C_VREG, 89, 4, 0, 0, 0}, + {AVADD, C_VREG, C_NONE, C_NONE, C_VREG, 89, 4, 0, 0, 0}, + {AVADDV, C_ARNG, C_NONE, C_NONE, C_VREG, 85, 4, 0, 0, 0}, + + /* logical operations */ + {AAND, C_REG, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, + {AAND, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0, 0, 0}, + {AANDS, C_REG, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, + {AANDS, C_REG, C_NONE, C_NONE, 
C_REG, 1, 4, 0, 0, 0}, + {ATST, C_REG, C_REG, C_NONE, C_NONE, 1, 4, 0, 0, 0}, + {AAND, C_MBCON, C_REG, C_NONE, C_RSP, 53, 4, 0, 0, 0}, + {AAND, C_MBCON, C_NONE, C_NONE, C_REG, 53, 4, 0, 0, 0}, + {AANDS, C_MBCON, C_REG, C_NONE, C_REG, 53, 4, 0, 0, 0}, + {AANDS, C_MBCON, C_NONE, C_NONE, C_REG, 53, 4, 0, 0, 0}, + {ATST, C_MBCON, C_REG, C_NONE, C_NONE, 53, 4, 0, 0, 0}, + {AAND, C_BITCON, C_REG, C_NONE, C_RSP, 53, 4, 0, 0, 0}, + {AAND, C_BITCON, C_NONE, C_NONE, C_REG, 53, 4, 0, 0, 0}, + {AANDS, C_BITCON, C_REG, C_NONE, C_REG, 53, 4, 0, 0, 0}, + {AANDS, C_BITCON, C_NONE, C_NONE, C_REG, 53, 4, 0, 0, 0}, + {ATST, C_BITCON, C_REG, C_NONE, C_NONE, 53, 4, 0, 0, 0}, + {AAND, C_MOVCON, C_REG, C_NONE, C_REG, 62, 8, 0, 0, 0}, + {AAND, C_MOVCON, C_NONE, C_NONE, C_REG, 62, 8, 0, 0, 0}, + {AANDS, C_MOVCON, C_REG, C_NONE, C_REG, 62, 8, 0, 0, 0}, + {AANDS, C_MOVCON, C_NONE, C_NONE, C_REG, 62, 8, 0, 0, 0}, + {ATST, C_MOVCON, C_REG, C_NONE, C_NONE, 62, 8, 0, 0, 0}, + {AAND, C_MOVCON2, C_REG, C_NONE, C_REG, 28, 12, 0, 0, 0}, + {AAND, C_MOVCON2, C_NONE, C_NONE, C_REG, 28, 12, 0, 0, 0}, + {AAND, C_MOVCON3, C_REG, C_NONE, C_REG, 28, 16, 0, 0, 0}, + {AAND, C_MOVCON3, C_NONE, C_NONE, C_REG, 28, 16, 0, 0, 0}, + {AAND, C_VCON, C_REG, C_NONE, C_REG, 28, 20, 0, 0, 0}, + {AAND, C_VCON, C_NONE, C_NONE, C_REG, 28, 20, 0, 0, 0}, + {AANDS, C_MOVCON2, C_REG, C_NONE, C_REG, 28, 12, 0, 0, 0}, + {AANDS, C_MOVCON2, C_NONE, C_NONE, C_REG, 28, 12, 0, 0, 0}, + {AANDS, C_MOVCON3, C_REG, C_NONE, C_REG, 28, 16, 0, 0, 0}, + {AANDS, C_MOVCON3, C_NONE, C_NONE, C_REG, 28, 16, 0, 0, 0}, + {AANDS, C_VCON, C_REG, C_NONE, C_REG, 28, 20, 0, 0, 0}, + {AANDS, C_VCON, C_NONE, C_NONE, C_REG, 28, 20, 0, 0, 0}, + {ATST, C_MOVCON2, C_REG, C_NONE, C_NONE, 28, 12, 0, 0, 0}, + {ATST, C_MOVCON3, C_REG, C_NONE, C_NONE, 28, 16, 0, 0, 0}, + {ATST, C_VCON, C_REG, C_NONE, C_NONE, 28, 20, 0, 0, 0}, + {AAND, C_SHIFT, C_REG, C_NONE, C_REG, 3, 4, 0, 0, 0}, + {AAND, C_SHIFT, C_NONE, C_NONE, C_REG, 3, 4, 0, 0, 0}, + {AANDS, C_SHIFT, C_REG, C_NONE, C_REG, 3, 4, 0, 0, 0}, + {AANDS, C_SHIFT, C_NONE, C_NONE, C_REG, 3, 4, 0, 0, 0}, + {ATST, C_SHIFT, C_REG, C_NONE, C_NONE, 3, 4, 0, 0, 0}, + {AMOVD, C_RSP, C_NONE, C_NONE, C_RSP, 24, 4, 0, 0, 0}, + {AMVN, C_REG, C_NONE, C_NONE, C_REG, 24, 4, 0, 0, 0}, + {AMOVB, C_REG, C_NONE, C_NONE, C_REG, 45, 4, 0, 0, 0}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_REG, 45, 4, 0, 0, 0}, + {AMOVH, C_REG, C_NONE, C_NONE, C_REG, 45, 4, 0, 0, 0}, /* also MOVHU */ + {AMOVW, C_REG, C_NONE, C_NONE, C_REG, 45, 4, 0, 0, 0}, /* also MOVWU */ + /* TODO: MVN C_SHIFT */ + + /* MOVs that become MOVK/MOVN/MOVZ/ADD/SUB/OR */ + {AMOVW, C_MOVCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, + {AMOVD, C_MOVCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, + {AMOVW, C_BITCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, + {AMOVD, C_BITCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, + {AMOVW, C_MOVCON2, C_NONE, C_NONE, C_REG, 12, 8, 0, NOTUSETMP, 0}, + {AMOVD, C_MOVCON2, C_NONE, C_NONE, C_REG, 12, 8, 0, NOTUSETMP, 0}, + {AMOVD, C_MOVCON3, C_NONE, C_NONE, C_REG, 12, 12, 0, NOTUSETMP, 0}, + {AMOVD, C_VCON, C_NONE, C_NONE, C_REG, 12, 16, 0, NOTUSETMP, 0}, + + {AMOVK, C_VCON, C_NONE, C_NONE, C_REG, 33, 4, 0, 0, 0}, + {AMOVD, C_AACON, C_NONE, C_NONE, C_RSP, 4, 4, REGFROM, 0, 0}, + {AMOVD, C_AACON2, C_NONE, C_NONE, C_RSP, 4, 8, REGFROM, 0, 0}, + + /* load long effective stack address (load int32 offset and add) */ + {AMOVD, C_LACON, C_NONE, C_NONE, C_RSP, 34, 8, REGSP, LFROM, 0}, + + // Move a large constant to a Vn. 
+ {AFMOVQ, C_VCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, + {AFMOVD, C_VCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, + {AFMOVS, C_LCON, C_NONE, C_NONE, C_VREG, 101, 4, 0, LFROM, 0}, + + /* jump operations */ + {AB, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, + {ABL, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, + {AB, C_NONE, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0}, + {ABL, C_NONE, C_NONE, C_NONE, C_REG, 6, 4, 0, 0, 0}, + {ABL, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0, 0, 0}, + {ABL, C_NONE, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0}, + {obj.ARET, C_NONE, C_NONE, C_NONE, C_REG, 6, 4, 0, 0, 0}, + {obj.ARET, C_NONE, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0}, + {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 7, 4, 0, 0, 0}, + {ACBZ, C_REG, C_NONE, C_NONE, C_SBRA, 39, 4, 0, 0, 0}, + {ATBZ, C_VCON, C_REG, C_NONE, C_SBRA, 40, 4, 0, 0, 0}, + {AERET, C_NONE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0}, + + // get a PC-relative address + {AADRP, C_SBRA, C_NONE, C_NONE, C_REG, 60, 4, 0, 0, 0}, + {AADR, C_SBRA, C_NONE, C_NONE, C_REG, 61, 4, 0, 0, 0}, + + {ACLREX, C_NONE, C_NONE, C_NONE, C_VCON, 38, 4, 0, 0, 0}, + {ACLREX, C_NONE, C_NONE, C_NONE, C_NONE, 38, 4, 0, 0, 0}, + {ABFM, C_VCON, C_REG, C_VCON, C_REG, 42, 4, 0, 0, 0}, + {ABFI, C_VCON, C_REG, C_VCON, C_REG, 43, 4, 0, 0, 0}, + {AEXTR, C_VCON, C_REG, C_REG, C_REG, 44, 4, 0, 0, 0}, + {ASXTB, C_REG, C_NONE, C_NONE, C_REG, 45, 4, 0, 0, 0}, + {ACLS, C_REG, C_NONE, C_NONE, C_REG, 46, 4, 0, 0, 0}, + {ALSL, C_VCON, C_REG, C_NONE, C_REG, 8, 4, 0, 0, 0}, + {ALSL, C_VCON, C_NONE, C_NONE, C_REG, 8, 4, 0, 0, 0}, + {ALSL, C_REG, C_NONE, C_NONE, C_REG, 9, 4, 0, 0, 0}, + {ALSL, C_REG, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, + {ASVC, C_VCON, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0}, + {ASVC, C_NONE, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0}, + {ADWORD, C_NONE, C_NONE, C_NONE, C_VCON, 11, 8, 0, NOTUSETMP, 0}, + {ADWORD, C_NONE, C_NONE, C_NONE, C_LEXT, 11, 8, 0, NOTUSETMP, 0}, + {ADWORD, C_NONE, C_NONE, C_NONE, C_ADDR, 11, 8, 0, NOTUSETMP, 0}, + {ADWORD, C_NONE, C_NONE, C_NONE, C_LACON, 11, 8, 0, NOTUSETMP, 0}, + {AWORD, C_NONE, C_NONE, C_NONE, C_LCON, 14, 4, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_NONE, C_LEXT, 14, 4, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_NONE, C_ADDR, 14, 4, 0, 0, 0}, + {AMOVW, C_VCONADDR, C_NONE, C_NONE, C_REG, 68, 8, 0, NOTUSETMP, 0}, + {AMOVD, C_VCONADDR, C_NONE, C_NONE, C_REG, 68, 8, 0, NOTUSETMP, 0}, + {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0}, + {AMOVH, C_REG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0}, + {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0}, + {AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0}, + {AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 65, 12, 0, 0, 0}, + {AMOVBU, C_ADDR, C_NONE, C_NONE, C_REG, 65, 12, 0, 0, 0}, + {AMOVH, C_ADDR, C_NONE, C_NONE, C_REG, 65, 12, 0, 0, 0}, + {AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 65, 12, 0, 0, 0}, + {AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 65, 12, 0, 0, 0}, + {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 71, 8, 0, 0, 0}, + {AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 69, 4, 0, 0, 0}, + {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 70, 8, 0, 0, 0}, + + {AFMOVS, C_FREG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0}, + {AFMOVS, C_ADDR, C_NONE, C_NONE, C_FREG, 65, 12, 0, 0, 0}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0}, + {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 65, 12, 0, 0, 0}, + {AFMOVS, C_FCON, C_NONE, C_NONE, C_FREG, 55, 4, 0, 0, 0}, + {AFMOVS, C_FREG, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0}, + {AFMOVD, C_FCON, C_NONE, 
C_NONE, C_FREG, 55, 4, 0, 0, 0}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0}, + {AFMOVS, C_REG, C_NONE, C_NONE, C_FREG, 29, 4, 0, 0, 0}, + {AFMOVS, C_FREG, C_NONE, C_NONE, C_REG, 29, 4, 0, 0, 0}, + {AFMOVD, C_REG, C_NONE, C_NONE, C_FREG, 29, 4, 0, 0, 0}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_REG, 29, 4, 0, 0, 0}, + {AFCVTZSD, C_FREG, C_NONE, C_NONE, C_REG, 29, 4, 0, 0, 0}, + {ASCVTFD, C_REG, C_NONE, C_NONE, C_FREG, 29, 4, 0, 0, 0}, + {AFCVTSD, C_FREG, C_NONE, C_NONE, C_FREG, 29, 4, 0, 0, 0}, + {AVMOV, C_ELEM, C_NONE, C_NONE, C_REG, 73, 4, 0, 0, 0}, + {AVMOV, C_ELEM, C_NONE, C_NONE, C_ELEM, 92, 4, 0, 0, 0}, + {AVMOV, C_ELEM, C_NONE, C_NONE, C_VREG, 80, 4, 0, 0, 0}, + {AVMOV, C_REG, C_NONE, C_NONE, C_ARNG, 82, 4, 0, 0, 0}, + {AVMOV, C_REG, C_NONE, C_NONE, C_ELEM, 78, 4, 0, 0, 0}, + {AVMOV, C_ARNG, C_NONE, C_NONE, C_ARNG, 83, 4, 0, 0, 0}, + {AVDUP, C_ELEM, C_NONE, C_NONE, C_ARNG, 79, 4, 0, 0, 0}, + {AVMOVI, C_ADDCON, C_NONE, C_NONE, C_ARNG, 86, 4, 0, 0, 0}, + {AVFMLA, C_ARNG, C_ARNG, C_NONE, C_ARNG, 72, 4, 0, 0, 0}, + {AVEXT, C_VCON, C_ARNG, C_ARNG, C_ARNG, 94, 4, 0, 0, 0}, + {AVTBL, C_ARNG, C_NONE, C_LIST, C_ARNG, 100, 4, 0, 0, 0}, + {AVUSHR, C_VCON, C_ARNG, C_NONE, C_ARNG, 95, 4, 0, 0, 0}, + {AVZIP1, C_ARNG, C_ARNG, C_NONE, C_ARNG, 72, 4, 0, 0, 0}, + {AVUSHLL, C_VCON, C_ARNG, C_NONE, C_ARNG, 102, 4, 0, 0, 0}, + {AVUXTL, C_ARNG, C_NONE, C_NONE, C_ARNG, 102, 4, 0, 0, 0}, + + /* conditional operations */ + {ACSEL, C_COND, C_REG, C_REG, C_REG, 18, 4, 0, 0, 0}, + {ACINC, C_COND, C_REG, C_NONE, C_REG, 18, 4, 0, 0, 0}, + {ACSET, C_COND, C_NONE, C_NONE, C_REG, 18, 4, 0, 0, 0}, + {AFCSELD, C_COND, C_FREG, C_FREG, C_FREG, 18, 4, 0, 0, 0}, + {ACCMN, C_COND, C_REG, C_REG, C_VCON, 19, 4, 0, 0, 0}, + {ACCMN, C_COND, C_REG, C_VCON, C_VCON, 19, 4, 0, 0, 0}, + {AFCCMPS, C_COND, C_FREG, C_FREG, C_VCON, 57, 4, 0, 0, 0}, + + /* scaled 12-bit unsigned displacement store */ + {AMOVB, C_REG, C_NONE, C_NONE, C_UAUTO4K, 20, 4, REGSP, 0, 0}, + {AMOVB, C_REG, C_NONE, C_NONE, C_UOREG4K, 20, 4, 0, 0, 0}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_UAUTO4K, 20, 4, REGSP, 0, 0}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_UOREG4K, 20, 4, 0, 0, 0}, + {AMOVH, C_REG, C_NONE, C_NONE, C_UAUTO8K, 20, 4, REGSP, 0, 0}, + {AMOVH, C_REG, C_NONE, C_NONE, C_UOREG8K, 20, 4, 0, 0, 0}, + {AMOVW, C_REG, C_NONE, C_NONE, C_UAUTO16K, 20, 4, REGSP, 0, 0}, + {AMOVW, C_REG, C_NONE, C_NONE, C_UOREG16K, 20, 4, 0, 0, 0}, + {AMOVD, C_REG, C_NONE, C_NONE, C_UAUTO32K, 20, 4, REGSP, 0, 0}, + {AMOVD, C_REG, C_NONE, C_NONE, C_UOREG32K, 20, 4, 0, 0, 0}, + + {AFMOVS, C_FREG, C_NONE, C_NONE, C_UAUTO16K, 20, 4, REGSP, 0, 0}, + {AFMOVS, C_FREG, C_NONE, C_NONE, C_UOREG16K, 20, 4, 0, 0, 0}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_UAUTO32K, 20, 4, REGSP, 0, 0}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_UOREG32K, 20, 4, 0, 0, 0}, + + /* unscaled 9-bit signed displacement store */ + {AMOVB, C_REG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, + {AMOVB, C_REG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, + {AMOVH, C_REG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, + {AMOVH, C_REG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, + {AMOVW, C_REG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, + {AMOVW, C_REG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, + {AMOVD, C_REG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, + {AMOVD, C_REG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, + + {AFMOVS, C_FREG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, + {AFMOVS, 
C_FREG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, + + /* scaled 12-bit unsigned displacement load */ + {AMOVB, C_UAUTO4K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVB, C_UOREG4K, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, + {AMOVBU, C_UAUTO4K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVBU, C_UOREG4K, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, + {AMOVH, C_UAUTO8K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVH, C_UOREG8K, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, + {AMOVW, C_UAUTO16K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVW, C_UOREG16K, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, + {AMOVD, C_UAUTO32K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVD, C_UOREG32K, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, + + {AFMOVS, C_UAUTO16K, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0}, + {AFMOVS, C_UOREG16K, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0}, + {AFMOVD, C_UAUTO32K, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0}, + {AFMOVD, C_UOREG32K, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0}, + + /* unscaled 9-bit signed displacement load */ + {AMOVB, C_NSAUTO, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVB, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, + {AMOVBU, C_NSAUTO, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVBU, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, + {AMOVH, C_NSAUTO, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVH, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, + {AMOVW, C_NSAUTO, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVW, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, + {AMOVD, C_NSAUTO, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVD, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, + + {AFMOVS, C_NSAUTO, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0}, + {AFMOVS, C_NSOREG, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0}, + {AFMOVD, C_NSAUTO, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0}, + {AFMOVD, C_NSOREG, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0}, + + /* long displacement store */ + {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, + {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, + {AMOVH, C_REG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, + {AMOVH, C_REG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, + {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, + {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, + {AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, + {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, + + {AFMOVS, C_FREG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, + {AFMOVS, C_FREG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, + + /* long displacement load */ + {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0}, + {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 31, 8, 0, LFROM, 0}, + {AMOVBU, C_LAUTO, C_NONE, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0}, + {AMOVBU, C_LOREG, C_NONE, C_NONE, C_REG, 31, 8, 0, LFROM, 0}, + {AMOVH, C_LAUTO, C_NONE, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0}, + {AMOVH, C_LOREG, C_NONE, C_NONE, C_REG, 31, 8, 0, LFROM, 0}, + {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0}, + {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 31, 8, 0, LFROM, 0}, + {AMOVD, 
C_LAUTO, C_NONE, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0}, + {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 31, 8, 0, LFROM, 0}, + + {AFMOVS, C_LAUTO, C_NONE, C_NONE, C_FREG, 31, 8, REGSP, LFROM, 0}, + {AFMOVS, C_LOREG, C_NONE, C_NONE, C_FREG, 31, 8, 0, LFROM, 0}, + {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 31, 8, REGSP, LFROM, 0}, + {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 31, 8, 0, LFROM, 0}, + + /* pre/post-indexed load (unscaled, signed 9-bit offset) */ + {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST}, + {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST}, + {AMOVH, C_LOREG, C_NONE, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST}, + {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST}, + {AMOVBU, C_LOREG, C_NONE, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST}, + {AFMOVS, C_LOREG, C_NONE, C_NONE, C_FREG, 22, 4, 0, 0, C_XPOST}, + {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 22, 4, 0, 0, C_XPOST}, + + {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE}, + {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE}, + {AMOVH, C_LOREG, C_NONE, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE}, + {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE}, + {AMOVBU, C_LOREG, C_NONE, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE}, + {AFMOVS, C_LOREG, C_NONE, C_NONE, C_FREG, 22, 4, 0, 0, C_XPRE}, + {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 22, 4, 0, 0, C_XPRE}, + + /* pre/post-indexed store (unscaled, signed 9-bit offset) */ + {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, + {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, + {AMOVH, C_REG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, + {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, + {AFMOVS, C_FREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, + + {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, + {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, + {AMOVH, C_REG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, + {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, + {AMOVBU, C_REG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, + {AFMOVS, C_FREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, + + /* load with shifted or extended register offset */ + {AMOVD, C_ROFF, C_NONE, C_NONE, C_REG, 98, 4, 0, 0, 0}, + {AMOVW, C_ROFF, C_NONE, C_NONE, C_REG, 98, 4, 0, 0, 0}, + {AMOVH, C_ROFF, C_NONE, C_NONE, C_REG, 98, 4, 0, 0, 0}, + {AMOVB, C_ROFF, C_NONE, C_NONE, C_REG, 98, 4, 0, 0, 0}, + {AMOVBU, C_ROFF, C_NONE, C_NONE, C_REG, 98, 4, 0, 0, 0}, + {AFMOVS, C_ROFF, C_NONE, C_NONE, C_FREG, 98, 4, 0, 0, 0}, + {AFMOVD, C_ROFF, C_NONE, C_NONE, C_FREG, 98, 4, 0, 0, 0}, + + /* store with extended register offset */ + {AMOVD, C_REG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, + {AMOVW, C_REG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, + {AMOVH, C_REG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, + {AMOVB, C_REG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, + {AFMOVS, C_FREG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, + {AFMOVD, C_FREG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, + + /* pre/post-indexed/signed-offset load/store register pair + (unscaled, signed 10-bit quad-aligned and long offset) */ + {ALDP, C_NPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0}, + {ALDP, C_NPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPRE}, + {ALDP, C_NPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, + {ALDP, 
C_PPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0}, + {ALDP, C_PPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPRE}, + {ALDP, C_PPAUTO, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, + {ALDP, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, + {ALDP, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPRE}, + {ALDP, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPOST}, + {ALDP, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, + {ALDP, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPRE}, + {ALDP, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPOST}, + {ALDP, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, 0}, + {ALDP, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, C_XPRE}, + {ALDP, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, C_XPOST}, + {ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0}, + {ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, + {ALDP, C_NPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, + {ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0}, + {ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, + {ALDP, C_PPOREG, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, + {ALDP, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, + {ALDP, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPRE}, + {ALDP, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPOST}, + {ALDP, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, + {ALDP, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPRE}, + {ALDP, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPOST}, + {ALDP, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, 0}, + {ALDP, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, C_XPRE}, + {ALDP, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, C_XPOST}, + {ALDP, C_ADDR, C_NONE, C_NONE, C_PAIR, 88, 12, 0, 0, 0}, + + {ASTP, C_PAIR, C_NONE, C_NONE, C_NPAUTO, 67, 4, REGSP, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NPAUTO, 67, 4, REGSP, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NPAUTO, 67, 4, REGSP, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_PPAUTO, 67, 4, REGSP, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_PPAUTO, 67, 4, REGSP, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_PPAUTO, 67, 4, REGSP, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, 0}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, 67, 4, 0, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, 67, 4, 0, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NPOREG, 67, 4, 0, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, 67, 4, 0, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, 67, 4, 0, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_PPOREG, 67, 4, 0, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_NONE, 
C_NOREG4K, 76, 8, 0, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, 0}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NONE, C_ADDR, 87, 12, 0, 0, 0}, + + // differ from LDP/STP for C_NSAUTO_4/C_PSAUTO_4/C_NSOREG_4/C_PSOREG_4 + {ALDPW, C_NSAUTO_4, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0}, + {ALDPW, C_NSAUTO_4, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPRE}, + {ALDPW, C_NSAUTO_4, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, + {ALDPW, C_PSAUTO_4, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0}, + {ALDPW, C_PSAUTO_4, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPRE}, + {ALDPW, C_PSAUTO_4, C_NONE, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, + {ALDPW, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, + {ALDPW, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPRE}, + {ALDPW, C_UAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPOST}, + {ALDPW, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, + {ALDPW, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPRE}, + {ALDPW, C_NAUTO4K, C_NONE, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPOST}, + {ALDPW, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, 0}, + {ALDPW, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, C_XPRE}, + {ALDPW, C_LAUTO, C_NONE, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, C_XPOST}, + {ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0}, + {ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, + {ALDPW, C_NSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, + {ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, 0}, + {ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, + {ALDPW, C_PSOREG_4, C_NONE, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, + {ALDPW, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, + {ALDPW, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPRE}, + {ALDPW, C_UOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPOST}, + {ALDPW, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, + {ALDPW, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPRE}, + {ALDPW, C_NOREG4K, C_NONE, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPOST}, + {ALDPW, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, 0}, + {ALDPW, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, C_XPRE}, + {ALDPW, C_LOREG, C_NONE, C_NONE, C_PAIR, 75, 12, 0, LFROM, C_XPOST}, + {ALDPW, C_ADDR, C_NONE, C_NONE, C_PAIR, 88, 12, 0, 0, 0}, + + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NSAUTO_4, 67, 4, REGSP, 0, 0}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NSAUTO_4, 67, 4, REGSP, 0, C_XPRE}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NSAUTO_4, 67, 4, REGSP, 0, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_PSAUTO_4, 67, 4, REGSP, 0, 0}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_PSAUTO_4, 67, 4, REGSP, 0, C_XPRE}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_PSAUTO_4, 67, 4, REGSP, 0, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, 0}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, C_XPRE}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, 0}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, C_XPRE}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NAUTO4K, 76, 12, REGSP, 0, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, 0}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, C_XPRE}, + {ASTPW, C_PAIR, 
C_NONE, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, 67, 4, 0, 0, 0}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, 67, 4, 0, 0, C_XPRE}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NSOREG_4, 67, 4, 0, 0, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, 67, 4, 0, 0, 0}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, 67, 4, 0, 0, C_XPRE}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_PSOREG_4, 67, 4, 0, 0, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, 0}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, C_XPRE}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_UOREG4K, 76, 8, 0, 0, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, 0}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, C_XPRE}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_NOREG4K, 76, 8, 0, 0, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, 0}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, C_XPRE}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_LOREG, 77, 12, 0, LTO, C_XPOST}, + {ASTPW, C_PAIR, C_NONE, C_NONE, C_ADDR, 87, 12, 0, 0, 0}, + + {ASWPD, C_REG, C_NONE, C_NONE, C_ZOREG, 47, 4, 0, 0, 0}, // RegTo2=C_REG + {ASWPD, C_REG, C_NONE, C_NONE, C_ZAUTO, 47, 4, REGSP, 0, 0}, // RegTo2=C_REG + {ALDAR, C_ZOREG, C_NONE, C_NONE, C_REG, 58, 4, 0, 0, 0}, + {ALDXR, C_ZOREG, C_NONE, C_NONE, C_REG, 58, 4, 0, 0, 0}, + {ALDAXR, C_ZOREG, C_NONE, C_NONE, C_REG, 58, 4, 0, 0, 0}, + {ALDXP, C_ZOREG, C_NONE, C_NONE, C_PAIR, 58, 4, 0, 0, 0}, + {ASTLR, C_REG, C_NONE, C_NONE, C_ZOREG, 59, 4, 0, 0, 0}, // RegTo2=C_NONE + {ASTXR, C_REG, C_NONE, C_NONE, C_ZOREG, 59, 4, 0, 0, 0}, // RegTo2=C_REG + {ASTLXR, C_REG, C_NONE, C_NONE, C_ZOREG, 59, 4, 0, 0, 0}, // RegTo2=C_REG + {ASTXP, C_PAIR, C_NONE, C_NONE, C_ZOREG, 59, 4, 0, 0, 0}, + + /* VLD[1-4]/VST[1-4] */ + {AVLD1, C_ZOREG, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, 0}, + {AVLD1, C_LOREG, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, C_XPOST}, + {AVLD1, C_ROFF, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, C_XPOST}, + {AVLD1R, C_ZOREG, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, 0}, + {AVLD1R, C_LOREG, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, C_XPOST}, + {AVLD1R, C_ROFF, C_NONE, C_NONE, C_LIST, 81, 4, 0, 0, C_XPOST}, + {AVLD1, C_LOREG, C_NONE, C_NONE, C_ELEM, 97, 4, 0, 0, C_XPOST}, + {AVLD1, C_ROFF, C_NONE, C_NONE, C_ELEM, 97, 4, 0, 0, C_XPOST}, + {AVLD1, C_LOREG, C_NONE, C_NONE, C_ELEM, 97, 4, 0, 0, 0}, + {AVST1, C_LIST, C_NONE, C_NONE, C_ZOREG, 84, 4, 0, 0, 0}, + {AVST1, C_LIST, C_NONE, C_NONE, C_LOREG, 84, 4, 0, 0, C_XPOST}, + {AVST1, C_LIST, C_NONE, C_NONE, C_ROFF, 84, 4, 0, 0, C_XPOST}, + {AVST2, C_LIST, C_NONE, C_NONE, C_ZOREG, 84, 4, 0, 0, 0}, + {AVST2, C_LIST, C_NONE, C_NONE, C_LOREG, 84, 4, 0, 0, C_XPOST}, + {AVST2, C_LIST, C_NONE, C_NONE, C_ROFF, 84, 4, 0, 0, C_XPOST}, + {AVST3, C_LIST, C_NONE, C_NONE, C_ZOREG, 84, 4, 0, 0, 0}, + {AVST3, C_LIST, C_NONE, C_NONE, C_LOREG, 84, 4, 0, 0, C_XPOST}, + {AVST3, C_LIST, C_NONE, C_NONE, C_ROFF, 84, 4, 0, 0, C_XPOST}, + {AVST4, C_LIST, C_NONE, C_NONE, C_ZOREG, 84, 4, 0, 0, 0}, + {AVST4, C_LIST, C_NONE, C_NONE, C_LOREG, 84, 4, 0, 0, C_XPOST}, + {AVST4, C_LIST, C_NONE, C_NONE, C_ROFF, 84, 4, 0, 0, C_XPOST}, + {AVST1, C_ELEM, C_NONE, C_NONE, C_LOREG, 96, 4, 0, 0, C_XPOST}, + {AVST1, C_ELEM, C_NONE, C_NONE, C_ROFF, 96, 4, 0, 0, C_XPOST}, + {AVST1, C_ELEM, C_NONE, C_NONE, C_LOREG, 96, 4, 0, 0, 0}, + + /* special */ + {AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 35, 4, 0, 0, 0}, + {AMRS, C_SPR, C_NONE, C_NONE, C_REG, 35, 4, 0, 0, 0}, + {AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 36, 4, 0, 0, 0}, + {AMSR, C_REG, 
C_NONE, C_NONE, C_SPR, 36, 4, 0, 0, 0}, + {AMOVD, C_VCON, C_NONE, C_NONE, C_SPR, 37, 4, 0, 0, 0}, + {AMSR, C_VCON, C_NONE, C_NONE, C_SPR, 37, 4, 0, 0, 0}, + {APRFM, C_UOREG32K, C_NONE, C_NONE, C_SPR, 91, 4, 0, 0, 0}, + {APRFM, C_UOREG32K, C_NONE, C_NONE, C_LCON, 91, 4, 0, 0, 0}, + {ADMB, C_VCON, C_NONE, C_NONE, C_NONE, 51, 4, 0, 0, 0}, + {AHINT, C_VCON, C_NONE, C_NONE, C_NONE, 52, 4, 0, 0, 0}, + {ASYS, C_VCON, C_NONE, C_NONE, C_NONE, 50, 4, 0, 0, 0}, + {ASYS, C_VCON, C_REG, C_NONE, C_NONE, 50, 4, 0, 0, 0}, + {ASYSL, C_VCON, C_NONE, C_NONE, C_REG, 50, 4, 0, 0, 0}, + + /* encryption instructions */ + {AAESD, C_VREG, C_NONE, C_NONE, C_VREG, 29, 4, 0, 0, 0}, // for compatibility with old code + {AAESD, C_ARNG, C_NONE, C_NONE, C_ARNG, 29, 4, 0, 0, 0}, // recommend using the new one for better readability + {ASHA1C, C_VREG, C_REG, C_NONE, C_VREG, 1, 4, 0, 0, 0}, + {ASHA1C, C_ARNG, C_VREG, C_NONE, C_VREG, 1, 4, 0, 0, 0}, + {ASHA1H, C_VREG, C_NONE, C_NONE, C_VREG, 29, 4, 0, 0, 0}, + {ASHA1SU0, C_ARNG, C_ARNG, C_NONE, C_ARNG, 1, 4, 0, 0, 0}, + {ASHA256H, C_ARNG, C_VREG, C_NONE, C_VREG, 1, 4, 0, 0, 0}, + {AVREV32, C_ARNG, C_NONE, C_NONE, C_ARNG, 83, 4, 0, 0, 0}, + {AVPMULL, C_ARNG, C_ARNG, C_NONE, C_ARNG, 93, 4, 0, 0, 0}, + + {obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 90, 4, 0, 0, 0}, + {obj.APCDATA, C_VCON, C_NONE, C_NONE, C_VCON, 0, 0, 0, 0, 0}, + {obj.AFUNCDATA, C_VCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0, 0, 0}, + {obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, + {obj.ANOP, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // nop variants, see #40689 + {obj.ANOP, C_REG, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, + {obj.ANOP, C_VREG, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, + {obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as AB/ABL + {obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as AB/ABL + {obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, // align code + + {obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0}, +} + +/* + * valid pstate field values, and value to use in instruction + */ +var pstatefield = []struct { + reg int16 + enc uint32 +}{ + {REG_SPSel, 0<<16 | 4<<12 | 5<<5}, + {REG_DAIFSet, 3<<16 | 4<<12 | 6<<5}, + {REG_DAIFClr, 3<<16 | 4<<12 | 7<<5}, +} + +var prfopfield = []struct { + reg int16 + enc uint32 +}{ + {REG_PLDL1KEEP, 0}, + {REG_PLDL1STRM, 1}, + {REG_PLDL2KEEP, 2}, + {REG_PLDL2STRM, 3}, + {REG_PLDL3KEEP, 4}, + {REG_PLDL3STRM, 5}, + {REG_PLIL1KEEP, 8}, + {REG_PLIL1STRM, 9}, + {REG_PLIL2KEEP, 10}, + {REG_PLIL2STRM, 11}, + {REG_PLIL3KEEP, 12}, + {REG_PLIL3STRM, 13}, + {REG_PSTL1KEEP, 16}, + {REG_PSTL1STRM, 17}, + {REG_PSTL2KEEP, 18}, + {REG_PSTL2STRM, 19}, + {REG_PSTL3KEEP, 20}, + {REG_PSTL3STRM, 21}, +} + +// OP_NOOP is the encoding of the NOOP instruction, used for padding. +const OP_NOOP = 0xd503201f + +// align code to a certain length by padding bytes. 
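+// For example, at pc 0x24 an alignment value of 16 yields -0x24 & 15 = 12 bytes of padding, i.e. three NOOP instructions.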
+func pcAlignPadLength(pc int64, alignedValue int64, ctxt *obj.Link) int { + if !((alignedValue&(alignedValue-1) == 0) && 8 <= alignedValue && alignedValue <= 2048) { + ctxt.Diag("alignment value of an instruction must be a power of two and in the range [8, 2048], got %d\n", alignedValue) + } + return int(-pc & (alignedValue - 1)) +} + +func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { + if ctxt.Retpoline { + ctxt.Diag("-spectre=ret not supported on arm64") + ctxt.Retpoline = false // don't keep printing + } + + p := cursym.Func.Text + if p == nil || p.Link == nil { // handle external functions and ELF section symbols + return + } + + if oprange[AAND&obj.AMask] == nil { + ctxt.Diag("arm64 ops not initialized, call arm64.buildop first") + } + + c := ctxt7{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset & 0xffffffff), extrasize: int32(p.To.Offset >> 32)} + p.To.Offset &= 0xffffffff // extrasize is no longer needed + + bflag := 1 + pc := int64(0) + p.Pc = pc + var m int + var o *Optab + for p = p.Link; p != nil; p = p.Link { + if p.As == ADWORD && (pc&7) != 0 { + pc += 4 + } + p.Pc = pc + o = c.oplook(p) + m = int(o.size) + if m == 0 { + switch p.As { + case obj.APCALIGN: + alignedValue := p.From.Offset + m = pcAlignPadLength(pc, alignedValue, ctxt) + // Update the current text symbol alignment value. + if int32(alignedValue) > cursym.Func.Align { + cursym.Func.Align = int32(alignedValue) + } + break + case obj.ANOP, obj.AFUNCDATA, obj.APCDATA: + continue + default: + c.ctxt.Diag("zero-width instruction\n%v", p) + } + } + switch o.flag & (LFROM | LTO) { + case LFROM: + c.addpool(p, &p.From) + + case LTO: + c.addpool(p, &p.To) + break + } + + if p.As == AB || p.As == obj.ARET || p.As == AERET { /* TODO: other unconditional operations */ + c.checkpool(p, 0) + } + pc += int64(m) + if c.blitrl != nil { + c.checkpool(p, 1) + } + } + + c.cursym.Size = pc + + /* + * if any procedure is large enough to + * generate a large SBRA branch, then + * generate extra passes putting branches + * around jmps to fix. this is rare. + */ + for bflag != 0 { + bflag = 0 + pc = 0 + for p = c.cursym.Func.Text.Link; p != nil; p = p.Link { + if p.As == ADWORD && (pc&7) != 0 { + pc += 4 + } + p.Pc = pc + o = c.oplook(p) + + /* very large branches */ + if (o.type_ == 7 || o.type_ == 39 || o.type_ == 40) && p.To.Target() != nil { // 7: BEQ and like, 39: CBZ and like, 40: TBZ and like + otxt := p.To.Target().Pc - pc + var toofar bool + switch o.type_ { + case 7, 39: // branch instruction encodes 19 bits + toofar = otxt <= -(1<<20)+10 || otxt >= (1<<20)-10 + case 40: // branch instruction encodes 14 bits + toofar = otxt <= -(1<<15)+10 || otxt >= (1<<15)-10 + } + if toofar { + q := c.newprog() + q.Link = p.Link + p.Link = q + q.As = AB + q.To.Type = obj.TYPE_BRANCH + q.To.SetTarget(p.To.Target()) + p.To.SetTarget(q) + q = c.newprog() + q.Link = p.Link + p.Link = q + q.As = AB + q.To.Type = obj.TYPE_BRANCH + q.To.SetTarget(q.Link.Link) + bflag = 1 + } + } + m = int(o.size) + + if m == 0 { + switch p.As { + case obj.APCALIGN: + alignedValue := p.From.Offset + m = pcAlignPadLength(pc, alignedValue, ctxt) + break + case obj.ANOP, obj.AFUNCDATA, obj.APCDATA: + continue + default: + c.ctxt.Diag("zero-width instruction\n%v", p) + } + } + + pc += int64(m) + } + } + + pc += -pc & (funcAlign - 1) + c.cursym.Size = pc + + /* + * lay out the code, emitting code and data relocations. 
+ */ + c.cursym.Grow(c.cursym.Size) + bp := c.cursym.P + psz := int32(0) + var i int + var out [6]uint32 + for p := c.cursym.Func.Text.Link; p != nil; p = p.Link { + c.pc = p.Pc + o = c.oplook(p) + + // need to align DWORDs on 8-byte boundary. The ISA doesn't + // require it, but the various 64-bit loads we generate assume it. + if o.as == ADWORD && psz%8 != 0 { + bp[3] = 0 + bp[2] = bp[3] + bp[1] = bp[2] + bp[0] = bp[1] + bp = bp[4:] + psz += 4 + } + + if int(o.size) > 4*len(out) { + log.Fatalf("out array in span7 is too small, need at least %d for %v", o.size/4, p) + } + if p.As == obj.APCALIGN { + alignedValue := p.From.Offset + v := pcAlignPadLength(p.Pc, alignedValue, c.ctxt) + for i = 0; i < int(v/4); i++ { + // emit ANOOP instruction by the padding size + c.ctxt.Arch.ByteOrder.PutUint32(bp, OP_NOOP) + bp = bp[4:] + psz += 4 + } + } else { + c.asmout(p, o, out[:]) + for i = 0; i < int(o.size/4); i++ { + c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) + bp = bp[4:] + psz += 4 + } + } + } + + // Mark nonpreemptible instruction sequences. + // We use REGTMP as a scratch register during call injection, + // so instruction sequences that use REGTMP are unsafe to + // preempt asynchronously. + obj.MarkUnsafePoints(c.ctxt, c.cursym.Func.Text, c.newprog, c.isUnsafePoint, c.isRestartable) +} + +// isUnsafePoint returns whether p is an unsafe point. +func (c *ctxt7) isUnsafePoint(p *obj.Prog) bool { + // If p explicitly uses REGTMP, it's unsafe to preempt, because the + // preemption sequence clobbers REGTMP. + return p.From.Reg == REGTMP || p.To.Reg == REGTMP || p.Reg == REGTMP +} + +// isRestartable returns whether p is a multi-instruction sequence that, +// if preempted, can be restarted. +func (c *ctxt7) isRestartable(p *obj.Prog) bool { + if c.isUnsafePoint(p) { + return false + } + // If p is a multi-instruction sequence with uses REGTMP inserted by + // the assembler in order to materialize a large constant/offset, we + // can restart p (at the start of the instruction sequence), recompute + // the content of REGTMP, upon async preemption. Currently, all cases + // of assembler-inserted REGTMP fall into this category. + // If p doesn't use REGTMP, it can be simply preempted, so we don't + // mark it. + o := c.oplook(p) + return o.size > 4 && o.flag&NOTUSETMP == 0 +} + +/* + * when the first reference to the literal pool threatens + * to go out of range of a 1Mb PC-relative offset + * drop the pool now, and branch round it. + */ +func (c *ctxt7) checkpool(p *obj.Prog, skip int) { + if c.pool.size >= 0xffff0 || !ispcdisp(int32(p.Pc+4+int64(c.pool.size)-int64(c.pool.start)+8)) { + c.flushpool(p, skip) + } else if p.Link == nil { + c.flushpool(p, 2) + } +} + +func (c *ctxt7) flushpool(p *obj.Prog, skip int) { + if c.blitrl != nil { + if skip != 0 { + if c.ctxt.Debugvlog && skip == 1 { + fmt.Printf("note: flush literal pool at %#x: len=%d ref=%x\n", uint64(p.Pc+4), c.pool.size, c.pool.start) + } + q := c.newprog() + q.As = AB + q.To.Type = obj.TYPE_BRANCH + q.To.SetTarget(p.Link) + q.Link = c.blitrl + q.Pos = p.Pos + c.blitrl = q + } else if p.Pc+int64(c.pool.size)-int64(c.pool.start) < maxPCDisp { + return + } + + // The line number for constant pool entries doesn't really matter. + // We set it to the line number of the preceding instruction so that + // there are no deltas to encode in the pc-line tables. 
+ for q := c.blitrl; q != nil; q = q.Link { + q.Pos = p.Pos + } + + c.elitrl.Link = p.Link + p.Link = c.blitrl + + c.blitrl = nil /* BUG: should refer back to values until out-of-range */ + c.elitrl = nil + c.pool.size = 0 + c.pool.start = 0 + } +} + +/* + * MOVD foo(SB), R is actually + * MOVD addr, REGTMP + * MOVD REGTMP, R + * where addr is the address of the DWORD containing the address of foo. + * + * TODO: hash + */ +func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { + cls := c.aclass(a) + lit := c.instoffset + t := c.newprog() + t.As = AWORD + sz := 4 + + if a.Type == obj.TYPE_CONST { + if lit != int64(int32(lit)) && uint64(lit) != uint64(uint32(lit)) { + // out of range -0x80000000 ~ 0xffffffff, must store 64-bit + t.As = ADWORD + sz = 8 + } // else store 32-bit + } else if p.As == AMOVD && a.Type != obj.TYPE_MEM || cls == C_ADDR || cls == C_VCON || lit != int64(int32(lit)) || uint64(lit) != uint64(uint32(lit)) { + // conservative: don't know if we want signed or unsigned extension. + // in case of ambiguity, store 64-bit + t.As = ADWORD + sz = 8 + } + + switch cls { + // TODO(aram): remove. + default: + if a.Name != obj.NAME_EXTERN { + fmt.Printf("addpool: %v in %v shouldn't go to default case\n", DRconv(cls), p) + } + + t.To.Offset = a.Offset + t.To.Sym = a.Sym + t.To.Type = a.Type + t.To.Name = a.Name + + /* This is here because MOV uint12<<12, R is disabled in optab. + Because of this, we need to load the constant from memory. */ + case C_ADDCON: + fallthrough + + case C_ZAUTO, + C_PSAUTO, + C_PSAUTO_8, + C_PSAUTO_4, + C_PPAUTO, + C_UAUTO4K_8, + C_UAUTO4K_4, + C_UAUTO4K_2, + C_UAUTO4K, + C_UAUTO8K_8, + C_UAUTO8K_4, + C_UAUTO8K, + C_UAUTO16K_8, + C_UAUTO16K, + C_UAUTO32K, + C_NSAUTO_8, + C_NSAUTO_4, + C_NSAUTO, + C_NPAUTO, + C_NAUTO4K, + C_LAUTO, + C_PPOREG, + C_PSOREG, + C_PSOREG_4, + C_PSOREG_8, + C_UOREG4K_8, + C_UOREG4K_4, + C_UOREG4K_2, + C_UOREG4K, + C_UOREG8K_8, + C_UOREG8K_4, + C_UOREG8K, + C_UOREG16K_8, + C_UOREG16K, + C_UOREG32K, + C_NSOREG_8, + C_NSOREG_4, + C_NSOREG, + C_NPOREG, + C_NOREG4K, + C_LOREG, + C_LACON, + C_ADDCON2, + C_LCON, + C_VCON: + if a.Name == obj.NAME_EXTERN { + fmt.Printf("addpool: %v in %v needs reloc\n", DRconv(cls), p) + } + + t.To.Type = obj.TYPE_CONST + t.To.Offset = lit + break + } + + for q := c.blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */ + if q.To == t.To { + p.Pool = q + return + } + } + + q := c.newprog() + *q = *t + q.Pc = int64(c.pool.size) + if c.blitrl == nil { + c.blitrl = q + c.pool.start = uint32(p.Pc) + } else { + c.elitrl.Link = q + } + c.elitrl = q + c.pool.size = -c.pool.size & (funcAlign - 1) + c.pool.size += uint32(sz) + p.Pool = q +} + +func (c *ctxt7) regoff(a *obj.Addr) uint32 { + c.instoffset = 0 + c.aclass(a) + return uint32(c.instoffset) +} + +func isSTLXRop(op obj.As) bool { + switch op { + case ASTLXR, ASTLXRW, ASTLXRB, ASTLXRH, + ASTXR, ASTXRW, ASTXRB, ASTXRH: + return true + } + return false +} + +func isSTXPop(op obj.As) bool { + switch op { + case ASTXP, ASTLXP, ASTXPW, ASTLXPW: + return true + } + return false +} + +func isANDop(op obj.As) bool { + switch op { + case AAND, AORR, AEOR, AANDS, ATST, + ABIC, AEON, AORN, ABICS: + return true + } + return false +} + +func isANDWop(op obj.As) bool { + switch op { + case AANDW, AORRW, AEORW, AANDSW, ATSTW, + ABICW, AEONW, AORNW, ABICSW: + return true + } + return false +} + +func isADDop(op obj.As) bool { + switch op { + case AADD, AADDS, ASUB, ASUBS, ACMN, ACMP: + return true + } + return false +} + +func isADDWop(op obj.As) bool { + switch op { + 
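+ // the 32-bit (W-suffixed) add/subtract-class ops, including the compares.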
case AADDW, AADDSW, ASUBW, ASUBSW, ACMNW, ACMPW: + return true + } + return false +} + +func isRegShiftOrExt(a *obj.Addr) bool { + return (a.Index-obj.RBaseARM64)&REG_EXT != 0 || (a.Index-obj.RBaseARM64)&REG_LSL != 0 +} + +// Maximum PC-relative displacement. +// The actual limit is ±2²⁰, but we are conservative +// to avoid needing to recompute the literal pool flush points +// as span-dependent jumps are enlarged. +const maxPCDisp = 512 * 1024 + +// ispcdisp reports whether v is a valid PC-relative displacement. +func ispcdisp(v int32) bool { + return -maxPCDisp < v && v < maxPCDisp && v&3 == 0 +} + +func isaddcon(v int64) bool { + /* uimm12 or uimm24? */ + if v < 0 { + return false + } + if (v & 0xFFF) == 0 { + v >>= 12 + } + return v <= 0xFFF +} + +func isaddcon2(v int64) bool { + return 0 <= v && v <= 0xFFFFFF +} + +// isbitcon reports whether a constant can be encoded into a logical instruction. +// bitcon has a binary form of repetition of a bit sequence of length 2, 4, 8, 16, 32, or 64, +// which itself is a rotate (w.r.t. the length of the unit) of a sequence of ones. +// special cases: 0 and -1 are not bitcon. +// this function needs to run against virtually all the constants, so it needs to be fast. +// for this reason, bitcon testing and bitcon encoding are separate functions. +func isbitcon(x uint64) bool { + if x == 1<<64-1 || x == 0 { + return false + } + // determine the period and sign-extend a unit to 64 bits + switch { + case x != x>>32|x<<32: + // period is 64 + // nothing to do + case x != x>>16|x<<48: + // period is 32 + x = uint64(int64(int32(x))) + case x != x>>8|x<<56: + // period is 16 + x = uint64(int64(int16(x))) + case x != x>>4|x<<60: + // period is 8 + x = uint64(int64(int8(x))) + default: + // period is 4 or 2, always true + // 0001, 0010, 0100, 1000 -- 0001 rotate + // 0011, 0110, 1100, 1001 -- 0011 rotate + // 0111, 1011, 1101, 1110 -- 0111 rotate + // 0101, 1010 -- 01 rotate, repeat + return true + } + return sequenceOfOnes(x) || sequenceOfOnes(^x) +} + +// sequenceOfOnes tests whether a constant is a sequence of ones in binary, with leading and trailing zeros +func sequenceOfOnes(x uint64) bool { + y := x & -x // lowest set bit of x. x is good iff x+y is a power of 2 + y += x + return (y-1)&y == 0 +} + +// bitconEncode returns the encoding of a bitcon used in logical instructions +// x is known to be a bitcon +// a bitcon is a sequence of n ones at low bits (i.e. 1<<n-1), right rotated +// by R bits, and repeated with period of 64, 32, 16, 8, 4, or 2. +// it is encoded in logical instructions with 3 bitfields +// N (1 bit) : R (6 bits) : S (6 bits), where +// N=1           -- period=64 +// N=0, S=0xxxxx -- period=32 +// N=0, S=10xxxx -- period=16 +// N=0, S=110xxx -- period=8 +// N=0, S=1110xx -- period=4 +// N=0, S=11110x -- period=2 +// R is the shift amount, and S the lowest n-1 bits +func bitconEncode(x uint64, mode int) uint32 { + var period uint32 + // determine the period and sign-extend a unit to 64 bits + switch { + case x != x>>32|x<<32: + period = 64 + case x != x>>16|x<<48: + period = 32 + x = uint64(int64(int32(x))) + case x != x>>8|x<<56: + period = 16 + x = uint64(int64(int16(x))) + case x != x>>4|x<<60: + period = 8 + x = uint64(int64(int8(x))) + case x != x>>2|x<<62: + period = 4 + x = uint64(int64(x<<60) >> 60) + default: + period = 2 + x = uint64(int64(x<<62) >> 62) + } + neg := false + if int64(x) < 0 { + x = ^x + neg = true + } + y := x & -x // lowest set bit of x. 
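+ // Worked example: x = 0x0f0f0f0f0f0f0f0f has period 8, so the unit sign-extends to 0x0f; + // then y = 1, s = 0, n = 4, giving N=0, R=0, S=0b110011 -- the "N=0, S=110xxx" form above.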
+ s := log2(y) + n := log2(x+y) - s // x (or ^x) is a sequence of n ones left shifted by s bits + if neg { + // ^x is a sequence of n ones left shifted by s bits + // adjust n, s for x + s = n + s + n = period - n + } + + N := uint32(0) + if mode == 64 && period == 64 { + N = 1 + } + R := (period - s) & (period - 1) & uint32(mode-1) // shift amount of right rotate + S := (n - 1) | 63&^(period<<1-1) // low bits = #ones - 1, high bits encodes period + return N<<22 | R<<16 | S<<10 +} + +func log2(x uint64) uint32 { + if x == 0 { + panic("log2 of 0") + } + n := uint32(0) + if x >= 1<<32 { + x >>= 32 + n += 32 + } + if x >= 1<<16 { + x >>= 16 + n += 16 + } + if x >= 1<<8 { + x >>= 8 + n += 8 + } + if x >= 1<<4 { + x >>= 4 + n += 4 + } + if x >= 1<<2 { + x >>= 2 + n += 2 + } + if x >= 1<<1 { + x >>= 1 + n += 1 + } + return n +} + +func autoclass(l int64) int { + if l == 0 { + return C_ZAUTO + } + + if l < 0 { + if l >= -256 && (l&7) == 0 { + return C_NSAUTO_8 + } + if l >= -256 && (l&3) == 0 { + return C_NSAUTO_4 + } + if l >= -256 { + return C_NSAUTO + } + if l >= -512 && (l&7) == 0 { + return C_NPAUTO + } + if l >= -4095 { + return C_NAUTO4K + } + return C_LAUTO + } + + if l <= 255 { + if (l & 7) == 0 { + return C_PSAUTO_8 + } + if (l & 3) == 0 { + return C_PSAUTO_4 + } + return C_PSAUTO + } + if l <= 504 && l&7 == 0 { + return C_PPAUTO + } + if l <= 4095 { + if l&7 == 0 { + return C_UAUTO4K_8 + } + if l&3 == 0 { + return C_UAUTO4K_4 + } + if l&1 == 0 { + return C_UAUTO4K_2 + } + return C_UAUTO4K + } + if l <= 8190 { + if l&7 == 0 { + return C_UAUTO8K_8 + } + if l&3 == 0 { + return C_UAUTO8K_4 + } + if l&1 == 0 { + return C_UAUTO8K + } + } + if l <= 16380 { + if l&7 == 0 { + return C_UAUTO16K_8 + } + if l&3 == 0 { + return C_UAUTO16K + } + } + if l <= 32760 && (l&7) == 0 { + return C_UAUTO32K + } + return C_LAUTO +} + +func oregclass(l int64) int { + return autoclass(l) - C_ZAUTO + C_ZOREG +} + +/* + * given an offset v and a class c (see above) + * return the offset value to use in the instruction, + * scaled if necessary + */ +func (c *ctxt7) offsetshift(p *obj.Prog, v int64, cls int) int64 { + s := 0 + if cls >= C_SEXT1 && cls <= C_SEXT16 { + s = cls - C_SEXT1 + } else { + switch cls { + case C_UAUTO4K, C_UOREG4K, C_ZOREG: + s = 0 + case C_UAUTO8K, C_UOREG8K: + s = 1 + case C_UAUTO16K, C_UOREG16K: + s = 2 + case C_UAUTO32K, C_UOREG32K: + s = 3 + default: + c.ctxt.Diag("bad class: %v\n%v", DRconv(cls), p) + } + } + vs := v >> uint(s) + if vs<<uint(s) != v { + c.ctxt.Diag("odd offset: %d\n%v", v, p) + } + return vs +} + +func rclass(r int16) int { + switch { + case REG_R0 <= r && r <= REG_R30: // not 31 + return C_REG + case r == REGZERO: + return C_ZCON + case REG_F0 <= r && r <= REG_F31: + return C_FREG + case REG_V0 <= r && r <= REG_V31: + return C_VREG + case r == REGSP: + return C_RSP + case r >= REG_ARNG && r < REG_ELEM: + return C_ARNG + case r >= REG_ELEM && r < REG_ELEM_END: + return C_ELEM + case r >= REG_UXTB && r < REG_SPECIAL: + return C_EXTREG + case r >= REG_SPECIAL: + return C_SPR + } + return C_GOK +} + +// con32class reclassifies the constant of a 32-bit instruction. Because the constant is 32 bits +// but is saved in Offset, which is an int64, con32class treats it as a uint32 and reclassifies it. 
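+// For example, $0xfff is both an add immediate and a bitcon and classifies as C_ABCON0, +// while $0x1001 fits a single MOVZW and classifies as C_MOVCON.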
+func (c *ctxt7) con32class(a *obj.Addr) int { + v := uint32(a.Offset) + if v == 0 { + return C_ZCON + } + if isaddcon(int64(v)) { + if v <= 0xFFF { + if isbitcon(uint64(a.Offset)) { + return C_ABCON0 + } + return C_ADDCON0 + } + if isbitcon(uint64(a.Offset)) { + return C_ABCON + } + if movcon(int64(v)) >= 0 { + return C_AMCON + } + if movcon(int64(^v)) >= 0 { + return C_AMCON + } + return C_ADDCON + } + + t := movcon(int64(v)) + if t >= 0 { + if isbitcon(uint64(a.Offset)) { + return C_MBCON + } + return C_MOVCON + } + + t = movcon(int64(^v)) + if t >= 0 { + if isbitcon(uint64(a.Offset)) { + return C_MBCON + } + return C_MOVCON + } + + if isbitcon(uint64(a.Offset)) { + return C_BITCON + } + + if 0 <= v && v <= 0xffffff { + return C_ADDCON2 + } + return C_LCON +} + +// con64class reclassifies the constant of C_VCON and C_LCON class. +func (c *ctxt7) con64class(a *obj.Addr) int { + zeroCount := 0 + negCount := 0 + for i := uint(0); i < 4; i++ { + immh := uint32(a.Offset >> (i * 16) & 0xffff) + if immh == 0 { + zeroCount++ + } else if immh == 0xffff { + negCount++ + } + } + if zeroCount >= 3 || negCount >= 3 { + return C_MOVCON + } else if zeroCount == 2 || negCount == 2 { + return C_MOVCON2 + } else if zeroCount == 1 || negCount == 1 { + return C_MOVCON3 + } else { + return C_VCON + } +} + +func (c *ctxt7) aclass(a *obj.Addr) int { + switch a.Type { + case obj.TYPE_NONE: + return C_NONE + + case obj.TYPE_REG: + return rclass(a.Reg) + + case obj.TYPE_REGREG: + return C_PAIR + + case obj.TYPE_SHIFT: + return C_SHIFT + + case obj.TYPE_REGLIST: + return C_LIST + + case obj.TYPE_MEM: + // The base register should be an integer register. + if int16(REG_F0) <= a.Reg && a.Reg <= int16(REG_V31) { + break + } + switch a.Name { + case obj.NAME_EXTERN, obj.NAME_STATIC: + if a.Sym == nil { + break + } + c.instoffset = a.Offset + if a.Sym != nil { // use relocation + if a.Sym.Type == objabi.STLSBSS { + if c.ctxt.Flag_shared { + return C_TLS_IE + } else { + return C_TLS_LE + } + } + return C_ADDR + } + return C_LEXT + + case obj.NAME_GOTREF: + return C_GOTADDR + + case obj.NAME_AUTO: + if a.Reg == REGSP { + // unset base register for better printing, since + // a.Offset is still relative to pseudo-SP. + a.Reg = obj.REG_NONE + } + // The frame top 8 or 16 bytes are for FP + c.instoffset = int64(c.autosize) + a.Offset - int64(c.extrasize) + return autoclass(c.instoffset) + + case obj.NAME_PARAM: + if a.Reg == REGSP { + // unset base register for better printing, since + // a.Offset is still relative to pseudo-FP. + a.Reg = obj.REG_NONE + } + c.instoffset = int64(c.autosize) + a.Offset + 8 + return autoclass(c.instoffset) + + case obj.NAME_NONE: + if a.Index != 0 { + if a.Offset != 0 { + if isRegShiftOrExt(a) { + // extended or shifted register offset, (Rn)(Rm.UXTW<<2) or (Rn)(Rm<<2). 
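+ // e.g. MOVD (R2)(R6.SXTW<<3), R1 or MOVD (R2)(R6<<3), R1.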
+ return C_ROFF + } + return C_GOK + } + // register offset, (Rn)(Rm) + return C_ROFF + } + c.instoffset = a.Offset + return oregclass(c.instoffset) + } + return C_GOK + + case obj.TYPE_FCONST: + return C_FCON + + case obj.TYPE_TEXTSIZE: + return C_TEXTSIZE + + case obj.TYPE_CONST, obj.TYPE_ADDR: + switch a.Name { + case obj.NAME_NONE: + c.instoffset = a.Offset + if a.Reg != 0 && a.Reg != REGZERO { + break + } + v := c.instoffset + if v == 0 { + return C_ZCON + } + if isaddcon(v) { + if v <= 0xFFF { + if isbitcon(uint64(v)) { + return C_ABCON0 + } + return C_ADDCON0 + } + if isbitcon(uint64(v)) { + return C_ABCON + } + if movcon(v) >= 0 { + return C_AMCON + } + if movcon(^v) >= 0 { + return C_AMCON + } + return C_ADDCON + } + + t := movcon(v) + if t >= 0 { + if isbitcon(uint64(v)) { + return C_MBCON + } + return C_MOVCON + } + + t = movcon(^v) + if t >= 0 { + if isbitcon(uint64(v)) { + return C_MBCON + } + return C_MOVCON + } + + if isbitcon(uint64(v)) { + return C_BITCON + } + + if 0 <= v && v <= 0xffffff { + return C_ADDCON2 + } + + if uint64(v) == uint64(uint32(v)) || v == int64(int32(v)) { + return C_LCON + } + return C_VCON + + case obj.NAME_EXTERN, obj.NAME_STATIC: + if a.Sym == nil { + return C_GOK + } + if a.Sym.Type == objabi.STLSBSS { + c.ctxt.Diag("taking address of TLS variable is not supported") + } + c.instoffset = a.Offset + return C_VCONADDR + + case obj.NAME_AUTO: + if a.Reg == REGSP { + // unset base register for better printing, since + // a.Offset is still relative to pseudo-SP. + a.Reg = obj.REG_NONE + } + // The frame top 8 or 16 bytes are for FP + c.instoffset = int64(c.autosize) + a.Offset - int64(c.extrasize) + + case obj.NAME_PARAM: + if a.Reg == REGSP { + // unset base register for better printing, since + // a.Offset is still relative to pseudo-FP. + a.Reg = obj.REG_NONE + } + c.instoffset = int64(c.autosize) + a.Offset + 8 + default: + return C_GOK + } + cf := c.instoffset + if isaddcon(cf) || isaddcon(-cf) { + return C_AACON + } + if isaddcon2(cf) { + return C_AACON2 + } + + return C_LACON + + case obj.TYPE_BRANCH: + return C_SBRA + } + + return C_GOK +} + +func oclass(a *obj.Addr) int { + return int(a.Class) - 1 +} + +func (c *ctxt7) oplook(p *obj.Prog) *Optab { + a1 := int(p.Optab) + if a1 != 0 { + return &optab[a1-1] + } + a1 = int(p.From.Class) + if a1 == 0 { + a0 := c.aclass(&p.From) + // do not break C_ADDCON2 when S bit is set + if (p.As == AADDS || p.As == AADDSW || p.As == ASUBS || p.As == ASUBSW) && a0 == C_ADDCON2 { + a0 = C_LCON + } + a1 = a0 + 1 + p.From.Class = int8(a1) + // more specific classification of 32-bit integers + if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE { + if p.As == AMOVW || isADDWop(p.As) { + ra0 := c.con32class(&p.From) + // do not break C_ADDCON2 when S bit is set + if (p.As == AADDSW || p.As == ASUBSW) && ra0 == C_ADDCON2 { + ra0 = C_LCON + } + a1 = ra0 + 1 + p.From.Class = int8(a1) + } + if isANDWop(p.As) && a0 != C_BITCON { + // For 32-bit logical instruction with constant, + // the BITCON test is special in that it looks at + // the 64-bit which has the high 32-bit as a copy + // of the low 32-bit. We have handled that and + // don't pass it to con32class. 
+ a1 = c.con32class(&p.From) + 1 + p.From.Class = int8(a1) + } + if ((p.As == AMOVD) || isANDop(p.As) || isADDop(p.As)) && (a0 == C_LCON || a0 == C_VCON) { + a1 = c.con64class(&p.From) + 1 + p.From.Class = int8(a1) + } + } + } + + a1-- + a3 := C_NONE + 1 + if p.GetFrom3() != nil { + a3 = int(p.GetFrom3().Class) + if a3 == 0 { + a3 = c.aclass(p.GetFrom3()) + 1 + p.GetFrom3().Class = int8(a3) + } + } + + a3-- + a4 := int(p.To.Class) + if a4 == 0 { + a4 = c.aclass(&p.To) + 1 + p.To.Class = int8(a4) + } + + a4-- + a2 := C_NONE + if p.Reg != 0 { + a2 = rclass(p.Reg) + } + + if false { + fmt.Printf("oplook %v %d %d %d %d\n", p.As, a1, a2, a3, a4) + fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type) + } + + ops := oprange[p.As&obj.AMask] + c1 := &xcmp[a1] + c2 := &xcmp[a2] + c3 := &xcmp[a3] + c4 := &xcmp[a4] + c5 := &xcmp[p.Scond>>5] + for i := range ops { + op := &ops[i] + if (int(op.a2) == a2 || c2[op.a2]) && c5[op.scond>>5] && c1[op.a1] && c3[op.a3] && c4[op.a4] { + p.Optab = uint16(cap(optab) - cap(ops) + i + 1) + return op + } + } + + c.ctxt.Diag("illegal combination: %v %v %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), p.From.Type, p.To.Type) + // Turn illegal instruction into an UNDEF, avoid crashing in asmout + return &Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 90, 4, 0, 0, 0} +} + +func cmp(a int, b int) bool { + if a == b { + return true + } + switch a { + case C_RSP: + if b == C_REG { + return true + } + + case C_REG: + if b == C_ZCON { + return true + } + + case C_ADDCON0: + if b == C_ZCON || b == C_ABCON0 { + return true + } + + case C_ADDCON: + if b == C_ZCON || b == C_ABCON0 || b == C_ADDCON0 || b == C_ABCON || b == C_AMCON { + return true + } + + case C_BITCON: + if b == C_ABCON0 || b == C_ABCON || b == C_MBCON { + return true + } + + case C_MOVCON: + if b == C_MBCON || b == C_ZCON || b == C_ADDCON0 || b == C_AMCON { + return true + } + + case C_ADDCON2: + if b == C_ZCON || b == C_ADDCON || b == C_ADDCON0 { + return true + } + + case C_LCON: + if b == C_ZCON || b == C_BITCON || b == C_ADDCON || b == C_ADDCON0 || b == C_ABCON || b == C_ABCON0 || b == C_MBCON || b == C_MOVCON || b == C_ADDCON2 || b == C_AMCON { + return true + } + + case C_MOVCON2: + return cmp(C_LCON, b) + + case C_VCON: + return cmp(C_LCON, b) + + case C_LACON: + if b == C_AACON || b == C_AACON2 { + return true + } + + case C_SEXT2: + if b == C_SEXT1 { + return true + } + + case C_SEXT4: + if b == C_SEXT1 || b == C_SEXT2 { + return true + } + + case C_SEXT8: + if b >= C_SEXT1 && b <= C_SEXT4 { + return true + } + + case C_SEXT16: + if b >= C_SEXT1 && b <= C_SEXT8 { + return true + } + + case C_LEXT: + if b >= C_SEXT1 && b <= C_SEXT16 { + return true + } + + case C_NSAUTO_4: + if b == C_NSAUTO_8 { + return true + } + + case C_NSAUTO: + switch b { + case C_NSAUTO_4, C_NSAUTO_8: + return true + } + + case C_NPAUTO: + switch b { + case C_NSAUTO_8: + return true + } + + case C_NAUTO4K: + switch b { + case C_NSAUTO_8, C_NSAUTO_4, C_NSAUTO, C_NPAUTO: + return true + } + + case C_PSAUTO_8: + if b == C_ZAUTO { + return true + } + + case C_PSAUTO_4: + switch b { + case C_ZAUTO, C_PSAUTO_8: + return true + } + + case C_PSAUTO: + switch b { + case C_ZAUTO, C_PSAUTO_8, C_PSAUTO_4: + return true + } + + case C_PPAUTO: + switch b { + case C_ZAUTO, C_PSAUTO_8: + return true + } + + case C_UAUTO4K: + switch b { + case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, + C_PPAUTO, C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8: + return true + } + + case C_UAUTO8K: + switch b { + case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, 
C_PSAUTO_8, C_PPAUTO, + C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO8K_4, C_UAUTO8K_8: + return true + } + + case C_UAUTO16K: + switch b { + case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, C_PPAUTO, + C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO8K_4, C_UAUTO8K_8, C_UAUTO16K_8: + return true + } + + case C_UAUTO32K: + switch b { + case C_ZAUTO, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, + C_PPAUTO, C_UAUTO4K_8, C_UAUTO8K_8, C_UAUTO16K_8: + return true + } + + case C_LAUTO: + switch b { + case C_ZAUTO, C_NSAUTO, C_NSAUTO_4, C_NSAUTO_8, C_NPAUTO, + C_NAUTO4K, C_PSAUTO, C_PSAUTO_4, C_PSAUTO_8, C_PPAUTO, + C_UAUTO4K, C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8, + C_UAUTO8K, C_UAUTO8K_4, C_UAUTO8K_8, + C_UAUTO16K, C_UAUTO16K_8, + C_UAUTO32K: + return true + } + + case C_NSOREG_4: + if b == C_NSOREG_8 { + return true + } + + case C_NSOREG: + switch b { + case C_NSOREG_4, C_NSOREG_8: + return true + } + + case C_NPOREG: + switch b { + case C_NSOREG_8: + return true + } + + case C_NOREG4K: + switch b { + case C_NSOREG_8, C_NSOREG_4, C_NSOREG, C_NPOREG: + return true + } + + case C_PSOREG_4: + switch b { + case C_ZOREG, C_PSOREG_8: + return true + } + + case C_PSOREG: + switch b { + case C_ZOREG, C_PSOREG_8, C_PSOREG_4: + return true + } + + case C_PPOREG: + switch b { + case C_ZOREG, C_PSOREG_8: + return true + } + + case C_UOREG4K: + switch b { + case C_ZOREG, C_PSOREG_4, C_PSOREG_8, C_PSOREG, + C_PPOREG, C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8: + return true + } + + case C_UOREG8K: + switch b { + case C_ZOREG, C_PSOREG_4, C_PSOREG_8, C_PSOREG, + C_PPOREG, C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8, + C_UOREG8K_4, C_UOREG8K_8: + return true + } + + case C_UOREG16K: + switch b { + case C_ZOREG, C_PSOREG_4, C_PSOREG_8, C_PSOREG, + C_PPOREG, C_UOREG4K_4, C_UOREG4K_8, C_UOREG8K_4, + C_UOREG8K_8, C_UOREG16K_8: + return true + } + + case C_UOREG32K: + switch b { + case C_ZOREG, C_PSOREG_4, C_PSOREG_8, C_PSOREG, + C_PPOREG, C_UOREG4K_8, C_UOREG8K_8, C_UOREG16K_8: + return true + } + + case C_LOREG: + switch b { + case C_ZOREG, C_NSOREG, C_NSOREG_4, C_NSOREG_8, C_NPOREG, + C_NOREG4K, C_PSOREG_4, C_PSOREG_8, C_PSOREG, C_PPOREG, + C_UOREG4K, C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8, + C_UOREG8K, C_UOREG8K_4, C_UOREG8K_8, + C_UOREG16K, C_UOREG16K_8, + C_UOREG32K: + return true + } + + case C_LBRA: + if b == C_SBRA { + return true + } + } + + return false +} + +type ocmp []Optab + +func (x ocmp) Len() int { + return len(x) +} + +func (x ocmp) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} + +func (x ocmp) Less(i, j int) bool { + p1 := &x[i] + p2 := &x[j] + if p1.as != p2.as { + return p1.as < p2.as + } + if p1.a1 != p2.a1 { + return p1.a1 < p2.a1 + } + if p1.a2 != p2.a2 { + return p1.a2 < p2.a2 + } + if p1.a3 != p2.a3 { + return p1.a3 < p2.a3 + } + if p1.a4 != p2.a4 { + return p1.a4 < p2.a4 + } + if p1.scond != p2.scond { + return p1.scond < p2.scond + } + return false +} + +func oprangeset(a obj.As, t []Optab) { + oprange[a&obj.AMask] = t +} + +func buildop(ctxt *obj.Link) { + if oprange[AAND&obj.AMask] != nil { + // Already initialized; stop now. + // This happens in the cmd/asm tests, + // each of which re-initializes the arch. 
+ return + } + + var n int + for i := 0; i < C_GOK; i++ { + for n = 0; n < C_GOK; n++ { + if cmp(n, i) { + xcmp[i][n] = true + } + } + } + for n = 0; optab[n].as != obj.AXXX; n++ { + } + sort.Sort(ocmp(optab[:n])) + for i := 0; i < n; i++ { + r := optab[i].as + start := i + for optab[i].as == r { + i++ + } + t := optab[start:i] + i-- + oprangeset(r, t) + switch r { + default: + ctxt.Diag("unknown op in build: %v", r) + ctxt.DiagFlush() + log.Fatalf("bad code") + + case AADD: + oprangeset(AADDS, t) + oprangeset(ASUB, t) + oprangeset(ASUBS, t) + oprangeset(AADDW, t) + oprangeset(AADDSW, t) + oprangeset(ASUBW, t) + oprangeset(ASUBSW, t) + + case AAND: /* logical immediate, logical shifted register */ + oprangeset(AANDW, t) + oprangeset(AEOR, t) + oprangeset(AEORW, t) + oprangeset(AORR, t) + oprangeset(AORRW, t) + oprangeset(ABIC, t) + oprangeset(ABICW, t) + oprangeset(AEON, t) + oprangeset(AEONW, t) + oprangeset(AORN, t) + oprangeset(AORNW, t) + + case AANDS: /* logical immediate, logical shifted register, set flags, cannot target RSP */ + oprangeset(AANDSW, t) + oprangeset(ABICS, t) + oprangeset(ABICSW, t) + + case ANEG: + oprangeset(ANEGS, t) + oprangeset(ANEGSW, t) + oprangeset(ANEGW, t) + + case AADC: /* rn=Rd */ + oprangeset(AADCW, t) + + oprangeset(AADCS, t) + oprangeset(AADCSW, t) + oprangeset(ASBC, t) + oprangeset(ASBCW, t) + oprangeset(ASBCS, t) + oprangeset(ASBCSW, t) + + case ANGC: /* rn=REGZERO */ + oprangeset(ANGCW, t) + + oprangeset(ANGCS, t) + oprangeset(ANGCSW, t) + + case ACMP: + oprangeset(ACMPW, t) + oprangeset(ACMN, t) + oprangeset(ACMNW, t) + + case ATST: + oprangeset(ATSTW, t) + + /* register/register, and shifted */ + case AMVN: + oprangeset(AMVNW, t) + + case AMOVK: + oprangeset(AMOVKW, t) + oprangeset(AMOVN, t) + oprangeset(AMOVNW, t) + oprangeset(AMOVZ, t) + oprangeset(AMOVZW, t) + + case ASWPD: + for i := range atomicInstructions { + oprangeset(i, t) + } + + case ABEQ: + oprangeset(ABNE, t) + oprangeset(ABCS, t) + oprangeset(ABHS, t) + oprangeset(ABCC, t) + oprangeset(ABLO, t) + oprangeset(ABMI, t) + oprangeset(ABPL, t) + oprangeset(ABVS, t) + oprangeset(ABVC, t) + oprangeset(ABHI, t) + oprangeset(ABLS, t) + oprangeset(ABGE, t) + oprangeset(ABLT, t) + oprangeset(ABGT, t) + oprangeset(ABLE, t) + + case ALSL: + oprangeset(ALSLW, t) + oprangeset(ALSR, t) + oprangeset(ALSRW, t) + oprangeset(AASR, t) + oprangeset(AASRW, t) + oprangeset(AROR, t) + oprangeset(ARORW, t) + + case ACLS: + oprangeset(ACLSW, t) + oprangeset(ACLZ, t) + oprangeset(ACLZW, t) + oprangeset(ARBIT, t) + oprangeset(ARBITW, t) + oprangeset(AREV, t) + oprangeset(AREVW, t) + oprangeset(AREV16, t) + oprangeset(AREV16W, t) + oprangeset(AREV32, t) + + case ASDIV: + oprangeset(ASDIVW, t) + oprangeset(AUDIV, t) + oprangeset(AUDIVW, t) + oprangeset(ACRC32B, t) + oprangeset(ACRC32CB, t) + oprangeset(ACRC32CH, t) + oprangeset(ACRC32CW, t) + oprangeset(ACRC32CX, t) + oprangeset(ACRC32H, t) + oprangeset(ACRC32W, t) + oprangeset(ACRC32X, t) + + case AMADD: + oprangeset(AMADDW, t) + oprangeset(AMSUB, t) + oprangeset(AMSUBW, t) + oprangeset(ASMADDL, t) + oprangeset(ASMSUBL, t) + oprangeset(AUMADDL, t) + oprangeset(AUMSUBL, t) + + case AREM: + oprangeset(AREMW, t) + oprangeset(AUREM, t) + oprangeset(AUREMW, t) + + case AMUL: + oprangeset(AMULW, t) + oprangeset(AMNEG, t) + oprangeset(AMNEGW, t) + oprangeset(ASMNEGL, t) + oprangeset(ASMULL, t) + oprangeset(ASMULH, t) + oprangeset(AUMNEGL, t) + oprangeset(AUMULH, t) + oprangeset(AUMULL, t) + + case AMOVB: + oprangeset(AMOVBU, t) + + case AMOVH: + oprangeset(AMOVHU, t) + + 
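+ // MOVWU shares MOVW's operand table, like MOVBU/MOVHU above.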
case AMOVW: + oprangeset(AMOVWU, t) + + case ABFM: + oprangeset(ABFMW, t) + oprangeset(ASBFM, t) + oprangeset(ASBFMW, t) + oprangeset(AUBFM, t) + oprangeset(AUBFMW, t) + + case ABFI: + oprangeset(ABFIW, t) + oprangeset(ABFXIL, t) + oprangeset(ABFXILW, t) + oprangeset(ASBFIZ, t) + oprangeset(ASBFIZW, t) + oprangeset(ASBFX, t) + oprangeset(ASBFXW, t) + oprangeset(AUBFIZ, t) + oprangeset(AUBFIZW, t) + oprangeset(AUBFX, t) + oprangeset(AUBFXW, t) + + case AEXTR: + oprangeset(AEXTRW, t) + + case ASXTB: + oprangeset(ASXTBW, t) + oprangeset(ASXTH, t) + oprangeset(ASXTHW, t) + oprangeset(ASXTW, t) + oprangeset(AUXTB, t) + oprangeset(AUXTH, t) + oprangeset(AUXTW, t) + oprangeset(AUXTBW, t) + oprangeset(AUXTHW, t) + + case ACCMN: + oprangeset(ACCMNW, t) + oprangeset(ACCMP, t) + oprangeset(ACCMPW, t) + + case ACSEL: + oprangeset(ACSELW, t) + oprangeset(ACSINC, t) + oprangeset(ACSINCW, t) + oprangeset(ACSINV, t) + oprangeset(ACSINVW, t) + oprangeset(ACSNEG, t) + oprangeset(ACSNEGW, t) + + case ACINC: + // aliases Rm=Rn, !cond + oprangeset(ACINCW, t) + oprangeset(ACINV, t) + oprangeset(ACINVW, t) + oprangeset(ACNEG, t) + oprangeset(ACNEGW, t) + + // aliases, Rm=Rn=REGZERO, !cond + case ACSET: + oprangeset(ACSETW, t) + + oprangeset(ACSETM, t) + oprangeset(ACSETMW, t) + + case AMOVD, + AMOVBU, + AB, + ABL, + AWORD, + ADWORD, + obj.ARET, + obj.ATEXT: + break + + case ALDP: + oprangeset(AFLDPD, t) + + case ASTP: + oprangeset(AFSTPD, t) + + case ASTPW: + oprangeset(AFSTPS, t) + + case ALDPW: + oprangeset(ALDPSW, t) + oprangeset(AFLDPS, t) + + case AERET: + oprangeset(AWFE, t) + oprangeset(AWFI, t) + oprangeset(AYIELD, t) + oprangeset(ASEV, t) + oprangeset(ASEVL, t) + oprangeset(ANOOP, t) + oprangeset(ADRPS, t) + + case ACBZ: + oprangeset(ACBZW, t) + oprangeset(ACBNZ, t) + oprangeset(ACBNZW, t) + + case ATBZ: + oprangeset(ATBNZ, t) + + case AADR, AADRP: + break + + case ACLREX: + break + + case ASVC: + oprangeset(AHVC, t) + oprangeset(AHLT, t) + oprangeset(ASMC, t) + oprangeset(ABRK, t) + oprangeset(ADCPS1, t) + oprangeset(ADCPS2, t) + oprangeset(ADCPS3, t) + + case AFADDS: + oprangeset(AFADDD, t) + oprangeset(AFSUBS, t) + oprangeset(AFSUBD, t) + oprangeset(AFMULS, t) + oprangeset(AFMULD, t) + oprangeset(AFNMULS, t) + oprangeset(AFNMULD, t) + oprangeset(AFDIVS, t) + oprangeset(AFMAXD, t) + oprangeset(AFMAXS, t) + oprangeset(AFMIND, t) + oprangeset(AFMINS, t) + oprangeset(AFMAXNMD, t) + oprangeset(AFMAXNMS, t) + oprangeset(AFMINNMD, t) + oprangeset(AFMINNMS, t) + oprangeset(AFDIVD, t) + + case AFMSUBD: + oprangeset(AFMSUBS, t) + oprangeset(AFMADDS, t) + oprangeset(AFMADDD, t) + oprangeset(AFNMSUBS, t) + oprangeset(AFNMSUBD, t) + oprangeset(AFNMADDS, t) + oprangeset(AFNMADDD, t) + + case AFCVTSD: + oprangeset(AFCVTDS, t) + oprangeset(AFABSD, t) + oprangeset(AFABSS, t) + oprangeset(AFNEGD, t) + oprangeset(AFNEGS, t) + oprangeset(AFSQRTD, t) + oprangeset(AFSQRTS, t) + oprangeset(AFRINTNS, t) + oprangeset(AFRINTND, t) + oprangeset(AFRINTPS, t) + oprangeset(AFRINTPD, t) + oprangeset(AFRINTMS, t) + oprangeset(AFRINTMD, t) + oprangeset(AFRINTZS, t) + oprangeset(AFRINTZD, t) + oprangeset(AFRINTAS, t) + oprangeset(AFRINTAD, t) + oprangeset(AFRINTXS, t) + oprangeset(AFRINTXD, t) + oprangeset(AFRINTIS, t) + oprangeset(AFRINTID, t) + oprangeset(AFCVTDH, t) + oprangeset(AFCVTHS, t) + oprangeset(AFCVTHD, t) + oprangeset(AFCVTSH, t) + + case AFCMPS: + oprangeset(AFCMPD, t) + oprangeset(AFCMPES, t) + oprangeset(AFCMPED, t) + + case AFCCMPS: + oprangeset(AFCCMPD, t) + oprangeset(AFCCMPES, t) + oprangeset(AFCCMPED, t) + + case 
AFCSELD: + oprangeset(AFCSELS, t) + + case AFMOVS, AFMOVD, AFMOVQ: + break + + case AFCVTZSD: + oprangeset(AFCVTZSDW, t) + oprangeset(AFCVTZSS, t) + oprangeset(AFCVTZSSW, t) + oprangeset(AFCVTZUD, t) + oprangeset(AFCVTZUDW, t) + oprangeset(AFCVTZUS, t) + oprangeset(AFCVTZUSW, t) + + case ASCVTFD: + oprangeset(ASCVTFS, t) + oprangeset(ASCVTFWD, t) + oprangeset(ASCVTFWS, t) + oprangeset(AUCVTFD, t) + oprangeset(AUCVTFS, t) + oprangeset(AUCVTFWD, t) + oprangeset(AUCVTFWS, t) + + case ASYS: + oprangeset(AAT, t) + oprangeset(ADC, t) + oprangeset(AIC, t) + oprangeset(ATLBI, t) + + case ASYSL, AHINT: + break + + case ADMB: + oprangeset(ADSB, t) + oprangeset(AISB, t) + + case AMRS, AMSR: + break + + case ALDAR: + oprangeset(ALDARW, t) + oprangeset(ALDARB, t) + oprangeset(ALDARH, t) + fallthrough + + case ALDXR: + oprangeset(ALDXRB, t) + oprangeset(ALDXRH, t) + oprangeset(ALDXRW, t) + + case ALDAXR: + oprangeset(ALDAXRB, t) + oprangeset(ALDAXRH, t) + oprangeset(ALDAXRW, t) + + case ALDXP: + oprangeset(ALDXPW, t) + oprangeset(ALDAXP, t) + oprangeset(ALDAXPW, t) + + case ASTLR: + oprangeset(ASTLRB, t) + oprangeset(ASTLRH, t) + oprangeset(ASTLRW, t) + + case ASTXR: + oprangeset(ASTXRB, t) + oprangeset(ASTXRH, t) + oprangeset(ASTXRW, t) + + case ASTLXR: + oprangeset(ASTLXRB, t) + oprangeset(ASTLXRH, t) + oprangeset(ASTLXRW, t) + + case ASTXP: + oprangeset(ASTLXP, t) + oprangeset(ASTLXPW, t) + oprangeset(ASTXPW, t) + + case AVADDP: + oprangeset(AVAND, t) + oprangeset(AVCMEQ, t) + oprangeset(AVORR, t) + oprangeset(AVEOR, t) + oprangeset(AVBSL, t) + oprangeset(AVBIT, t) + oprangeset(AVCMTST, t) + oprangeset(AVUZP1, t) + oprangeset(AVUZP2, t) + oprangeset(AVBIF, t) + + case AVADD: + oprangeset(AVSUB, t) + + case AAESD: + oprangeset(AAESE, t) + oprangeset(AAESMC, t) + oprangeset(AAESIMC, t) + oprangeset(ASHA1SU1, t) + oprangeset(ASHA256SU0, t) + oprangeset(ASHA512SU0, t) + + case ASHA1C: + oprangeset(ASHA1P, t) + oprangeset(ASHA1M, t) + + case ASHA256H: + oprangeset(ASHA256H2, t) + oprangeset(ASHA512H, t) + oprangeset(ASHA512H2, t) + + case ASHA1SU0: + oprangeset(ASHA256SU1, t) + oprangeset(ASHA512SU1, t) + + case AVADDV: + oprangeset(AVUADDLV, t) + + case AVFMLA: + oprangeset(AVFMLS, t) + + case AVPMULL: + oprangeset(AVPMULL2, t) + + case AVUSHR: + oprangeset(AVSHL, t) + oprangeset(AVSRI, t) + + case AVREV32: + oprangeset(AVCNT, t) + oprangeset(AVRBIT, t) + oprangeset(AVREV64, t) + oprangeset(AVREV16, t) + + case AVZIP1: + oprangeset(AVZIP2, t) + + case AVUXTL: + oprangeset(AVUXTL2, t) + + case AVUSHLL: + oprangeset(AVUSHLL2, t) + + case AVLD1R: + oprangeset(AVLD2, t) + oprangeset(AVLD2R, t) + oprangeset(AVLD3, t) + oprangeset(AVLD3R, t) + oprangeset(AVLD4, t) + oprangeset(AVLD4R, t) + + case ASHA1H, + AVCNT, + AVMOV, + AVLD1, + AVST1, + AVST2, + AVST3, + AVST4, + AVTBL, + AVDUP, + AVMOVI, + APRFM, + AVEXT: + break + + case obj.ANOP, + obj.AUNDEF, + obj.AFUNCDATA, + obj.APCALIGN, + obj.APCDATA, + obj.ADUFFZERO, + obj.ADUFFCOPY: + break + } + } +} + +// chipfloat7 checks whether the immediate constant is available for FMOVS/FMOVD instructions. +// For details of the range of constants available, see +// http://infocenter.arm.com/help/topic/com.arm.doc.dui0473m/dom1359731199385.html. 
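+// For example, 1.0 (bits 0x3FF0000000000000) encodes as n = 0x70 and 2.0 as n = 0; +// constants outside this set return -1.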
+func (c *ctxt7) chipfloat7(e float64) int { + ei := math.Float64bits(e) + l := uint32(int32(ei)) + h := uint32(int32(ei >> 32)) + + if l != 0 || h&0xffff != 0 { + return -1 + } + h1 := h & 0x7fc00000 + if h1 != 0x40000000 && h1 != 0x3fc00000 { + return -1 + } + n := 0 + + // sign bit (a) + if h&0x80000000 != 0 { + n |= 1 << 7 + } + + // exp sign bit (b) + if h1 == 0x3fc00000 { + n |= 1 << 6 + } + + // rest of exp and mantissa (cd-efgh) + n |= int((h >> 16) & 0x3f) + + //print("match %.8lux %.8lux %d\n", l, h, n); + return n +} + +/* form offset parameter to SYS; special register number */ +func SYSARG5(op0 int, op1 int, Cn int, Cm int, op2 int) int { + return op0<<19 | op1<<16 | Cn<<12 | Cm<<8 | op2<<5 +} + +func SYSARG4(op1 int, Cn int, Cm int, op2 int) int { + return SYSARG5(0, op1, Cn, Cm, op2) +} + +// checkUnpredictable checks if the source and transfer registers are the same register. +// The ARM64 manual says it is "constrained unpredictable" if the src and dst registers of STP/LDP are the same. +func (c *ctxt7) checkUnpredictable(p *obj.Prog, isload bool, wback bool, rn int16, rt1 int16, rt2 int16) { + if wback && rn != REGSP && (rn == rt1 || rn == rt2) { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } + if isload && rt1 == rt2 { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } +} + +/* checkindex checks if index >= 0 && index <= maxindex */ +func (c *ctxt7) checkindex(p *obj.Prog, index, maxindex int) { + if index < 0 || index > maxindex { + c.ctxt.Diag("register element index out of range 0 to %d: %v", maxindex, p) + } +} + +/* checkoffset checks whether the immediate offset is valid for VLD[1-4].P and VST[1-4].P */ +func (c *ctxt7) checkoffset(p *obj.Prog, as obj.As) { + var offset, list, n, expect int64 + switch as { + case AVLD1, AVLD2, AVLD3, AVLD4, AVLD1R, AVLD2R, AVLD3R, AVLD4R: + offset = p.From.Offset + list = p.To.Offset + case AVST1, AVST2, AVST3, AVST4: + offset = p.To.Offset + list = p.From.Offset + default: + c.ctxt.Diag("invalid operation on op %v", p.As) + } + opcode := (list >> 12) & 15 + q := (list >> 30) & 1 + size := (list >> 10) & 3 + if offset == 0 { + return + } + switch opcode { + case 0x7: + n = 1 // one register + case 0xa: + n = 2 // two registers + case 0x6: + n = 3 // three registers + case 0x2: + n = 4 // four registers + default: + c.ctxt.Diag("invalid register numbers in ARM64 register list: %v", p) + } + + switch as { + case AVLD1R, AVLD2R, AVLD3R, AVLD4R: + if offset != n*(1<<uint(size)) { + c.ctxt.Diag("invalid post-increment offset: %v", p) + } + default: + if !(q == 0 && offset == n*8) && !(q == 1 && offset == n*16) { + c.ctxt.Diag("invalid post-increment offset: %v", p) + } + } + + switch as { + case AVLD1, AVST1: + return + } + + switch as { + case AVLD2, AVST2, AVLD2R: + expect = 2 + case AVLD3, AVST3, AVLD3R: + expect = 3 + case AVLD4, AVST4, AVLD4R: + expect = 4 + default: + expect = 1 + } + + if expect != n { + c.ctxt.Diag("expected %d registers, got %d: %v", expect, n, p) + } +} + +/* checkShiftAmount checks whether the index shift amount is valid */ +/* for load with register offset instructions */ +func (c *ctxt7) checkShiftAmount(p *obj.Prog, a *obj.Addr) { + amount := (a.Index >> 5) & 7 + switch p.As { + case AMOVB, AMOVBU: + if amount != 0 { + c.ctxt.Diag("invalid index shift amount: %v", p) + } + case AMOVH, AMOVHU: + if amount != 1 && amount != 0 { + c.ctxt.Diag("invalid index shift amount: %v", p) + } + case AMOVW, AMOVWU, AFMOVS: + if amount != 2 && amount != 0 { + c.ctxt.Diag("invalid index shift amount: %v", p) + } + case AMOVD, AFMOVD: + if amount != 3 && amount != 0 { + c.ctxt.Diag("invalid index shift amount: %v", p) + } + default: + panic("invalid operation") + } +} + +func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { + var os [5]uint32 + o1 := uint32(0) + o2 := uint32(0) + o3 := uint32(0) + o4 := uint32(0) + o5 := uint32(0) + if false { /*debug['P']*/ + fmt.Printf("%x: %v\ttype %d\n", uint32(p.Pc), p, o.type_) + } + switch o.type_ { + default: + c.ctxt.Diag("%v: unknown asm %d", p, o.type_) + + case 0: /* pseudo ops */ + break + + case 1: /* op Rm,[Rn],Rd; default Rn=Rd -> op Rm<<0,[Rn,]Rd (shifted register) */ + o1 = c.oprrr(p, p.As) + + rf := int(p.From.Reg) + rt := int(p.To.Reg) + r := int(p.Reg) + if 
p.To.Type == obj.TYPE_NONE { + rt = REGZERO + } + if r == 0 { + r = rt + } + o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31) + + case 2: /* add/sub $(uimm12|uimm24)[,R],R; cmp $(uimm12|uimm24),R */ + o1 = c.opirr(p, p.As) + + rt := int(p.To.Reg) + if p.To.Type == obj.TYPE_NONE { + if (o1 & Sbit) == 0 { + c.ctxt.Diag("ineffective ZR destination\n%v", p) + } + rt = REGZERO + } + + r := int(p.Reg) + if r == 0 { + r = rt + } + v := int32(c.regoff(&p.From)) + o1 = c.oaddi(p, int32(o1), v, r, rt) + + case 3: /* op R<<n[,R],R (shifted register) */ + o1 = c.oprrr(p, p.As) + + amount := (p.From.Offset >> 10) & 63 + is64bit := o1 & (1 << 31) + if is64bit == 0 && amount >= 32 { + c.ctxt.Diag("shift amount out of range 0 to 31: %v", p) + } + o1 |= uint32(p.From.Offset) /* includes reg, op, etc */ + rt := int(p.To.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = REGZERO + } + r := int(p.Reg) + if p.As == AMVN || p.As == AMVNW { + r = REGZERO + } else if r == 0 { + r = rt + } + o1 |= (uint32(r&31) << 5) | uint32(rt&31) + + case 4: /* mov $addcon, R; mov $recon, R; mov $racon, R; mov $addcon2, R */ + rt := int(p.To.Reg) + r := int(o.param) + + if r == 0 { + r = REGZERO + } else if r == REGFROM { + r = int(p.From.Reg) + } + if r == 0 { + r = REGSP + } + + v := int32(c.regoff(&p.From)) + var op int32 + if v < 0 { + v = -v + op = int32(c.opirr(p, ASUB)) + } else { + op = int32(c.opirr(p, AADD)) + } + + if int(o.size) == 8 { + o1 = c.oaddi(p, op, v&0xfff000, r, REGTMP) + o2 = c.oaddi(p, op, v&0x000fff, REGTMP, rt) + break + } + + o1 = c.oaddi(p, op, v, r, rt) + + case 5: /* b s; bl s */ + o1 = c.opbra(p, p.As) + + if p.To.Sym == nil { + o1 |= uint32(c.brdist(p, 0, 26, 2)) + break + } + + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 4 + rel.Sym = p.To.Sym + rel.Add = p.To.Offset + rel.Type = objabi.R_CALLARM64 + + case 6: /* b ,O(R); bl ,O(R) */ + o1 = c.opbrr(p, p.As) + + o1 |= uint32(p.To.Reg&31) << 5 + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 0 + rel.Type = objabi.R_CALLIND + + case 7: /* beq s */ + o1 = c.opbra(p, p.As) + + o1 |= uint32(c.brdist(p, 0, 19, 2) << 5) + + case 8: /* lsl $c,[R],R -> ubfm $(W-1)-c,$(-c MOD (W-1)),Rn,Rd */ + rt := int(p.To.Reg) + + rf := int(p.Reg) + if rf == 0 { + rf = rt + } + v := int32(p.From.Offset) + switch p.As { + case AASR: + o1 = c.opbfm(p, ASBFM, int(v), 63, rf, rt) + + case AASRW: + o1 = c.opbfm(p, ASBFMW, int(v), 31, rf, rt) + + case ALSL: + o1 = c.opbfm(p, AUBFM, int((64-v)&63), int(63-v), rf, rt) + + case ALSLW: + o1 = c.opbfm(p, AUBFMW, int((32-v)&31), int(31-v), rf, rt) + + case ALSR: + o1 = c.opbfm(p, AUBFM, int(v), 63, rf, rt) + + case ALSRW: + o1 = c.opbfm(p, AUBFMW, int(v), 31, rf, rt) + + case AROR: + o1 = c.opextr(p, AEXTR, v, rf, rf, rt) + + case ARORW: + o1 = c.opextr(p, AEXTRW, v, rf, rf, rt) + + default: + c.ctxt.Diag("bad shift $con\n%v", p) + break + } + + case 9: /* lsl Rm,[Rn],Rd -> lslv Rm, Rn, Rd */ + o1 = c.oprrr(p, p.As) + + r := int(p.Reg) + if r == 0 { + r = int(p.To.Reg) + } + o1 |= (uint32(p.From.Reg&31) << 16) | (uint32(r&31) << 5) | uint32(p.To.Reg&31) + + case 10: /* brk/hvc/.../svc [$con] */ + o1 = c.opimm(p, p.As) + + if p.From.Type != obj.TYPE_NONE { + o1 |= uint32((p.From.Offset & 0xffff) << 5) + } + + case 11: /* dword */ + c.aclass(&p.To) + + o1 = uint32(c.instoffset) + o2 = uint32(c.instoffset >> 32) + if p.To.Sym != nil { + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.To.Sym + rel.Add = p.To.Offset + rel.Type = objabi.R_ADDR + o2 = 0 + o1 = o2 + } + + case 12: /* movT $vcon, reg */ + // NOTE: this case does not use REGTMP. 
If it ever does, + // remove the NOTUSETMP flag in optab. + num := c.omovlconst(p.As, p, &p.From, int(p.To.Reg), os[:]) + if num == 0 { + c.ctxt.Diag("invalid constant: %v", p) + } + o1 = os[0] + o2 = os[1] + o3 = os[2] + o4 = os[3] + + case 13: /* addop $vcon, [R], R (64 bit literal); cmp $lcon,R -> addop $lcon,R, ZR */ + o := uint32(0) + num := uint8(0) + cls := oclass(&p.From) + if isADDWop(p.As) { + if !cmp(C_LCON, cls) { + c.ctxt.Diag("illegal combination: %v", p) + } + num = c.omovlconst(AMOVW, p, &p.From, REGTMP, os[:]) + } else { + num = c.omovlconst(AMOVD, p, &p.From, REGTMP, os[:]) + } + if num == 0 { + c.ctxt.Diag("invalid constant: %v", p) + } + rt := int(p.To.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = REGZERO + } + r := int(p.Reg) + if r == 0 { + r = rt + } + if p.To.Type != obj.TYPE_NONE && (p.To.Reg == REGSP || r == REGSP) { + o = c.opxrrr(p, p.As, false) + o |= REGTMP & 31 << 16 + o |= LSL0_64 + } else { + o = c.oprrr(p, p.As) + o |= REGTMP & 31 << 16 /* shift is 0 */ + } + + o |= uint32(r&31) << 5 + o |= uint32(rt & 31) + + os[num] = o + o1 = os[0] + o2 = os[1] + o3 = os[2] + o4 = os[3] + o5 = os[4] + + case 14: /* word */ + if c.aclass(&p.To) == C_ADDR { + c.ctxt.Diag("address constant needs DWORD\n%v", p) + } + o1 = uint32(c.instoffset) + if p.To.Sym != nil { + // This case happens with words generated + // in the PC stream as part of the literal pool. + rel := obj.Addrel(c.cursym) + + rel.Off = int32(c.pc) + rel.Siz = 4 + rel.Sym = p.To.Sym + rel.Add = p.To.Offset + rel.Type = objabi.R_ADDR + o1 = 0 + } + + case 15: /* mul/mneg/umulh/umull r,[r,]r; madd/msub/fmadd/fmsub/fnmadd/fnmsub Rm,Ra,Rn,Rd */ + o1 = c.oprrr(p, p.As) + + rf := int(p.From.Reg) + rt := int(p.To.Reg) + var r int + var ra int + if p.From3Type() == obj.TYPE_REG { + r = int(p.GetFrom3().Reg) + ra = int(p.Reg) + if ra == 0 { + ra = REGZERO + } + } else { + r = int(p.Reg) + if r == 0 { + r = rt + } + ra = REGZERO + } + + o1 |= (uint32(rf&31) << 16) | (uint32(ra&31) << 10) | (uint32(r&31) << 5) | uint32(rt&31) + + case 16: /* XremY R[,R],R -> XdivY; XmsubY */ + o1 = c.oprrr(p, p.As) + + rf := int(p.From.Reg) + rt := int(p.To.Reg) + r := int(p.Reg) + if r == 0 { + r = rt + } + o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | REGTMP&31 + o2 = c.oprrr(p, AMSUBW) + o2 |= o1 & (1 << 31) /* same size */ + o2 |= (uint32(rf&31) << 16) | (uint32(r&31) << 10) | (REGTMP & 31 << 5) | uint32(rt&31) + + case 17: /* op Rm,[Rn],Rd; default Rn=ZR */ + o1 = c.oprrr(p, p.As) + + rf := int(p.From.Reg) + rt := int(p.To.Reg) + r := int(p.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = REGZERO + } + if r == 0 { + r = REGZERO + } + o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31) + + case 18: /* csel cond,Rn,Rm,Rd; cinc/cinv/cneg cond,Rn,Rd; cset cond,Rd */ + o1 = c.oprrr(p, p.As) + + cond := int(p.From.Reg) + if cond < COND_EQ || cond > COND_NV { + c.ctxt.Diag("invalid condition: %v", p) + } else { + cond -= COND_EQ + } + + r := int(p.Reg) + var rf int + if r != 0 { + if p.From3Type() == obj.TYPE_NONE { + /* CINC/CINV/CNEG */ + rf = r + cond ^= 1 + } else { + rf = int(p.GetFrom3().Reg) /* CSEL */ + } + } else { + /* CSET */ + rf = REGZERO + r = rf + cond ^= 1 + } + + rt := int(p.To.Reg) + o1 |= (uint32(rf&31) << 16) | (uint32(cond&15) << 12) | (uint32(r&31) << 5) | uint32(rt&31) + + case 19: /* CCMN cond, (Rm|uimm5),Rn, uimm4 -> ccmn Rn,Rm,uimm4,cond */ + nzcv := int(p.To.Offset) + + cond := int(p.From.Reg) + if cond < COND_EQ || cond > COND_NV { + c.ctxt.Diag("invalid condition\n%v", p) + } else { + cond -= 
COND_EQ + } + var rf int + if p.GetFrom3().Type == obj.TYPE_REG { + o1 = c.oprrr(p, p.As) + rf = int(p.GetFrom3().Reg) /* Rm */ + } else { + o1 = c.opirr(p, p.As) + rf = int(p.GetFrom3().Offset & 0x1F) + } + + o1 |= (uint32(rf&31) << 16) | (uint32(cond&15) << 12) | (uint32(p.Reg&31) << 5) | uint32(nzcv) + + case 20: /* movT R,O(R) -> strT */ + v := int32(c.regoff(&p.To)) + sz := int32(1 << uint(movesize(p.As))) + + r := int(p.To.Reg) + if r == 0 { + r = int(o.param) + } + if v < 0 || v%sz != 0 { /* unscaled 9-bit signed */ + o1 = c.olsr9s(p, int32(c.opstr9(p, p.As)), v, r, int(p.From.Reg)) + } else { + v = int32(c.offsetshift(p, int64(v), int(o.a4))) + o1 = c.olsr12u(p, int32(c.opstr12(p, p.As)), v, r, int(p.From.Reg)) + } + + case 21: /* movT O(R),R -> ldrT */ + v := int32(c.regoff(&p.From)) + sz := int32(1 << uint(movesize(p.As))) + + r := int(p.From.Reg) + if r == 0 { + r = int(o.param) + } + if v < 0 || v%sz != 0 { /* unscaled 9-bit signed */ + o1 = c.olsr9s(p, int32(c.opldr9(p, p.As)), v, r, int(p.To.Reg)) + } else { + v = int32(c.offsetshift(p, int64(v), int(o.a1))) + //print("offset=%lld v=%ld a1=%d\n", instoffset, v, o->a1); + o1 = c.olsr12u(p, int32(c.opldr12(p, p.As)), v, r, int(p.To.Reg)) + } + + case 22: /* movT (R)O!,R; movT O(R)!, R -> ldrT */ + if p.From.Reg != REGSP && p.From.Reg == p.To.Reg { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } + + v := int32(p.From.Offset) + + if v < -256 || v > 255 { + c.ctxt.Diag("offset out of range [-256,255]: %v", p) + } + o1 = c.opldrpp(p, p.As) + if o.scond == C_XPOST { + o1 |= 1 << 10 + } else { + o1 |= 3 << 10 + } + o1 |= ((uint32(v) & 0x1FF) << 12) | (uint32(p.From.Reg&31) << 5) | uint32(p.To.Reg&31) + + case 23: /* movT R,(R)O!; movT O(R)!, R -> strT */ + if p.To.Reg != REGSP && p.From.Reg == p.To.Reg { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } + + v := int32(p.To.Offset) + + if v < -256 || v > 255 { + c.ctxt.Diag("offset out of range [-256,255]: %v", p) + } + o1 = LD2STR(c.opldrpp(p, p.As)) + if o.scond == C_XPOST { + o1 |= 1 << 10 + } else { + o1 |= 3 << 10 + } + o1 |= ((uint32(v) & 0x1FF) << 12) | (uint32(p.To.Reg&31) << 5) | uint32(p.From.Reg&31) + + case 24: /* mov/mvn Rs,Rd -> add $0,Rs,Rd or orr Rs,ZR,Rd */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + s := rf == REGSP || rt == REGSP + if p.As == AMVN || p.As == AMVNW { + if s { + c.ctxt.Diag("illegal SP reference\n%v", p) + } + o1 = c.oprrr(p, p.As) + o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31) + } else if s { + o1 = c.opirr(p, p.As) + o1 |= (uint32(rf&31) << 5) | uint32(rt&31) + } else { + o1 = c.oprrr(p, p.As) + o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31) + } + + case 25: /* negX Rs, Rd -> subX Rs<<0, ZR, Rd */ + o1 = c.oprrr(p, p.As) + + rf := int(p.From.Reg) + if rf == C_NONE { + rf = int(p.To.Reg) + } + rt := int(p.To.Reg) + o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31) + + case 26: /* negX Rm<<s, Rd -> subX Rm<<s, ZR, Rd */ + o1 = c.oprrr(p, p.As) + + o1 |= uint32(p.From.Offset) /* includes reg, op, etc */ + rt := int(p.To.Reg) + o1 |= (REGZERO & 31 << 5) | uint32(rt&31) + + case 27: /* op Rm<<n[,Rn],Rd (extended register) */ + if (p.From.Reg-obj.RBaseARM64)&REG_EXT != 0 { + amount := (p.From.Reg >> 5) & 7 + if amount > 4 { + c.ctxt.Diag("shift amount out of range 0 to 4: %v", p) + } + o1 = c.opxrrr(p, p.As, true) + o1 |= c.encRegShiftOrExt(&p.From, p.From.Reg) /* includes reg, op, etc */ + } else { + o1 = c.opxrrr(p, p.As, false) + o1 |= uint32(p.From.Reg&31) << 16 + } + rt := int(p.To.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = REGZERO + } + r := int(p.Reg) + if r == 0 { + r = rt + } + o1 |= (uint32(r&31) << 5) | uint32(rt&31) + + case 28: /* logop $vcon, [R], R (64 bit literal) */ + o := uint32(0) + num := uint8(0) + cls := oclass(&p.From) + if isANDWop(p.As) 
{ + if !cmp(C_LCON, cls) { + c.ctxt.Diag("illegal combination: %v", p) + } + num = c.omovlconst(AMOVW, p, &p.From, REGTMP, os[:]) + } else { + num = c.omovlconst(AMOVD, p, &p.From, REGTMP, os[:]) + } + + if num == 0 { + c.ctxt.Diag("invalid constant: %v", p) + } + rt := int(p.To.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = REGZERO + } + r := int(p.Reg) + if r == 0 { + r = rt + } + o = c.oprrr(p, p.As) + o |= REGTMP & 31 << 16 /* shift is 0 */ + o |= uint32(r&31) << 5 + o |= uint32(rt & 31) + + os[num] = o + o1 = os[0] + o2 = os[1] + o3 = os[2] + o4 = os[3] + o5 = os[4] + + case 29: /* op Rn, Rd */ + fc := c.aclass(&p.From) + tc := c.aclass(&p.To) + if (p.As == AFMOVD || p.As == AFMOVS) && (fc == C_REG || fc == C_ZCON || tc == C_REG || tc == C_ZCON) { + // FMOV Rx, Fy or FMOV Fy, Rx + o1 = FPCVTI(0, 0, 0, 0, 6) + if p.As == AFMOVD { + o1 |= 1<<31 | 1<<22 // 64-bit + } + if fc == C_REG || fc == C_ZCON { + o1 |= 1 << 16 // FMOV Rx, Fy + } + } else { + o1 = c.oprrr(p, p.As) + } + o1 |= uint32(p.From.Reg&31)<<5 | uint32(p.To.Reg&31) + + case 30: /* movT R,L(R) -> strT */ + // if offset L can be split into hi+lo, and both fit into instructions, do + // add $hi, R, Rtmp + // str R, lo(Rtmp) + // otherwise, use constant pool + // mov $L, Rtmp (from constant pool) + // str R, (R+Rtmp) + s := movesize(o.as) + if s < 0 { + c.ctxt.Diag("unexpected long move, op %v tab %v\n%v", p.As, o.as, p) + } + + r := int(p.To.Reg) + if r == 0 { + r = int(o.param) + } + + v := int32(c.regoff(&p.To)) + var hi int32 + if v < 0 || (v&((1<<uint(s))-1)) != 0 { + // negative or unaligned offset, use constant pool + goto storeusepool + } + + hi = v - (v & (0xFFF << uint(s))) + if hi&0xFFF != 0 { + c.ctxt.Diag("internal: miscalculated offset %d [%d]\n%v", v, s, p) + } + if hi&^0xFFF000 != 0 { + // hi doesn't fit into an ADD instruction + goto storeusepool + } + + o1 = c.oaddi(p, int32(c.opirr(p, AADD)), hi, r, REGTMP) + o2 = c.olsr12u(p, int32(c.opstr12(p, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.From.Reg)) + break + + storeusepool: + if r == REGTMP || p.From.Reg == REGTMP { + c.ctxt.Diag("REGTMP used in large offset store: %v", p) + } + o1 = c.omovlit(AMOVD, p, &p.To, REGTMP) + o2 = c.olsxrr(p, int32(c.opstrr(p, p.As, false)), int(p.From.Reg), r, REGTMP) + + case 31: /* movT L(R), R -> ldrT */ + // if offset L can be split into hi+lo, and both fit into instructions, do + // add $hi, R, Rtmp + // ldr lo(Rtmp), R + // otherwise, use constant pool + // mov $L, Rtmp (from constant pool) + // ldr (R+Rtmp), R + s := movesize(o.as) + if s < 0 { + c.ctxt.Diag("unexpected long move, op %v tab %v\n%v", p.As, o.as, p) + } + + r := int(p.From.Reg) + if r == 0 { + r = int(o.param) + } + + v := int32(c.regoff(&p.From)) + var hi int32 + if v < 0 || (v&((1<<uint(s))-1)) != 0 { + // negative or unaligned offset, use constant pool + goto loadusepool + } + + hi = v - (v & (0xFFF << uint(s))) + if hi&0xFFF != 0 { + c.ctxt.Diag("internal: miscalculated offset %d [%d]\n%v", v, s, p) + } + if hi&^0xFFF000 != 0 { + // hi doesn't fit into an ADD instruction + goto loadusepool + } + + o1 = c.oaddi(p, int32(c.opirr(p, AADD)), hi, r, REGTMP) + o2 = c.olsr12u(p, int32(c.opldr12(p, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.To.Reg)) + break + + loadusepool: + if r == REGTMP || p.From.Reg == REGTMP { + c.ctxt.Diag("REGTMP used in large offset load: %v", p) + } + o1 = c.omovlit(AMOVD, p, &p.From, REGTMP) + o2 = c.olsxrr(p, int32(c.opldrr(p, p.As, false)), int(p.To.Reg), r, REGTMP) + + case 32: /* mov $con, R -> movz/movn */ + o1 = c.omovconst(p.As, p, &p.From, int(p.To.Reg)) + + case 33: /* movk $uimm16 << pos */ + o1 = c.opirr(p, p.As) + + d := p.From.Offset + s := movcon(d) + if s < 0 || s >= 4 { + c.ctxt.Diag("bad constant for MOVK: %#x\n%v", uint64(d), p) + } + if (o1&S64) == 0 && s >= 2 { + c.ctxt.Diag("illegal bit position\n%v", p) + } + if ((d >> uint(s*16)) >> 16) != 0 { + c.ctxt.Diag("requires uimm16\n%v", p) + } + rt := int(p.To.Reg) + + o1 |= uint32((((d >> uint(s*16)) & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31)) + + case 34: /* mov $lacon,R */ + o1 = c.omovlit(AMOVD, p, &p.From, REGTMP) + + if o1 == 0 { + break + } + o2 = c.opxrrr(p, AADD, false) + o2 |= REGTMP & 31 << 16 + o2 |= LSL0_64 + r := int(p.From.Reg) + if r == 0 { + r = int(o.param) + } + o2 |= uint32(r&31) << 5 + o2 |= uint32(p.To.Reg & 31) + + case 35: /* mov SPR,R -> mrs */ + o1 = c.oprrr(p, AMRS) + + // 
SysRegEnc function returns the system register encoding and accessFlags. + _, v, accessFlags := SysRegEnc(p.From.Reg) + if v == 0 { + c.ctxt.Diag("illegal system register:\n%v", p) + } + if (o1 & (v &^ (3 << 19))) != 0 { + c.ctxt.Diag("MRS register value overlap\n%v", p) + } + if accessFlags&SR_READ == 0 { + c.ctxt.Diag("system register is not readable: %v", p) + } + + o1 |= v + o1 |= uint32(p.To.Reg & 31) + + case 36: /* mov R,SPR */ + o1 = c.oprrr(p, AMSR) + + // SysRegEnc function returns the system register encoding and accessFlags. + _, v, accessFlags := SysRegEnc(p.To.Reg) + if v == 0 { + c.ctxt.Diag("illegal system register:\n%v", p) + } + if (o1 & (v &^ (3 << 19))) != 0 { + c.ctxt.Diag("MSR register value overlap\n%v", p) + } + if accessFlags&SR_WRITE == 0 { + c.ctxt.Diag("system register is not writable: %v", p) + } + + o1 |= v + o1 |= uint32(p.From.Reg & 31) + + case 37: /* mov $con,PSTATEfield -> MSR [immediate] */ + if (uint64(p.From.Offset) &^ uint64(0xF)) != 0 { + c.ctxt.Diag("illegal immediate for PSTATE field\n%v", p) + } + o1 = c.opirr(p, AMSR) + o1 |= uint32((p.From.Offset & 0xF) << 8) /* Crm */ + v := uint32(0) + for i := 0; i < len(pstatefield); i++ { + if pstatefield[i].reg == p.To.Reg { + v = pstatefield[i].enc + break + } + } + + if v == 0 { + c.ctxt.Diag("illegal PSTATE field for immediate move\n%v", p) + } + o1 |= v + + case 38: /* clrex [$imm] */ + o1 = c.opimm(p, p.As) + + if p.To.Type == obj.TYPE_NONE { + o1 |= 0xF << 8 + } else { + o1 |= uint32((p.To.Offset & 0xF) << 8) + } + + case 39: /* cbz R, rel */ + o1 = c.opirr(p, p.As) + + o1 |= uint32(p.From.Reg & 31) + o1 |= uint32(c.brdist(p, 0, 19, 2) << 5) + + case 40: /* tbz */ + o1 = c.opirr(p, p.As) + + v := int32(p.From.Offset) + if v < 0 || v > 63 { + c.ctxt.Diag("illegal bit number\n%v", p) + } + o1 |= ((uint32(v) & 0x20) << (31 - 5)) | ((uint32(v) & 0x1F) << 19) + o1 |= uint32(c.brdist(p, 0, 14, 2) << 5) + o1 |= uint32(p.Reg & 31) + + case 41: /* eret, nop, others with no operands */ + o1 = c.op0(p, p.As) + + case 42: /* bfm R,r,s,R */ + o1 = c.opbfm(p, p.As, int(p.From.Offset), int(p.GetFrom3().Offset), int(p.Reg), int(p.To.Reg)) + + case 43: /* bfm aliases */ + r := int(p.From.Offset) + s := int(p.GetFrom3().Offset) + rf := int(p.Reg) + rt := int(p.To.Reg) + if rf == 0 { + rf = rt + } + switch p.As { + case ABFI: + if r != 0 { + r = 64 - r + } + o1 = c.opbfm(p, ABFM, r, s-1, rf, rt) + + case ABFIW: + if r != 0 { + r = 32 - r + } + o1 = c.opbfm(p, ABFMW, r, s-1, rf, rt) + + case ABFXIL: + o1 = c.opbfm(p, ABFM, r, r+s-1, rf, rt) + + case ABFXILW: + o1 = c.opbfm(p, ABFMW, r, r+s-1, rf, rt) + + case ASBFIZ: + if r != 0 { + r = 64 - r + } + o1 = c.opbfm(p, ASBFM, r, s-1, rf, rt) + + case ASBFIZW: + if r != 0 { + r = 32 - r + } + o1 = c.opbfm(p, ASBFMW, r, s-1, rf, rt) + + case ASBFX: + o1 = c.opbfm(p, ASBFM, r, r+s-1, rf, rt) + + case ASBFXW: + o1 = c.opbfm(p, ASBFMW, r, r+s-1, rf, rt) + + case AUBFIZ: + if r != 0 { + r = 64 - r + } + o1 = c.opbfm(p, AUBFM, r, s-1, rf, rt) + + case AUBFIZW: + if r != 0 { + r = 32 - r + } + o1 = c.opbfm(p, AUBFMW, r, s-1, rf, rt) + + case AUBFX: + o1 = c.opbfm(p, AUBFM, r, r+s-1, rf, rt) + + case AUBFXW: + o1 = c.opbfm(p, AUBFMW, r, r+s-1, rf, rt) + + default: + c.ctxt.Diag("bad bfm alias\n%v", p) + break + } + + case 44: /* extr $b, Rn, Rm, Rd */ + o1 = c.opextr(p, p.As, int32(p.From.Offset), int(p.GetFrom3().Reg), int(p.Reg), int(p.To.Reg)) + + case 45: /* sxt/uxt[bhw] R,R; movT R,R -> sxtT R,R */ + rf := int(p.From.Reg) + + rt := int(p.To.Reg) + as := p.As + if rf == REGZERO 
{
+			as = AMOVWU /* clearer in disassembly */
+		}
+		switch as {
+		case AMOVB, ASXTB:
+			o1 = c.opbfm(p, ASBFM, 0, 7, rf, rt)
+
+		case AMOVH, ASXTH:
+			o1 = c.opbfm(p, ASBFM, 0, 15, rf, rt)
+
+		case AMOVW, ASXTW:
+			o1 = c.opbfm(p, ASBFM, 0, 31, rf, rt)
+
+		case AMOVBU, AUXTB:
+			o1 = c.opbfm(p, AUBFM, 0, 7, rf, rt)
+
+		case AMOVHU, AUXTH:
+			o1 = c.opbfm(p, AUBFM, 0, 15, rf, rt)
+
+		case AMOVWU:
+			o1 = c.oprrr(p, as) | (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
+
+		case AUXTW:
+			o1 = c.opbfm(p, AUBFM, 0, 31, rf, rt)
+
+		case ASXTBW:
+			o1 = c.opbfm(p, ASBFMW, 0, 7, rf, rt)
+
+		case ASXTHW:
+			o1 = c.opbfm(p, ASBFMW, 0, 15, rf, rt)
+
+		case AUXTBW:
+			o1 = c.opbfm(p, AUBFMW, 0, 7, rf, rt)
+
+		case AUXTHW:
+			o1 = c.opbfm(p, AUBFMW, 0, 15, rf, rt)
+
+		default:
+			c.ctxt.Diag("bad sxt %v", as)
+			break
+		}
+
+	case 46: /* cls */
+		o1 = c.opbit(p, p.As)
+
+		o1 |= uint32(p.From.Reg&31) << 5
+		o1 |= uint32(p.To.Reg & 31)
+
+	case 47: /* SWPx/LDADDx/LDANDx/LDEORx/LDORx Rs, (Rb), Rt */
+		rs := p.From.Reg
+		rt := p.RegTo2
+		rb := p.To.Reg
+
+		fields := atomicInstructions[p.As]
+		// rt can't be sp. rt can't be r31 when the A bit (bit 23) is 0.
+		if rt == REG_RSP || (rt == REGZERO && (fields&(1<<23) == 0)) {
+			c.ctxt.Diag("illegal destination register: %v\n", p)
+		}
+		o1 |= fields | uint32(rs&31)<<16 | uint32(rb&31)<<5 | uint32(rt&31)
+
+	case 48: /* ADD $C_ADDCON2, Rm, Rd */
+		// NOTE: this case does not use REGTMP. If it ever does,
+		// remove the NOTUSETMP flag in optab.
+		op := c.opirr(p, p.As)
+		if op&Sbit != 0 {
+			c.ctxt.Diag("can not break addition/subtraction when S bit is set: %v", p)
+		}
+		rt := int(p.To.Reg)
+		r := int(p.Reg)
+		if r == 0 {
+			r = rt
+		}
+		o1 = c.oaddi(p, int32(op), int32(c.regoff(&p.From))&0x000fff, r, rt)
+		o2 = c.oaddi(p, int32(op), int32(c.regoff(&p.From))&0xfff000, rt, rt)
+
+	case 50: /* sys/sysl */
+		o1 = c.opirr(p, p.As)
+
+		if (p.From.Offset &^ int64(SYSARG4(0x7, 0xF, 0xF, 0x7))) != 0 {
+			c.ctxt.Diag("illegal SYS argument\n%v", p)
+		}
+		o1 |= uint32(p.From.Offset)
+		if p.To.Type == obj.TYPE_REG {
+			o1 |= uint32(p.To.Reg & 31)
+		} else if p.Reg != 0 {
+			o1 |= uint32(p.Reg & 31)
+		} else {
+			o1 |= 0x1F
+		}
+
+	case 51: /* dmb */
+		o1 = c.opirr(p, p.As)
+
+		if p.From.Type == obj.TYPE_CONST {
+			o1 |= uint32((p.From.Offset & 0xF) << 8)
+		}
+
+	case 52: /* hint */
+		o1 = c.opirr(p, p.As)
+
+		o1 |= uint32((p.From.Offset & 0x7F) << 5)
+
+	case 53: /* and/or/eor/bic/tst/...
$bitcon, Rn, Rd */ + a := p.As + rt := int(p.To.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = REGZERO + } + r := int(p.Reg) + if r == 0 { + r = rt + } + mode := 64 + v := uint64(p.From.Offset) + switch p.As { + case AANDW, AORRW, AEORW, AANDSW, ATSTW: + mode = 32 + case ABIC, AORN, AEON, ABICS: + v = ^v + case ABICW, AORNW, AEONW, ABICSW: + v = ^v + mode = 32 + } + o1 = c.opirr(p, a) + o1 |= bitconEncode(v, mode) | uint32(r&31)<<5 | uint32(rt&31) + + case 54: /* floating point arith */ + o1 = c.oprrr(p, p.As) + rf := int(p.From.Reg) + rt := int(p.To.Reg) + r := int(p.Reg) + if (o1&(0x1F<<24)) == (0x1E<<24) && (o1&(1<<11)) == 0 { /* monadic */ + r = rf + rf = 0 + } else if r == 0 { + r = rt + } + o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31) + + case 55: /* floating-point constant */ + var rf int + o1 = 0xf<<25 | 1<<21 | 1<<12 + rf = c.chipfloat7(p.From.Val.(float64)) + if rf < 0 { + c.ctxt.Diag("invalid floating-point immediate\n%v", p) + } + if p.As == AFMOVD { + o1 |= 1 << 22 + } + o1 |= (uint32(rf&0xff) << 13) | uint32(p.To.Reg&31) + + case 56: /* floating point compare */ + o1 = c.oprrr(p, p.As) + + var rf int + if p.From.Type == obj.TYPE_FCONST { + o1 |= 8 /* zero */ + rf = 0 + } else { + rf = int(p.From.Reg) + } + rt := int(p.Reg) + o1 |= uint32(rf&31)<<16 | uint32(rt&31)<<5 + + case 57: /* floating point conditional compare */ + o1 = c.oprrr(p, p.As) + + cond := int(p.From.Reg) + if cond < COND_EQ || cond > COND_NV { + c.ctxt.Diag("invalid condition\n%v", p) + } else { + cond -= COND_EQ + } + + nzcv := int(p.To.Offset) + if nzcv&^0xF != 0 { + c.ctxt.Diag("implausible condition\n%v", p) + } + rf := int(p.Reg) + if p.GetFrom3() == nil || p.GetFrom3().Reg < REG_F0 || p.GetFrom3().Reg > REG_F31 { + c.ctxt.Diag("illegal FCCMP\n%v", p) + break + } + rt := int(p.GetFrom3().Reg) + o1 |= uint32(rf&31)<<16 | uint32(cond&15)<<12 | uint32(rt&31)<<5 | uint32(nzcv) + + case 58: /* ldar/ldarb/ldarh/ldaxp/ldxp/ldaxr/ldxr */ + o1 = c.opload(p, p.As) + + o1 |= 0x1F << 16 + o1 |= uint32(p.From.Reg&31) << 5 + if p.As == ALDXP || p.As == ALDXPW || p.As == ALDAXP || p.As == ALDAXPW { + if int(p.To.Reg) == int(p.To.Offset) { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } + o1 |= uint32(p.To.Offset&31) << 10 + } else { + o1 |= 0x1F << 10 + } + o1 |= uint32(p.To.Reg & 31) + + case 59: /* stxr/stlxr/stxp/stlxp */ + s := p.RegTo2 + n := p.To.Reg + t := p.From.Reg + if isSTLXRop(p.As) { + if s == t || (s == n && n != REGSP) { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } + } else if isSTXPop(p.As) { + t2 := int16(p.From.Offset) + if (s == t || s == t2) || (s == n && n != REGSP) { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } + } + if s == REG_RSP { + c.ctxt.Diag("illegal destination register: %v\n", p) + } + o1 = c.opstore(p, p.As) + + if p.RegTo2 != obj.REG_NONE { + o1 |= uint32(p.RegTo2&31) << 16 + } else { + o1 |= 0x1F << 16 + } + if isSTXPop(p.As) { + o1 |= uint32(p.From.Offset&31) << 10 + } + o1 |= uint32(p.To.Reg&31)<<5 | uint32(p.From.Reg&31) + + case 60: /* adrp label,r */ + d := c.brdist(p, 12, 21, 0) + + o1 = ADR(1, uint32(d), uint32(p.To.Reg)) + + case 61: /* adr label, r */ + d := c.brdist(p, 0, 21, 0) + + o1 = ADR(0, uint32(d), uint32(p.To.Reg)) + + case 62: /* op $movcon, [R], R -> mov $movcon, REGTMP + op REGTMP, [R], R */ + if p.Reg == REGTMP { + c.ctxt.Diag("cannot use REGTMP as source: %v\n", p) + } + if isADDWop(p.As) || isANDWop(p.As) { + o1 = c.omovconst(AMOVW, p, &p.From, REGTMP) + } else { + o1 = c.omovconst(AMOVD, p, 
&p.From, REGTMP) + } + + rt := int(p.To.Reg) + if p.To.Type == obj.TYPE_NONE { + rt = REGZERO + } + r := int(p.Reg) + if r == 0 { + r = rt + } + if p.To.Reg == REGSP || r == REGSP { + o2 = c.opxrrr(p, p.As, false) + o2 |= REGTMP & 31 << 16 + o2 |= LSL0_64 + } else { + o2 = c.oprrr(p, p.As) + o2 |= REGTMP & 31 << 16 /* shift is 0 */ + } + o2 |= uint32(r&31) << 5 + o2 |= uint32(rt & 31) + + /* reloc ops */ + case 64: /* movT R,addr -> adrp + add + movT R, (REGTMP) */ + o1 = ADR(1, 0, REGTMP) + o2 = c.opirr(p, AADD) | REGTMP&31<<5 | REGTMP&31 + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.To.Sym + rel.Add = p.To.Offset + rel.Type = objabi.R_ADDRARM64 + o3 = c.olsr12u(p, int32(c.opstr12(p, p.As)), 0, REGTMP, int(p.From.Reg)) + + case 65: /* movT addr,R -> adrp + add + movT (REGTMP), R */ + o1 = ADR(1, 0, REGTMP) + o2 = c.opirr(p, AADD) | REGTMP&31<<5 | REGTMP&31 + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + rel.Add = p.From.Offset + rel.Type = objabi.R_ADDRARM64 + o3 = c.olsr12u(p, int32(c.opldr12(p, p.As)), 0, REGTMP, int(p.To.Reg)) + + case 66: /* ldp O(R)!, (r1, r2); ldp (R)O!, (r1, r2) */ + v := int32(c.regoff(&p.From)) + r := int(p.From.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid ldp source: %v\n", p) + } + o1 |= c.opldpstp(p, o, v, uint32(r), uint32(p.To.Reg), uint32(p.To.Offset), 1) + + case 67: /* stp (r1, r2), O(R)!; stp (r1, r2), (R)O! */ + r := int(p.To.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid stp destination: %v\n", p) + } + v := int32(c.regoff(&p.To)) + o1 = c.opldpstp(p, o, v, uint32(r), uint32(p.From.Reg), uint32(p.From.Offset), 0) + + case 68: /* movT $vconaddr(SB), reg -> adrp + add + reloc */ + // NOTE: this case does not use REGTMP. If it ever does, + // remove the NOTUSETMP flag in optab. + if p.As == AMOVW { + c.ctxt.Diag("invalid load of 32-bit address: %v", p) + } + o1 = ADR(1, 0, uint32(p.To.Reg)) + o2 = c.opirr(p, AADD) | uint32(p.To.Reg&31)<<5 | uint32(p.To.Reg&31) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + rel.Add = p.From.Offset + rel.Type = objabi.R_ADDRARM64 + + case 69: /* LE model movd $tlsvar, reg -> movz reg, 0 + reloc */ + o1 = c.opirr(p, AMOVZ) + o1 |= uint32(p.To.Reg & 31) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 4 + rel.Sym = p.From.Sym + rel.Type = objabi.R_ARM64_TLS_LE + if p.From.Offset != 0 { + c.ctxt.Diag("invalid offset on MOVW $tlsvar") + } + + case 70: /* IE model movd $tlsvar, reg -> adrp REGTMP, 0; ldr reg, [REGTMP, #0] + relocs */ + o1 = ADR(1, 0, REGTMP) + o2 = c.olsr12u(p, int32(c.opldr12(p, AMOVD)), 0, REGTMP, int(p.To.Reg)) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + rel.Add = 0 + rel.Type = objabi.R_ARM64_TLS_IE + if p.From.Offset != 0 { + c.ctxt.Diag("invalid offset on MOVW $tlsvar") + } + + case 71: /* movd sym@GOT, reg -> adrp REGTMP, #0; ldr reg, [REGTMP, #0] + relocs */ + o1 = ADR(1, 0, REGTMP) + o2 = c.olsr12u(p, int32(c.opldr12(p, AMOVD)), 0, REGTMP, int(p.To.Reg)) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + rel.Add = 0 + rel.Type = objabi.R_ARM64_GOTPCREL + + case 72: /* vaddp/vand/vcmeq/vorr/vadd/veor/vfmla/vfmls/vbit/vbsl/vcmtst/vsub/vbif/vuzip1/vuzip2 Vm., Vn., Vd. 
*/ + af := int((p.From.Reg >> 5) & 15) + af3 := int((p.Reg >> 5) & 15) + at := int((p.To.Reg >> 5) & 15) + if af != af3 || af != at { + c.ctxt.Diag("operand mismatch: %v", p) + break + } + o1 = c.oprrr(p, p.As) + rf := int((p.From.Reg) & 31) + rt := int((p.To.Reg) & 31) + r := int((p.Reg) & 31) + + Q := 0 + size := 0 + switch af { + case ARNG_16B: + Q = 1 + size = 0 + case ARNG_2D: + Q = 1 + size = 3 + case ARNG_2S: + Q = 0 + size = 2 + case ARNG_4H: + Q = 0 + size = 1 + case ARNG_4S: + Q = 1 + size = 2 + case ARNG_8B: + Q = 0 + size = 0 + case ARNG_8H: + Q = 1 + size = 1 + default: + c.ctxt.Diag("invalid arrangement: %v", p) + } + + switch p.As { + case AVORR, AVAND, AVEOR, AVBIT, AVBSL, AVBIF: + if af != ARNG_16B && af != ARNG_8B { + c.ctxt.Diag("invalid arrangement: %v", p) + } + case AVFMLA, AVFMLS: + if af != ARNG_2D && af != ARNG_2S && af != ARNG_4S { + c.ctxt.Diag("invalid arrangement: %v", p) + } + } + switch p.As { + case AVAND, AVEOR: + size = 0 + case AVBSL: + size = 1 + case AVORR, AVBIT, AVBIF: + size = 2 + case AVFMLA, AVFMLS: + if af == ARNG_2D { + size = 1 + } else { + size = 0 + } + } + + o1 |= (uint32(Q&1) << 30) | (uint32(size&3) << 22) | (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31) + + case 73: /* vmov V.[index], R */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + imm5 := 0 + o1 = 7<<25 | 0xf<<10 + index := int(p.From.Index) + switch (p.From.Reg >> 5) & 15 { + case ARNG_B: + c.checkindex(p, index, 15) + imm5 |= 1 + imm5 |= index << 1 + case ARNG_H: + c.checkindex(p, index, 7) + imm5 |= 2 + imm5 |= index << 2 + case ARNG_S: + c.checkindex(p, index, 3) + imm5 |= 4 + imm5 |= index << 3 + case ARNG_D: + c.checkindex(p, index, 1) + imm5 |= 8 + imm5 |= index << 4 + o1 |= 1 << 30 + default: + c.ctxt.Diag("invalid arrangement: %v", p) + } + o1 |= (uint32(imm5&0x1f) << 16) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 74: + // add $O, R, Rtmp or sub $O, R, Rtmp + // ldp (Rtmp), (R1, R2) + r := int(p.From.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid ldp source: %v", p) + } + v := int32(c.regoff(&p.From)) + + if v > 0 { + if v > 4095 { + c.ctxt.Diag("offset out of range: %v", p) + } + o1 = c.oaddi(p, int32(c.opirr(p, AADD)), v, r, REGTMP) + } + if v < 0 { + if v < -4095 { + c.ctxt.Diag("offset out of range: %v", p) + } + o1 = c.oaddi(p, int32(c.opirr(p, ASUB)), -v, r, REGTMP) + } + o2 |= c.opldpstp(p, o, 0, uint32(REGTMP), uint32(p.To.Reg), uint32(p.To.Offset), 1) + + case 75: + // mov $L, Rtmp (from constant pool) + // add Rtmp, R, Rtmp + // ldp (Rtmp), (R1, R2) + r := int(p.From.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid ldp source: %v", p) + } + o1 = c.omovlit(AMOVD, p, &p.From, REGTMP) + o2 = c.opxrrr(p, AADD, false) + o2 |= (REGTMP & 31) << 16 + o2 |= uint32(r&31) << 5 + o2 |= uint32(REGTMP & 31) + o3 |= c.opldpstp(p, o, 0, uint32(REGTMP), uint32(p.To.Reg), uint32(p.To.Offset), 1) + + case 76: + // add $O, R, Rtmp or sub $O, R, Rtmp + // stp (R1, R2), (Rtmp) + r := int(p.To.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid stp destination: %v", p) + } + v := int32(c.regoff(&p.To)) + if v > 0 { + if v > 4095 { + c.ctxt.Diag("offset out of range: %v", p) + } + o1 = c.oaddi(p, int32(c.opirr(p, AADD)), v, r, REGTMP) + } + if v < 0 { + if v < -4095 { + c.ctxt.Diag("offset out of range: %v", p) + } + o1 = c.oaddi(p, int32(c.opirr(p, ASUB)), -v, r, REGTMP) + } + o2 |= c.opldpstp(p, o, 0, uint32(REGTMP), 
uint32(p.From.Reg), uint32(p.From.Offset), 0) + + case 77: + // mov $L, Rtmp (from constant pool) + // add Rtmp, R, Rtmp + // stp (R1, R2), (Rtmp) + r := int(p.To.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid stp destination: %v", p) + } + o1 = c.omovlit(AMOVD, p, &p.To, REGTMP) + o2 = c.opxrrr(p, AADD, false) + o2 |= REGTMP & 31 << 16 + o2 |= uint32(r&31) << 5 + o2 |= uint32(REGTMP & 31) + o3 |= c.opldpstp(p, o, 0, uint32(REGTMP), uint32(p.From.Reg), uint32(p.From.Offset), 0) + + case 78: /* vmov R, V.[index] */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + imm5 := 0 + o1 = 1<<30 | 7<<25 | 7<<10 + index := int(p.To.Index) + switch (p.To.Reg >> 5) & 15 { + case ARNG_B: + c.checkindex(p, index, 15) + imm5 |= 1 + imm5 |= index << 1 + case ARNG_H: + c.checkindex(p, index, 7) + imm5 |= 2 + imm5 |= index << 2 + case ARNG_S: + c.checkindex(p, index, 3) + imm5 |= 4 + imm5 |= index << 3 + case ARNG_D: + c.checkindex(p, index, 1) + imm5 |= 8 + imm5 |= index << 4 + default: + c.ctxt.Diag("invalid arrangement: %v", p) + } + o1 |= (uint32(imm5&0x1f) << 16) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 79: /* vdup Vn.[index], Vd. */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + o1 = 7<<25 | 1<<10 + var imm5, Q int + index := int(p.From.Index) + switch (p.To.Reg >> 5) & 15 { + case ARNG_16B: + c.checkindex(p, index, 15) + Q = 1 + imm5 = 1 + imm5 |= index << 1 + case ARNG_2D: + c.checkindex(p, index, 1) + Q = 1 + imm5 = 8 + imm5 |= index << 4 + case ARNG_2S: + c.checkindex(p, index, 3) + Q = 0 + imm5 = 4 + imm5 |= index << 3 + case ARNG_4H: + c.checkindex(p, index, 7) + Q = 0 + imm5 = 2 + imm5 |= index << 2 + case ARNG_4S: + c.checkindex(p, index, 3) + Q = 1 + imm5 = 4 + imm5 |= index << 3 + case ARNG_8B: + c.checkindex(p, index, 15) + Q = 0 + imm5 = 1 + imm5 |= index << 1 + case ARNG_8H: + c.checkindex(p, index, 7) + Q = 1 + imm5 = 2 + imm5 |= index << 2 + default: + c.ctxt.Diag("invalid arrangement: %v", p) + } + o1 |= (uint32(Q&1) << 30) | (uint32(imm5&0x1f) << 16) + o1 |= (uint32(rf&31) << 5) | uint32(rt&31) + + case 80: /* vmov V.[index], Vn */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + imm5 := 0 + index := int(p.From.Index) + switch p.As { + case AVMOV: + o1 = 1<<30 | 15<<25 | 1<<10 + switch (p.From.Reg >> 5) & 15 { + case ARNG_B: + c.checkindex(p, index, 15) + imm5 |= 1 + imm5 |= index << 1 + case ARNG_H: + c.checkindex(p, index, 7) + imm5 |= 2 + imm5 |= index << 2 + case ARNG_S: + c.checkindex(p, index, 3) + imm5 |= 4 + imm5 |= index << 3 + case ARNG_D: + c.checkindex(p, index, 1) + imm5 |= 8 + imm5 |= index << 4 + default: + c.ctxt.Diag("invalid arrangement: %v", p) + } + default: + c.ctxt.Diag("unsupported op %v", p.As) + } + o1 |= (uint32(imm5&0x1f) << 16) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 81: /* vld[1-4]|vld[1-4]r (Rn), [Vt1., Vt2., ...] */ + c.checkoffset(p, p.As) + r := int(p.From.Reg) + o1 = c.oprrr(p, p.As) + if o.scond == C_XPOST { + o1 |= 1 << 23 + if p.From.Index == 0 { + // immediate offset variant + o1 |= 0x1f << 16 + } else { + // register offset variant + if isRegShiftOrExt(&p.From) { + c.ctxt.Diag("invalid extended register op: %v\n", p) + } + o1 |= uint32(p.From.Index&0x1f) << 16 + } + } + o1 |= uint32(p.To.Offset) + // cmd/asm/internal/arch/arm64.go:ARM64RegisterListOffset + // add opcode(bit 12-15) for vld1, mask it off if it's not vld1 + o1 = c.maskOpvldvst(p, o1) + o1 |= uint32(r&31) << 5 + + case 82: /* vmov Rn, Vd. 
*/ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + o1 = 7<<25 | 3<<10 + var imm5, Q uint32 + switch (p.To.Reg >> 5) & 15 { + case ARNG_16B: + Q = 1 + imm5 = 1 + case ARNG_2D: + Q = 1 + imm5 = 8 + case ARNG_2S: + Q = 0 + imm5 = 4 + case ARNG_4H: + Q = 0 + imm5 = 2 + case ARNG_4S: + Q = 1 + imm5 = 4 + case ARNG_8B: + Q = 0 + imm5 = 1 + case ARNG_8H: + Q = 1 + imm5 = 2 + default: + c.ctxt.Diag("invalid arrangement on VMOV Rn, Vd.: %v\n", p) + } + o1 |= (Q & 1 << 30) | (imm5 & 0x1f << 16) + o1 |= (uint32(rf&31) << 5) | uint32(rt&31) + + case 83: /* vmov Vn., Vd. */ + af := int((p.From.Reg >> 5) & 15) + at := int((p.To.Reg >> 5) & 15) + if af != at { + c.ctxt.Diag("invalid arrangement: %v\n", p) + } + o1 = c.oprrr(p, p.As) + rf := int((p.From.Reg) & 31) + rt := int((p.To.Reg) & 31) + + var Q, size uint32 + switch af { + case ARNG_8B: + Q = 0 + size = 0 + case ARNG_16B: + Q = 1 + size = 0 + case ARNG_4H: + Q = 0 + size = 1 + case ARNG_8H: + Q = 1 + size = 1 + case ARNG_2S: + Q = 0 + size = 2 + case ARNG_4S: + Q = 1 + size = 2 + default: + c.ctxt.Diag("invalid arrangement: %v\n", p) + } + + if (p.As == AVMOV || p.As == AVRBIT || p.As == AVCNT) && (af != ARNG_16B && af != ARNG_8B) { + c.ctxt.Diag("invalid arrangement: %v", p) + } + + if p.As == AVREV32 && (af == ARNG_2S || af == ARNG_4S) { + c.ctxt.Diag("invalid arrangement: %v", p) + } + + if p.As == AVREV16 && af != ARNG_8B && af != ARNG_16B { + c.ctxt.Diag("invalid arrangement: %v", p) + } + + if p.As == AVMOV { + o1 |= uint32(rf&31) << 16 + } + + if p.As == AVRBIT { + size = 1 + } + + o1 |= (Q&1)<<30 | (size&3)<<22 | uint32(rf&31)<<5 | uint32(rt&31) + + case 84: /* vst[1-4] [Vt1., Vt2., ...], (Rn) */ + c.checkoffset(p, p.As) + r := int(p.To.Reg) + o1 = 3 << 26 + if o.scond == C_XPOST { + o1 |= 1 << 23 + if p.To.Index == 0 { + // immediate offset variant + o1 |= 0x1f << 16 + } else { + // register offset variant + if isRegShiftOrExt(&p.To) { + c.ctxt.Diag("invalid extended register: %v\n", p) + } + o1 |= uint32(p.To.Index&31) << 16 + } + } + o1 |= uint32(p.From.Offset) + // cmd/asm/internal/arch/arm64.go:ARM64RegisterListOffset + // add opcode(bit 12-15) for vst1, mask it off if it's not vst1 + o1 = c.maskOpvldvst(p, o1) + o1 |= uint32(r&31) << 5 + + case 85: /* vaddv/vuaddlv Vn., Vd*/ + af := int((p.From.Reg >> 5) & 15) + o1 = c.oprrr(p, p.As) + rf := int((p.From.Reg) & 31) + rt := int((p.To.Reg) & 31) + Q := 0 + size := 0 + switch af { + case ARNG_8B: + Q = 0 + size = 0 + case ARNG_16B: + Q = 1 + size = 0 + case ARNG_4H: + Q = 0 + size = 1 + case ARNG_8H: + Q = 1 + size = 1 + case ARNG_4S: + Q = 1 + size = 2 + default: + c.ctxt.Diag("invalid arrangement: %v\n", p) + } + o1 |= (uint32(Q&1) << 30) | (uint32(size&3) << 22) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 86: /* vmovi $imm8, Vd.*/ + at := int((p.To.Reg >> 5) & 15) + r := int(p.From.Offset) + if r > 255 || r < 0 { + c.ctxt.Diag("immediate constant out of range: %v\n", p) + } + rt := int((p.To.Reg) & 31) + Q := 0 + switch at { + case ARNG_8B: + Q = 0 + case ARNG_16B: + Q = 1 + default: + c.ctxt.Diag("invalid arrangement: %v\n", p) + } + o1 = 0xf<<24 | 0xe<<12 | 1<<10 + o1 |= (uint32(Q&1) << 30) | (uint32((r>>5)&7) << 16) | (uint32(r&0x1f) << 5) | uint32(rt&31) + + case 87: /* stp (r,r), addr(SB) -> adrp + add + stp */ + o1 = ADR(1, 0, REGTMP) + o2 = c.opirr(p, AADD) | REGTMP&31<<5 | REGTMP&31 + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.To.Sym + rel.Add = p.To.Offset + rel.Type = objabi.R_ADDRARM64 + o3 |= c.opldpstp(p, o, 0, uint32(REGTMP), 
uint32(p.From.Reg), uint32(p.From.Offset), 0) + + case 88: /* ldp addr(SB), (r,r) -> adrp + add + ldp */ + o1 = ADR(1, 0, REGTMP) + o2 = c.opirr(p, AADD) | REGTMP&31<<5 | REGTMP&31 + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + rel.Add = p.From.Offset + rel.Type = objabi.R_ADDRARM64 + o3 |= c.opldpstp(p, o, 0, uint32(REGTMP), uint32(p.To.Reg), uint32(p.To.Offset), 1) + + case 89: /* vadd/vsub Vm, Vn, Vd */ + switch p.As { + case AVADD: + o1 = 5<<28 | 7<<25 | 7<<21 | 1<<15 | 1<<10 + + case AVSUB: + o1 = 7<<28 | 7<<25 | 7<<21 | 1<<15 | 1<<10 + + default: + c.ctxt.Diag("bad opcode: %v\n", p) + break + } + + rf := int(p.From.Reg) + rt := int(p.To.Reg) + r := int(p.Reg) + if r == 0 { + r = rt + } + o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31) + + // This is supposed to be something that stops execution. + // It's not supposed to be reached, ever, but if it is, we'd + // like to be able to tell how we got there. Assemble as + // 0xbea71700 which is guaranteed to raise undefined instruction + // exception. + case 90: + o1 = 0xbea71700 + + case 91: /* prfm imm(Rn), */ + imm := uint32(p.From.Offset) + r := p.From.Reg + v := uint32(0xff) + if p.To.Type == obj.TYPE_CONST { + v = uint32(p.To.Offset) + if v > 31 { + c.ctxt.Diag("illegal prefetch operation\n%v", p) + } + } else { + for i := 0; i < len(prfopfield); i++ { + if prfopfield[i].reg == p.To.Reg { + v = prfopfield[i].enc + break + } + } + if v == 0xff { + c.ctxt.Diag("illegal prefetch operation:\n%v", p) + } + } + + o1 = c.opldrpp(p, p.As) + o1 |= (uint32(r&31) << 5) | (uint32((imm>>3)&0xfff) << 10) | (uint32(v & 31)) + + case 92: /* vmov Vn.[index], Vd.[index] */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + imm4 := 0 + imm5 := 0 + o1 = 3<<29 | 7<<25 | 1<<10 + index1 := int(p.To.Index) + index2 := int(p.From.Index) + if ((p.To.Reg >> 5) & 15) != ((p.From.Reg >> 5) & 15) { + c.ctxt.Diag("operand mismatch: %v", p) + } + switch (p.To.Reg >> 5) & 15 { + case ARNG_B: + c.checkindex(p, index1, 15) + c.checkindex(p, index2, 15) + imm5 |= 1 + imm5 |= index1 << 1 + imm4 |= index2 + case ARNG_H: + c.checkindex(p, index1, 7) + c.checkindex(p, index2, 7) + imm5 |= 2 + imm5 |= index1 << 2 + imm4 |= index2 << 1 + case ARNG_S: + c.checkindex(p, index1, 3) + c.checkindex(p, index2, 3) + imm5 |= 4 + imm5 |= index1 << 3 + imm4 |= index2 << 2 + case ARNG_D: + c.checkindex(p, index1, 1) + c.checkindex(p, index2, 1) + imm5 |= 8 + imm5 |= index1 << 4 + imm4 |= index2 << 3 + default: + c.ctxt.Diag("invalid arrangement: %v", p) + } + o1 |= (uint32(imm5&0x1f) << 16) | (uint32(imm4&0xf) << 11) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 93: /* vpmull{2} Vm., Vn., Vd */ + af := int((p.From.Reg >> 5) & 15) + at := int((p.To.Reg >> 5) & 15) + a := int((p.Reg >> 5) & 15) + + var Q, size uint32 + if p.As == AVPMULL { + Q = 0 + } else { + Q = 1 + } + + var fArng int + switch at { + case ARNG_8H: + if Q == 0 { + fArng = ARNG_8B + } else { + fArng = ARNG_16B + } + size = 0 + case ARNG_1Q: + if Q == 0 { + fArng = ARNG_1D + } else { + fArng = ARNG_2D + } + size = 3 + default: + c.ctxt.Diag("invalid arrangement on Vd.: %v", p) + } + + if af != a || af != fArng { + c.ctxt.Diag("invalid arrangement: %v", p) + } + + o1 = c.oprrr(p, p.As) + rf := int((p.From.Reg) & 31) + rt := int((p.To.Reg) & 31) + r := int((p.Reg) & 31) + + o1 |= ((Q & 1) << 30) | ((size & 3) << 22) | (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31) + + case 94: /* vext $imm4, Vm., Vn., Vd. 
*/ + af := int(((p.GetFrom3().Reg) >> 5) & 15) + at := int((p.To.Reg >> 5) & 15) + a := int((p.Reg >> 5) & 15) + index := int(p.From.Offset) + + if af != a || af != at { + c.ctxt.Diag("invalid arrangement: %v", p) + break + } + + var Q uint32 + var b int + if af == ARNG_8B { + Q = 0 + b = 7 + } else if af == ARNG_16B { + Q = 1 + b = 15 + } else { + c.ctxt.Diag("invalid arrangement, should be B8 or B16: %v", p) + break + } + + if index < 0 || index > b { + c.ctxt.Diag("illegal offset: %v", p) + } + + o1 = c.opirr(p, p.As) + rf := int((p.GetFrom3().Reg) & 31) + rt := int((p.To.Reg) & 31) + r := int((p.Reg) & 31) + + o1 |= ((Q & 1) << 30) | (uint32(r&31) << 16) | (uint32(index&15) << 11) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 95: /* vushr $shift, Vn., Vd. */ + at := int((p.To.Reg >> 5) & 15) + af := int((p.Reg >> 5) & 15) + shift := int(p.From.Offset) + + if af != at { + c.ctxt.Diag("invalid arrangement on op Vn., Vd.: %v", p) + } + + var Q uint32 + var imax, esize int + + switch af { + case ARNG_8B, ARNG_4H, ARNG_2S: + Q = 0 + case ARNG_16B, ARNG_8H, ARNG_4S, ARNG_2D: + Q = 1 + default: + c.ctxt.Diag("invalid arrangement on op Vn., Vd.: %v", p) + } + + switch af { + case ARNG_8B, ARNG_16B: + imax = 15 + esize = 8 + case ARNG_4H, ARNG_8H: + imax = 31 + esize = 16 + case ARNG_2S, ARNG_4S: + imax = 63 + esize = 32 + case ARNG_2D: + imax = 127 + esize = 64 + } + + imm := 0 + + switch p.As { + case AVUSHR, AVSRI: + imm = esize*2 - shift + if imm < esize || imm > imax { + c.ctxt.Diag("shift out of range: %v", p) + } + case AVSHL: + imm = esize + shift + if imm > imax { + c.ctxt.Diag("shift out of range: %v", p) + } + default: + c.ctxt.Diag("invalid instruction %v\n", p) + } + + o1 = c.opirr(p, p.As) + rt := int((p.To.Reg) & 31) + rf := int((p.Reg) & 31) + + o1 |= ((Q & 1) << 30) | (uint32(imm&127) << 16) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 96: /* vst1 Vt1.[index], offset(Rn) */ + af := int((p.From.Reg >> 5) & 15) + rt := int((p.From.Reg) & 31) + rf := int((p.To.Reg) & 31) + r := int(p.To.Index & 31) + index := int(p.From.Index) + offset := int32(c.regoff(&p.To)) + + if o.scond == C_XPOST { + if (p.To.Index != 0) && (offset != 0) { + c.ctxt.Diag("invalid offset: %v", p) + } + if p.To.Index == 0 && offset == 0 { + c.ctxt.Diag("invalid offset: %v", p) + } + } + + if offset != 0 { + r = 31 + } + + var Q, S, size int + var opcode uint32 + switch af { + case ARNG_B: + c.checkindex(p, index, 15) + if o.scond == C_XPOST && offset != 0 && offset != 1 { + c.ctxt.Diag("invalid offset: %v", p) + } + Q = index >> 3 + S = (index >> 2) & 1 + size = index & 3 + opcode = 0 + case ARNG_H: + c.checkindex(p, index, 7) + if o.scond == C_XPOST && offset != 0 && offset != 2 { + c.ctxt.Diag("invalid offset: %v", p) + } + Q = index >> 2 + S = (index >> 1) & 1 + size = (index & 1) << 1 + opcode = 2 + case ARNG_S: + c.checkindex(p, index, 3) + if o.scond == C_XPOST && offset != 0 && offset != 4 { + c.ctxt.Diag("invalid offset: %v", p) + } + Q = index >> 1 + S = index & 1 + size = 0 + opcode = 4 + case ARNG_D: + c.checkindex(p, index, 1) + if o.scond == C_XPOST && offset != 0 && offset != 8 { + c.ctxt.Diag("invalid offset: %v", p) + } + Q = index + S = 0 + size = 1 + opcode = 4 + default: + c.ctxt.Diag("invalid arrangement: %v", p) + } + + if o.scond == C_XPOST { + o1 |= 27 << 23 + } else { + o1 |= 26 << 23 + } + + o1 |= (uint32(Q&1) << 30) | (uint32(r&31) << 16) | ((opcode & 7) << 13) | (uint32(S&1) << 12) | (uint32(size&3) << 10) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 97: /* vld1 offset(Rn), 
vt.<T>[index] */
+		at := int((p.To.Reg >> 5) & 15)
+		rt := int((p.To.Reg) & 31)
+		rf := int((p.From.Reg) & 31)
+		r := int(p.From.Index & 31)
+		index := int(p.To.Index)
+		offset := int32(c.regoff(&p.From))
+
+		if o.scond == C_XPOST {
+			if (p.From.Index != 0) && (offset != 0) {
+				c.ctxt.Diag("invalid offset: %v", p)
+			}
+			if p.From.Index == 0 && offset == 0 {
+				c.ctxt.Diag("invalid offset: %v", p)
+			}
+		}
+
+		if offset != 0 {
+			r = 31
+		}
+
+		Q := 0
+		S := 0
+		size := 0
+		var opcode uint32
+		switch at {
+		case ARNG_B:
+			c.checkindex(p, index, 15)
+			if o.scond == C_XPOST && offset != 0 && offset != 1 {
+				c.ctxt.Diag("invalid offset: %v", p)
+			}
+			Q = index >> 3
+			S = (index >> 2) & 1
+			size = index & 3
+			opcode = 0
+		case ARNG_H:
+			c.checkindex(p, index, 7)
+			if o.scond == C_XPOST && offset != 0 && offset != 2 {
+				c.ctxt.Diag("invalid offset: %v", p)
+			}
+			Q = index >> 2
+			S = (index >> 1) & 1
+			size = (index & 1) << 1
+			opcode = 2
+		case ARNG_S:
+			c.checkindex(p, index, 3)
+			if o.scond == C_XPOST && offset != 0 && offset != 4 {
+				c.ctxt.Diag("invalid offset: %v", p)
+			}
+			Q = index >> 1
+			S = index & 1
+			size = 0
+			opcode = 4
+		case ARNG_D:
+			c.checkindex(p, index, 1)
+			if o.scond == C_XPOST && offset != 0 && offset != 8 {
+				c.ctxt.Diag("invalid offset: %v", p)
+			}
+			Q = index
+			S = 0
+			size = 1
+			opcode = 4
+		default:
+			c.ctxt.Diag("invalid arrangement: %v", p)
+		}
+
+		if o.scond == C_XPOST {
+			o1 |= 110 << 21
+		} else {
+			o1 |= 106 << 21
+		}
+
+		o1 |= (uint32(Q&1) << 30) | (uint32(r&31) << 16) | ((opcode & 7) << 13) | (uint32(S&1) << 12) | (uint32(size&3) << 10) | (uint32(rf&31) << 5) | uint32(rt&31)
+
+	case 98: /* MOVD (Rn)(Rm.SXTW[<<amount]),Rd */
+		if isRegShiftOrExt(&p.From) {
+			// extended or shifted offset register.
+			c.checkShiftAmount(p, &p.From)
+
+			o1 = c.opldrr(p, p.As, true)
+			o1 |= c.encRegShiftOrExt(&p.From, p.From.Index) /* includes reg, op, etc */
+		} else {
+			// (Rn)(Rm), no extension or shift.
+			o1 = c.opldrr(p, p.As, false)
+			o1 |= uint32(p.From.Index&31) << 16
+		}
+		o1 |= uint32(p.From.Reg&31) << 5
+		rt := int(p.To.Reg)
+		o1 |= uint32(rt & 31)
+
+	case 99: /* MOVD Rt, (Rn)(Rm.SXTW[<<amount]) */
+		if isRegShiftOrExt(&p.To) {
+			// extended or shifted offset register.
+			c.checkShiftAmount(p, &p.To)
+
+			o1 = c.opstrr(p, p.As, true)
+			o1 |= c.encRegShiftOrExt(&p.To, p.To.Index) /* includes reg, op, etc */
+		} else {
+			// (Rn)(Rm), no extension or shift.
+			o1 = c.opstrr(p, p.As, false)
+			o1 |= uint32(p.To.Index&31) << 16
+		}
+		o1 |= uint32(p.To.Reg&31) << 5
+		rt := int(p.From.Reg)
+		o1 |= uint32(rt & 31)
+
+	case 100: /* VTBL Vn.<T>, [Vt1.<T>, Vt2.<T>, ...], Vd.<T> */
+		af := int((p.From.Reg >> 5) & 15)
+		at := int((p.To.Reg >> 5) & 15)
+		if af != at {
+			c.ctxt.Diag("invalid arrangement: %v\n", p)
+		}
+		var q, len uint32
+		switch af {
+		case ARNG_8B:
+			q = 0
+		case ARNG_16B:
+			q = 1
+		default:
+			c.ctxt.Diag("invalid arrangement: %v", p)
+		}
+		rf := int(p.From.Reg)
+		rt := int(p.To.Reg)
+		offset := int(p.GetFrom3().Offset)
+		opcode := (offset >> 12) & 15
+		switch opcode {
+		case 0x7:
+			len = 0 // one register
+		case 0xa:
+			len = 1 // two registers
+		case 0x6:
+			len = 2 // three registers
+		case 0x2:
+			len = 3 // four registers
+		default:
+			c.ctxt.Diag("invalid register numbers in ARM64 register list: %v", p)
+		}
+		o1 = q<<30 | 0xe<<24 | len<<13
+		o1 |= (uint32(rf&31) << 16) | uint32(offset&31)<<5 | uint32(rt&31)
+
+	case 101: // FMOVQ/FMOVD $vcon, Vd -> load from constant pool.
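+		// For example (an assumed case, not from the original source): a
+		// constant such as FMOVD $0.1234, F2 has no 8-bit floating-point
+		// immediate encoding, so the value is placed in the literal pool and
+		// omovlit below emits a PC-relative LDR (literal) into F2.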
+ o1 = c.omovlit(p.As, p, &p.From, int(p.To.Reg)) + + case 102: /* vushll, vushll2, vuxtl, vuxtl2 */ + o1 = c.opirr(p, p.As) + rf := p.Reg + af := uint8((p.Reg >> 5) & 15) + at := uint8((p.To.Reg >> 5) & 15) + shift := int(p.From.Offset) + if p.As == AVUXTL || p.As == AVUXTL2 { + rf = p.From.Reg + af = uint8((p.From.Reg >> 5) & 15) + shift = 0 + } + + pack := func(q, x, y uint8) uint32 { + return uint32(q)<<16 | uint32(x)<<8 | uint32(y) + } + + var Q uint8 = uint8(o1>>30) & 1 + var immh, width uint8 + switch pack(Q, af, at) { + case pack(0, ARNG_8B, ARNG_8H): + immh, width = 1, 8 + case pack(1, ARNG_16B, ARNG_8H): + immh, width = 1, 8 + case pack(0, ARNG_4H, ARNG_4S): + immh, width = 2, 16 + case pack(1, ARNG_8H, ARNG_4S): + immh, width = 2, 16 + case pack(0, ARNG_2S, ARNG_2D): + immh, width = 4, 32 + case pack(1, ARNG_4S, ARNG_2D): + immh, width = 4, 32 + default: + c.ctxt.Diag("operand mismatch: %v\n", p) + } + if !(0 <= shift && shift <= int(width-1)) { + c.ctxt.Diag("shift amount out of range: %v\n", p) + } + o1 |= uint32(immh)<<19 | uint32(shift)<<16 | uint32(rf&31)<<5 | uint32(p.To.Reg&31) + } + out[0] = o1 + out[1] = o2 + out[2] = o3 + out[3] = o4 + out[4] = o5 +} + +/* + * basic Rm op Rn -> Rd (using shifted register with 0) + * also op Rn -> Rt + * also Rm*Rn op Ra -> Rd + * also Vm op Vn -> Vd + */ +func (c *ctxt7) oprrr(p *obj.Prog, a obj.As) uint32 { + switch a { + case AADC: + return S64 | 0<<30 | 0<<29 | 0xd0<<21 | 0<<10 + + case AADCW: + return S32 | 0<<30 | 0<<29 | 0xd0<<21 | 0<<10 + + case AADCS: + return S64 | 0<<30 | 1<<29 | 0xd0<<21 | 0<<10 + + case AADCSW: + return S32 | 0<<30 | 1<<29 | 0xd0<<21 | 0<<10 + + case ANGC, ASBC: + return S64 | 1<<30 | 0<<29 | 0xd0<<21 | 0<<10 + + case ANGCS, ASBCS: + return S64 | 1<<30 | 1<<29 | 0xd0<<21 | 0<<10 + + case ANGCW, ASBCW: + return S32 | 1<<30 | 0<<29 | 0xd0<<21 | 0<<10 + + case ANGCSW, ASBCSW: + return S32 | 1<<30 | 1<<29 | 0xd0<<21 | 0<<10 + + case AADD: + return S64 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10 + + case AADDW: + return S32 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10 + + case ACMN, AADDS: + return S64 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10 + + case ACMNW, AADDSW: + return S32 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10 + + case ASUB: + return S64 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10 + + case ASUBW: + return S32 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10 + + case ACMP, ASUBS: + return S64 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10 + + case ACMPW, ASUBSW: + return S32 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 0<<21 | 0<<10 + + case AAND: + return S64 | 0<<29 | 0xA<<24 + + case AANDW: + return S32 | 0<<29 | 0xA<<24 + + case AMOVD, AORR: + return S64 | 1<<29 | 0xA<<24 + + // case AMOVW: + case AMOVWU, AORRW: + return S32 | 1<<29 | 0xA<<24 + + case AEOR: + return S64 | 2<<29 | 0xA<<24 + + case AEORW: + return S32 | 2<<29 | 0xA<<24 + + case AANDS, ATST: + return S64 | 3<<29 | 0xA<<24 + + case AANDSW, ATSTW: + return S32 | 3<<29 | 0xA<<24 + + case ABIC: + return S64 | 0<<29 | 0xA<<24 | 1<<21 + + case ABICW: + return S32 | 0<<29 | 0xA<<24 | 1<<21 + + case ABICS: + return S64 | 3<<29 | 0xA<<24 | 1<<21 + + case ABICSW: + return S32 | 3<<29 | 0xA<<24 | 1<<21 + + case AEON: + return S64 | 2<<29 | 0xA<<24 | 1<<21 + + case AEONW: + return S32 | 2<<29 | 0xA<<24 | 1<<21 + + case AMVN, AORN: + return S64 | 1<<29 | 0xA<<24 | 1<<21 + + case AMVNW, AORNW: + return S32 | 1<<29 | 0xA<<24 | 1<<21 + + case AASR: + return S64 | OPDP2(10) /* also ASRV */ + + case AASRW: + return 
S32 | OPDP2(10) + + case ALSL: + return S64 | OPDP2(8) + + case ALSLW: + return S32 | OPDP2(8) + + case ALSR: + return S64 | OPDP2(9) + + case ALSRW: + return S32 | OPDP2(9) + + case AROR: + return S64 | OPDP2(11) + + case ARORW: + return S32 | OPDP2(11) + + case ACCMN: + return S64 | 0<<30 | 1<<29 | 0xD2<<21 | 0<<11 | 0<<10 | 0<<4 /* cond<<12 | nzcv<<0 */ + + case ACCMNW: + return S32 | 0<<30 | 1<<29 | 0xD2<<21 | 0<<11 | 0<<10 | 0<<4 + + case ACCMP: + return S64 | 1<<30 | 1<<29 | 0xD2<<21 | 0<<11 | 0<<10 | 0<<4 /* imm5<<16 | cond<<12 | nzcv<<0 */ + + case ACCMPW: + return S32 | 1<<30 | 1<<29 | 0xD2<<21 | 0<<11 | 0<<10 | 0<<4 + + case ACRC32B: + return S32 | OPDP2(16) + + case ACRC32H: + return S32 | OPDP2(17) + + case ACRC32W: + return S32 | OPDP2(18) + + case ACRC32X: + return S64 | OPDP2(19) + + case ACRC32CB: + return S32 | OPDP2(20) + + case ACRC32CH: + return S32 | OPDP2(21) + + case ACRC32CW: + return S32 | OPDP2(22) + + case ACRC32CX: + return S64 | OPDP2(23) + + case ACSEL: + return S64 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10 + + case ACSELW: + return S32 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10 + + case ACSET: + return S64 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10 + + case ACSETW: + return S32 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10 + + case ACSETM: + return S64 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10 + + case ACSETMW: + return S32 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10 + + case ACINC, ACSINC: + return S64 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10 + + case ACINCW, ACSINCW: + return S32 | 0<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10 + + case ACINV, ACSINV: + return S64 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10 + + case ACINVW, ACSINVW: + return S32 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 0<<10 + + case ACNEG, ACSNEG: + return S64 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10 + + case ACNEGW, ACSNEGW: + return S32 | 1<<30 | 0<<29 | 0xD4<<21 | 0<<11 | 1<<10 + + case AMUL, AMADD: + return S64 | 0<<29 | 0x1B<<24 | 0<<21 | 0<<15 + + case AMULW, AMADDW: + return S32 | 0<<29 | 0x1B<<24 | 0<<21 | 0<<15 + + case AMNEG, AMSUB: + return S64 | 0<<29 | 0x1B<<24 | 0<<21 | 1<<15 + + case AMNEGW, AMSUBW: + return S32 | 0<<29 | 0x1B<<24 | 0<<21 | 1<<15 + + case AMRS: + return SYSOP(1, 2, 0, 0, 0, 0, 0) + + case AMSR: + return SYSOP(0, 2, 0, 0, 0, 0, 0) + + case ANEG: + return S64 | 1<<30 | 0<<29 | 0xB<<24 | 0<<21 + + case ANEGW: + return S32 | 1<<30 | 0<<29 | 0xB<<24 | 0<<21 + + case ANEGS: + return S64 | 1<<30 | 1<<29 | 0xB<<24 | 0<<21 + + case ANEGSW: + return S32 | 1<<30 | 1<<29 | 0xB<<24 | 0<<21 + + case AREM, ASDIV: + return S64 | OPDP2(3) + + case AREMW, ASDIVW: + return S32 | OPDP2(3) + + case ASMULL, ASMADDL: + return OPDP3(1, 0, 1, 0) + + case ASMNEGL, ASMSUBL: + return OPDP3(1, 0, 1, 1) + + case ASMULH: + return OPDP3(1, 0, 2, 0) + + case AUMULL, AUMADDL: + return OPDP3(1, 0, 5, 0) + + case AUMNEGL, AUMSUBL: + return OPDP3(1, 0, 5, 1) + + case AUMULH: + return OPDP3(1, 0, 6, 0) + + case AUREM, AUDIV: + return S64 | OPDP2(2) + + case AUREMW, AUDIVW: + return S32 | OPDP2(2) + + case AAESE: + return 0x4E<<24 | 2<<20 | 8<<16 | 4<<12 | 2<<10 + + case AAESD: + return 0x4E<<24 | 2<<20 | 8<<16 | 5<<12 | 2<<10 + + case AAESMC: + return 0x4E<<24 | 2<<20 | 8<<16 | 6<<12 | 2<<10 + + case AAESIMC: + return 0x4E<<24 | 2<<20 | 8<<16 | 7<<12 | 2<<10 + + case ASHA1C: + return 0x5E<<24 | 0<<12 + + case ASHA1P: + return 0x5E<<24 | 1<<12 + + case ASHA1M: + return 0x5E<<24 | 2<<12 + + case ASHA1SU0: + return 0x5E<<24 | 3<<12 + + case ASHA256H: + return 0x5E<<24 | 4<<12 + + case ASHA256H2: + return 
0x5E<<24 | 5<<12 + + case ASHA256SU1: + return 0x5E<<24 | 6<<12 + + case ASHA1H: + return 0x5E<<24 | 2<<20 | 8<<16 | 0<<12 | 2<<10 + + case ASHA1SU1: + return 0x5E<<24 | 2<<20 | 8<<16 | 1<<12 | 2<<10 + + case ASHA256SU0: + return 0x5E<<24 | 2<<20 | 8<<16 | 2<<12 | 2<<10 + + case ASHA512H: + return 0xCE<<24 | 3<<21 | 8<<12 + + case ASHA512H2: + return 0xCE<<24 | 3<<21 | 8<<12 | 4<<8 + + case ASHA512SU1: + return 0xCE<<24 | 3<<21 | 8<<12 | 8<<8 + + case ASHA512SU0: + return 0xCE<<24 | 3<<22 | 8<<12 + + case AFCVTZSD: + return FPCVTI(1, 0, 1, 3, 0) + + case AFCVTZSDW: + return FPCVTI(0, 0, 1, 3, 0) + + case AFCVTZSS: + return FPCVTI(1, 0, 0, 3, 0) + + case AFCVTZSSW: + return FPCVTI(0, 0, 0, 3, 0) + + case AFCVTZUD: + return FPCVTI(1, 0, 1, 3, 1) + + case AFCVTZUDW: + return FPCVTI(0, 0, 1, 3, 1) + + case AFCVTZUS: + return FPCVTI(1, 0, 0, 3, 1) + + case AFCVTZUSW: + return FPCVTI(0, 0, 0, 3, 1) + + case ASCVTFD: + return FPCVTI(1, 0, 1, 0, 2) + + case ASCVTFS: + return FPCVTI(1, 0, 0, 0, 2) + + case ASCVTFWD: + return FPCVTI(0, 0, 1, 0, 2) + + case ASCVTFWS: + return FPCVTI(0, 0, 0, 0, 2) + + case AUCVTFD: + return FPCVTI(1, 0, 1, 0, 3) + + case AUCVTFS: + return FPCVTI(1, 0, 0, 0, 3) + + case AUCVTFWD: + return FPCVTI(0, 0, 1, 0, 3) + + case AUCVTFWS: + return FPCVTI(0, 0, 0, 0, 3) + + case AFADDS: + return FPOP2S(0, 0, 0, 2) + + case AFADDD: + return FPOP2S(0, 0, 1, 2) + + case AFSUBS: + return FPOP2S(0, 0, 0, 3) + + case AFSUBD: + return FPOP2S(0, 0, 1, 3) + + case AFMADDD: + return FPOP3S(0, 0, 1, 0, 0) + + case AFMADDS: + return FPOP3S(0, 0, 0, 0, 0) + + case AFMSUBD: + return FPOP3S(0, 0, 1, 0, 1) + + case AFMSUBS: + return FPOP3S(0, 0, 0, 0, 1) + + case AFNMADDD: + return FPOP3S(0, 0, 1, 1, 0) + + case AFNMADDS: + return FPOP3S(0, 0, 0, 1, 0) + + case AFNMSUBD: + return FPOP3S(0, 0, 1, 1, 1) + + case AFNMSUBS: + return FPOP3S(0, 0, 0, 1, 1) + + case AFMULS: + return FPOP2S(0, 0, 0, 0) + + case AFMULD: + return FPOP2S(0, 0, 1, 0) + + case AFDIVS: + return FPOP2S(0, 0, 0, 1) + + case AFDIVD: + return FPOP2S(0, 0, 1, 1) + + case AFMAXS: + return FPOP2S(0, 0, 0, 4) + + case AFMINS: + return FPOP2S(0, 0, 0, 5) + + case AFMAXD: + return FPOP2S(0, 0, 1, 4) + + case AFMIND: + return FPOP2S(0, 0, 1, 5) + + case AFMAXNMS: + return FPOP2S(0, 0, 0, 6) + + case AFMAXNMD: + return FPOP2S(0, 0, 1, 6) + + case AFMINNMS: + return FPOP2S(0, 0, 0, 7) + + case AFMINNMD: + return FPOP2S(0, 0, 1, 7) + + case AFNMULS: + return FPOP2S(0, 0, 0, 8) + + case AFNMULD: + return FPOP2S(0, 0, 1, 8) + + case AFCMPS: + return FPCMP(0, 0, 0, 0, 0) + + case AFCMPD: + return FPCMP(0, 0, 1, 0, 0) + + case AFCMPES: + return FPCMP(0, 0, 0, 0, 16) + + case AFCMPED: + return FPCMP(0, 0, 1, 0, 16) + + case AFCCMPS: + return FPCCMP(0, 0, 0, 0) + + case AFCCMPD: + return FPCCMP(0, 0, 1, 0) + + case AFCCMPES: + return FPCCMP(0, 0, 0, 1) + + case AFCCMPED: + return FPCCMP(0, 0, 1, 1) + + case AFCSELS: + return 0x1E<<24 | 0<<22 | 1<<21 | 3<<10 + + case AFCSELD: + return 0x1E<<24 | 1<<22 | 1<<21 | 3<<10 + + case AFMOVS: + return FPOP1S(0, 0, 0, 0) + + case AFABSS: + return FPOP1S(0, 0, 0, 1) + + case AFNEGS: + return FPOP1S(0, 0, 0, 2) + + case AFSQRTS: + return FPOP1S(0, 0, 0, 3) + + case AFCVTSD: + return FPOP1S(0, 0, 0, 5) + + case AFCVTSH: + return FPOP1S(0, 0, 0, 7) + + case AFRINTNS: + return FPOP1S(0, 0, 0, 8) + + case AFRINTPS: + return FPOP1S(0, 0, 0, 9) + + case AFRINTMS: + return FPOP1S(0, 0, 0, 10) + + case AFRINTZS: + return FPOP1S(0, 0, 0, 11) + + case AFRINTAS: + return FPOP1S(0, 0, 0, 12) + + case AFRINTXS: + return 
FPOP1S(0, 0, 0, 14) + + case AFRINTIS: + return FPOP1S(0, 0, 0, 15) + + case AFMOVD: + return FPOP1S(0, 0, 1, 0) + + case AFABSD: + return FPOP1S(0, 0, 1, 1) + + case AFNEGD: + return FPOP1S(0, 0, 1, 2) + + case AFSQRTD: + return FPOP1S(0, 0, 1, 3) + + case AFCVTDS: + return FPOP1S(0, 0, 1, 4) + + case AFCVTDH: + return FPOP1S(0, 0, 1, 7) + + case AFRINTND: + return FPOP1S(0, 0, 1, 8) + + case AFRINTPD: + return FPOP1S(0, 0, 1, 9) + + case AFRINTMD: + return FPOP1S(0, 0, 1, 10) + + case AFRINTZD: + return FPOP1S(0, 0, 1, 11) + + case AFRINTAD: + return FPOP1S(0, 0, 1, 12) + + case AFRINTXD: + return FPOP1S(0, 0, 1, 14) + + case AFRINTID: + return FPOP1S(0, 0, 1, 15) + + case AFCVTHS: + return FPOP1S(0, 0, 3, 4) + + case AFCVTHD: + return FPOP1S(0, 0, 3, 5) + + case AVADD: + return 7<<25 | 1<<21 | 1<<15 | 1<<10 + + case AVSUB: + return 0x17<<25 | 1<<21 | 1<<15 | 1<<10 + + case AVADDP: + return 7<<25 | 1<<21 | 1<<15 | 15<<10 + + case AVAND: + return 7<<25 | 1<<21 | 7<<10 + + case AVCMEQ: + return 1<<29 | 0x71<<21 | 0x23<<10 + + case AVCNT: + return 0xE<<24 | 0x10<<17 | 5<<12 | 2<<10 + + case AVZIP1: + return 0xE<<24 | 3<<12 | 2<<10 + + case AVZIP2: + return 0xE<<24 | 1<<14 | 3<<12 | 2<<10 + + case AVEOR: + return 1<<29 | 0x71<<21 | 7<<10 + + case AVORR: + return 7<<25 | 5<<21 | 7<<10 + + case AVREV16: + return 3<<26 | 2<<24 | 1<<21 | 3<<11 + + case AVREV32: + return 11<<26 | 2<<24 | 1<<21 | 1<<11 + + case AVREV64: + return 3<<26 | 2<<24 | 1<<21 | 1<<11 + + case AVMOV: + return 7<<25 | 5<<21 | 7<<10 + + case AVADDV: + return 7<<25 | 3<<20 | 3<<15 | 7<<11 + + case AVUADDLV: + return 1<<29 | 7<<25 | 3<<20 | 7<<11 + + case AVFMLA: + return 7<<25 | 0<<23 | 1<<21 | 3<<14 | 3<<10 + + case AVFMLS: + return 7<<25 | 1<<23 | 1<<21 | 3<<14 | 3<<10 + + case AVPMULL, AVPMULL2: + return 0xE<<24 | 1<<21 | 0x38<<10 + + case AVRBIT: + return 0x2E<<24 | 1<<22 | 0x10<<17 | 5<<12 | 2<<10 + + case AVLD1, AVLD2, AVLD3, AVLD4: + return 3<<26 | 1<<22 + + case AVLD1R, AVLD3R: + return 0xD<<24 | 1<<22 + + case AVLD2R, AVLD4R: + return 0xD<<24 | 3<<21 + + case AVBIF: + return 1<<29 | 7<<25 | 7<<21 | 7<<10 + + case AVBIT: + return 1<<29 | 0x75<<21 | 7<<10 + + case AVBSL: + return 1<<29 | 0x73<<21 | 7<<10 + + case AVCMTST: + return 0xE<<24 | 1<<21 | 0x23<<10 + + case AVUZP1: + return 7<<25 | 3<<11 + + case AVUZP2: + return 7<<25 | 1<<14 | 3<<11 + } + + c.ctxt.Diag("%v: bad rrr %d %v", p, a, a) + return 0 +} + +/* + * imm -> Rd + * imm op Rn -> Rd + */ +func (c *ctxt7) opirr(p *obj.Prog, a obj.As) uint32 { + switch a { + /* op $addcon, Rn, Rd */ + case AMOVD, AADD: + return S64 | 0<<30 | 0<<29 | 0x11<<24 + + case ACMN, AADDS: + return S64 | 0<<30 | 1<<29 | 0x11<<24 + + case AMOVW, AADDW: + return S32 | 0<<30 | 0<<29 | 0x11<<24 + + case ACMNW, AADDSW: + return S32 | 0<<30 | 1<<29 | 0x11<<24 + + case ASUB: + return S64 | 1<<30 | 0<<29 | 0x11<<24 + + case ACMP, ASUBS: + return S64 | 1<<30 | 1<<29 | 0x11<<24 + + case ASUBW: + return S32 | 1<<30 | 0<<29 | 0x11<<24 + + case ACMPW, ASUBSW: + return S32 | 1<<30 | 1<<29 | 0x11<<24 + + /* op $imm(SB), Rd; op label, Rd */ + case AADR: + return 0<<31 | 0x10<<24 + + case AADRP: + return 1<<31 | 0x10<<24 + + /* op $bimm, Rn, Rd */ + case AAND, ABIC: + return S64 | 0<<29 | 0x24<<23 + + case AANDW, ABICW: + return S32 | 0<<29 | 0x24<<23 | 0<<22 + + case AORR, AORN: + return S64 | 1<<29 | 0x24<<23 + + case AORRW, AORNW: + return S32 | 1<<29 | 0x24<<23 | 0<<22 + + case AEOR, AEON: + return S64 | 2<<29 | 0x24<<23 + + case AEORW, AEONW: + return S32 | 2<<29 | 0x24<<23 | 0<<22 + + case AANDS, 
ABICS, ATST: + return S64 | 3<<29 | 0x24<<23 + + case AANDSW, ABICSW, ATSTW: + return S32 | 3<<29 | 0x24<<23 | 0<<22 + + case AASR: + return S64 | 0<<29 | 0x26<<23 /* alias of SBFM */ + + case AASRW: + return S32 | 0<<29 | 0x26<<23 | 0<<22 + + /* op $width, $lsb, Rn, Rd */ + case ABFI: + return S64 | 2<<29 | 0x26<<23 | 1<<22 + /* alias of BFM */ + + case ABFIW: + return S32 | 2<<29 | 0x26<<23 | 0<<22 + + /* op $imms, $immr, Rn, Rd */ + case ABFM: + return S64 | 1<<29 | 0x26<<23 | 1<<22 + + case ABFMW: + return S32 | 1<<29 | 0x26<<23 | 0<<22 + + case ASBFM: + return S64 | 0<<29 | 0x26<<23 | 1<<22 + + case ASBFMW: + return S32 | 0<<29 | 0x26<<23 | 0<<22 + + case AUBFM: + return S64 | 2<<29 | 0x26<<23 | 1<<22 + + case AUBFMW: + return S32 | 2<<29 | 0x26<<23 | 0<<22 + + case ABFXIL: + return S64 | 1<<29 | 0x26<<23 | 1<<22 /* alias of BFM */ + + case ABFXILW: + return S32 | 1<<29 | 0x26<<23 | 0<<22 + + case AEXTR: + return S64 | 0<<29 | 0x27<<23 | 1<<22 | 0<<21 + + case AEXTRW: + return S32 | 0<<29 | 0x27<<23 | 0<<22 | 0<<21 + + case ACBNZ: + return S64 | 0x1A<<25 | 1<<24 + + case ACBNZW: + return S32 | 0x1A<<25 | 1<<24 + + case ACBZ: + return S64 | 0x1A<<25 | 0<<24 + + case ACBZW: + return S32 | 0x1A<<25 | 0<<24 + + case ACCMN: + return S64 | 0<<30 | 1<<29 | 0xD2<<21 | 1<<11 | 0<<10 | 0<<4 /* imm5<<16 | cond<<12 | nzcv<<0 */ + + case ACCMNW: + return S32 | 0<<30 | 1<<29 | 0xD2<<21 | 1<<11 | 0<<10 | 0<<4 + + case ACCMP: + return S64 | 1<<30 | 1<<29 | 0xD2<<21 | 1<<11 | 0<<10 | 0<<4 /* imm5<<16 | cond<<12 | nzcv<<0 */ + + case ACCMPW: + return S32 | 1<<30 | 1<<29 | 0xD2<<21 | 1<<11 | 0<<10 | 0<<4 + + case AMOVK: + return S64 | 3<<29 | 0x25<<23 + + case AMOVKW: + return S32 | 3<<29 | 0x25<<23 + + case AMOVN: + return S64 | 0<<29 | 0x25<<23 + + case AMOVNW: + return S32 | 0<<29 | 0x25<<23 + + case AMOVZ: + return S64 | 2<<29 | 0x25<<23 + + case AMOVZW: + return S32 | 2<<29 | 0x25<<23 + + case AMSR: + return SYSOP(0, 0, 0, 4, 0, 0, 0x1F) /* MSR (immediate) */ + + case AAT, + ADC, + AIC, + ATLBI, + ASYS: + return SYSOP(0, 1, 0, 0, 0, 0, 0) + + case ASYSL: + return SYSOP(1, 1, 0, 0, 0, 0, 0) + + case ATBZ: + return 0x36 << 24 + + case ATBNZ: + return 0x37 << 24 + + case ADSB: + return SYSOP(0, 0, 3, 3, 0, 4, 0x1F) + + case ADMB: + return SYSOP(0, 0, 3, 3, 0, 5, 0x1F) + + case AISB: + return SYSOP(0, 0, 3, 3, 0, 6, 0x1F) + + case AHINT: + return SYSOP(0, 0, 3, 2, 0, 0, 0x1F) + + case AVEXT: + return 0x2E<<24 | 0<<23 | 0<<21 | 0<<15 + + case AVUSHR: + return 0x5E<<23 | 1<<10 + + case AVSHL: + return 0x1E<<23 | 21<<10 + + case AVSRI: + return 0x5E<<23 | 17<<10 + + case AVUSHLL, AVUXTL: + return 1<<29 | 15<<24 | 0x29<<10 + + case AVUSHLL2, AVUXTL2: + return 3<<29 | 15<<24 | 0x29<<10 + } + + c.ctxt.Diag("%v: bad irr %v", p, a) + return 0 +} + +func (c *ctxt7) opbit(p *obj.Prog, a obj.As) uint32 { + switch a { + case ACLS: + return S64 | OPBIT(5) + + case ACLSW: + return S32 | OPBIT(5) + + case ACLZ: + return S64 | OPBIT(4) + + case ACLZW: + return S32 | OPBIT(4) + + case ARBIT: + return S64 | OPBIT(0) + + case ARBITW: + return S32 | OPBIT(0) + + case AREV: + return S64 | OPBIT(3) + + case AREVW: + return S32 | OPBIT(2) + + case AREV16: + return S64 | OPBIT(1) + + case AREV16W: + return S32 | OPBIT(1) + + case AREV32: + return S64 | OPBIT(2) + + default: + c.ctxt.Diag("bad bit op\n%v", p) + return 0 + } +} + +/* + * add/subtract sign or zero-extended register + */ +func (c *ctxt7) opxrrr(p *obj.Prog, a obj.As, extend bool) uint32 { + extension := uint32(0) + if !extend { + switch a { + case AADD, ACMN, 
AADDS, ASUB, ACMP, ASUBS: + extension = LSL0_64 + + case AADDW, ACMNW, AADDSW, ASUBW, ACMPW, ASUBSW: + extension = LSL0_32 + } + } + + switch a { + case AADD: + return S64 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension + + case AADDW: + return S32 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension + + case ACMN, AADDS: + return S64 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension + + case ACMNW, AADDSW: + return S32 | 0<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension + + case ASUB: + return S64 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension + + case ASUBW: + return S32 | 1<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension + + case ACMP, ASUBS: + return S64 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension + + case ACMPW, ASUBSW: + return S32 | 1<<30 | 1<<29 | 0x0b<<24 | 0<<22 | 1<<21 | extension + } + + c.ctxt.Diag("bad opxrrr %v\n%v", a, p) + return 0 +} + +func (c *ctxt7) opimm(p *obj.Prog, a obj.As) uint32 { + switch a { + case ASVC: + return 0xD4<<24 | 0<<21 | 1 /* imm16<<5 */ + + case AHVC: + return 0xD4<<24 | 0<<21 | 2 + + case ASMC: + return 0xD4<<24 | 0<<21 | 3 + + case ABRK: + return 0xD4<<24 | 1<<21 | 0 + + case AHLT: + return 0xD4<<24 | 2<<21 | 0 + + case ADCPS1: + return 0xD4<<24 | 5<<21 | 1 + + case ADCPS2: + return 0xD4<<24 | 5<<21 | 2 + + case ADCPS3: + return 0xD4<<24 | 5<<21 | 3 + + case ACLREX: + return SYSOP(0, 0, 3, 3, 0, 2, 0x1F) + } + + c.ctxt.Diag("%v: bad imm %v", p, a) + return 0 +} + +func (c *ctxt7) brdist(p *obj.Prog, preshift int, flen int, shift int) int64 { + v := int64(0) + t := int64(0) + q := p.To.Target() + if q == nil { + // TODO: don't use brdist for this case, as it isn't a branch. + // (Calls from omovlit, and maybe adr/adrp opcodes as well.) + q = p.Pool + } + if q != nil { + v = (q.Pc >> uint(preshift)) - (c.pc >> uint(preshift)) + if (v & ((1 << uint(shift)) - 1)) != 0 { + c.ctxt.Diag("misaligned label\n%v", p) + } + v >>= uint(shift) + t = int64(1) << uint(flen-1) + if v < -t || v >= t { + c.ctxt.Diag("branch too far %#x vs %#x [%p]\n%v\n%v", v, t, c.blitrl, p, q) + panic("branch too far") + } + } + + return v & ((t << 1) - 1) +} + +/* + * pc-relative branches + */ +func (c *ctxt7) opbra(p *obj.Prog, a obj.As) uint32 { + switch a { + case ABEQ: + return OPBcc(0x0) + + case ABNE: + return OPBcc(0x1) + + case ABCS: + return OPBcc(0x2) + + case ABHS: + return OPBcc(0x2) + + case ABCC: + return OPBcc(0x3) + + case ABLO: + return OPBcc(0x3) + + case ABMI: + return OPBcc(0x4) + + case ABPL: + return OPBcc(0x5) + + case ABVS: + return OPBcc(0x6) + + case ABVC: + return OPBcc(0x7) + + case ABHI: + return OPBcc(0x8) + + case ABLS: + return OPBcc(0x9) + + case ABGE: + return OPBcc(0xa) + + case ABLT: + return OPBcc(0xb) + + case ABGT: + return OPBcc(0xc) + + case ABLE: + return OPBcc(0xd) /* imm19<<5 | cond */ + + case AB: + return 0<<31 | 5<<26 /* imm26 */ + + case obj.ADUFFZERO, obj.ADUFFCOPY, ABL: + return 1<<31 | 5<<26 + } + + c.ctxt.Diag("%v: bad bra %v", p, a) + return 0 +} + +func (c *ctxt7) opbrr(p *obj.Prog, a obj.As) uint32 { + switch a { + case ABL: + return OPBLR(1) /* BLR */ + + case AB: + return OPBLR(0) /* BR */ + + case obj.ARET: + return OPBLR(2) /* RET */ + } + + c.ctxt.Diag("%v: bad brr %v", p, a) + return 0 +} + +func (c *ctxt7) op0(p *obj.Prog, a obj.As) uint32 { + switch a { + case ADRPS: + return 0x6B<<25 | 5<<21 | 0x1F<<16 | 0x1F<<5 + + case AERET: + return 0x6B<<25 | 4<<21 | 0x1F<<16 | 0<<10 | 0x1F<<5 + + case ANOOP: + return SYSHINT(0) + + case AYIELD: + return SYSHINT(1) + + case AWFE: + return 
SYSHINT(2) + + case AWFI: + return SYSHINT(3) + + case ASEV: + return SYSHINT(4) + + case ASEVL: + return SYSHINT(5) + } + + c.ctxt.Diag("%v: bad op0 %v", p, a) + return 0 +} + +/* + * register offset + */ +func (c *ctxt7) opload(p *obj.Prog, a obj.As) uint32 { + switch a { + case ALDAR: + return LDSTX(3, 1, 1, 0, 1) | 0x1F<<10 + + case ALDARW: + return LDSTX(2, 1, 1, 0, 1) | 0x1F<<10 + + case ALDARB: + return LDSTX(0, 1, 1, 0, 1) | 0x1F<<10 + + case ALDARH: + return LDSTX(1, 1, 1, 0, 1) | 0x1F<<10 + + case ALDAXP: + return LDSTX(3, 0, 1, 1, 1) + + case ALDAXPW: + return LDSTX(2, 0, 1, 1, 1) + + case ALDAXR: + return LDSTX(3, 0, 1, 0, 1) | 0x1F<<10 + + case ALDAXRW: + return LDSTX(2, 0, 1, 0, 1) | 0x1F<<10 + + case ALDAXRB: + return LDSTX(0, 0, 1, 0, 1) | 0x1F<<10 + + case ALDAXRH: + return LDSTX(1, 0, 1, 0, 1) | 0x1F<<10 + + case ALDXR: + return LDSTX(3, 0, 1, 0, 0) | 0x1F<<10 + + case ALDXRB: + return LDSTX(0, 0, 1, 0, 0) | 0x1F<<10 + + case ALDXRH: + return LDSTX(1, 0, 1, 0, 0) | 0x1F<<10 + + case ALDXRW: + return LDSTX(2, 0, 1, 0, 0) | 0x1F<<10 + + case ALDXP: + return LDSTX(3, 0, 1, 1, 0) + + case ALDXPW: + return LDSTX(2, 0, 1, 1, 0) + + case AMOVNP: + return S64 | 0<<30 | 5<<27 | 0<<26 | 0<<23 | 1<<22 + + case AMOVNPW: + return S32 | 0<<30 | 5<<27 | 0<<26 | 0<<23 | 1<<22 + } + + c.ctxt.Diag("bad opload %v\n%v", a, p) + return 0 +} + +func (c *ctxt7) opstore(p *obj.Prog, a obj.As) uint32 { + switch a { + case ASTLR: + return LDSTX(3, 1, 0, 0, 1) | 0x1F<<10 + + case ASTLRB: + return LDSTX(0, 1, 0, 0, 1) | 0x1F<<10 + + case ASTLRH: + return LDSTX(1, 1, 0, 0, 1) | 0x1F<<10 + + case ASTLP: + return LDSTX(3, 0, 0, 1, 1) + + case ASTLPW: + return LDSTX(2, 0, 0, 1, 1) + + case ASTLRW: + return LDSTX(2, 1, 0, 0, 1) | 0x1F<<10 + + case ASTLXP: + return LDSTX(3, 0, 0, 1, 1) + + case ASTLXPW: + return LDSTX(2, 0, 0, 1, 1) + + case ASTLXR: + return LDSTX(3, 0, 0, 0, 1) | 0x1F<<10 + + case ASTLXRB: + return LDSTX(0, 0, 0, 0, 1) | 0x1F<<10 + + case ASTLXRH: + return LDSTX(1, 0, 0, 0, 1) | 0x1F<<10 + + case ASTLXRW: + return LDSTX(2, 0, 0, 0, 1) | 0x1F<<10 + + case ASTXR: + return LDSTX(3, 0, 0, 0, 0) | 0x1F<<10 + + case ASTXRB: + return LDSTX(0, 0, 0, 0, 0) | 0x1F<<10 + + case ASTXRH: + return LDSTX(1, 0, 0, 0, 0) | 0x1F<<10 + + case ASTXP: + return LDSTX(3, 0, 0, 1, 0) + + case ASTXPW: + return LDSTX(2, 0, 0, 1, 0) + + case ASTXRW: + return LDSTX(2, 0, 0, 0, 0) | 0x1F<<10 + + case AMOVNP: + return S64 | 0<<30 | 5<<27 | 0<<26 | 0<<23 | 1<<22 + + case AMOVNPW: + return S32 | 0<<30 | 5<<27 | 0<<26 | 0<<23 | 1<<22 + } + + c.ctxt.Diag("bad opstore %v\n%v", a, p) + return 0 +} + +/* + * load/store register (unsigned immediate) C3.3.13 + * these produce 64-bit values (when there's an option) + */ +func (c *ctxt7) olsr12u(p *obj.Prog, o int32, v int32, b int, r int) uint32 { + if v < 0 || v >= (1<<12) { + c.ctxt.Diag("offset out of range: %d\n%v", v, p) + } + o |= (v & 0xFFF) << 10 + o |= int32(b&31) << 5 + o |= int32(r & 31) + return uint32(o) +} + +func (c *ctxt7) opldr12(p *obj.Prog, a obj.As) uint32 { + switch a { + case AMOVD: + return LDSTR12U(3, 0, 1) /* imm12<<10 | Rn<<5 | Rt */ + + case AMOVW: + return LDSTR12U(2, 0, 2) + + case AMOVWU: + return LDSTR12U(2, 0, 1) + + case AMOVH: + return LDSTR12U(1, 0, 2) + + case AMOVHU: + return LDSTR12U(1, 0, 1) + + case AMOVB: + return LDSTR12U(0, 0, 2) + + case AMOVBU: + return LDSTR12U(0, 0, 1) + + case AFMOVS: + return LDSTR12U(2, 1, 1) + + case AFMOVD: + return LDSTR12U(3, 1, 1) + } + + c.ctxt.Diag("bad opldr12 %v\n%v", a, p) + return 0 +} + +func (c 
*ctxt7) opstr12(p *obj.Prog, a obj.As) uint32 { + return LD2STR(c.opldr12(p, a)) +} + +/* + * load/store register (unscaled immediate) C3.3.12 + */ +func (c *ctxt7) olsr9s(p *obj.Prog, o int32, v int32, b int, r int) uint32 { + if v < -256 || v > 255 { + c.ctxt.Diag("offset out of range: %d\n%v", v, p) + } + o |= (v & 0x1FF) << 12 + o |= int32(b&31) << 5 + o |= int32(r & 31) + return uint32(o) +} + +func (c *ctxt7) opldr9(p *obj.Prog, a obj.As) uint32 { + switch a { + case AMOVD: + return LDSTR9S(3, 0, 1) /* simm9<<12 | Rn<<5 | Rt */ + + case AMOVW: + return LDSTR9S(2, 0, 2) + + case AMOVWU: + return LDSTR9S(2, 0, 1) + + case AMOVH: + return LDSTR9S(1, 0, 2) + + case AMOVHU: + return LDSTR9S(1, 0, 1) + + case AMOVB: + return LDSTR9S(0, 0, 2) + + case AMOVBU: + return LDSTR9S(0, 0, 1) + + case AFMOVS: + return LDSTR9S(2, 1, 1) + + case AFMOVD: + return LDSTR9S(3, 1, 1) + } + + c.ctxt.Diag("bad opldr9 %v\n%v", a, p) + return 0 +} + +func (c *ctxt7) opstr9(p *obj.Prog, a obj.As) uint32 { + return LD2STR(c.opldr9(p, a)) +} + +func (c *ctxt7) opldrpp(p *obj.Prog, a obj.As) uint32 { + switch a { + case AMOVD: + return 3<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22 /* simm9<<12 | Rn<<5 | Rt */ + + case AMOVW: + return 2<<30 | 7<<27 | 0<<26 | 0<<24 | 2<<22 + + case AMOVWU: + return 2<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22 + + case AMOVH: + return 1<<30 | 7<<27 | 0<<26 | 0<<24 | 2<<22 + + case AMOVHU: + return 1<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22 + + case AMOVB: + return 0<<30 | 7<<27 | 0<<26 | 0<<24 | 2<<22 + + case AMOVBU: + return 0<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22 + + case AFMOVS: + return 2<<30 | 7<<27 | 1<<26 | 0<<24 | 1<<22 + + case AFMOVD: + return 3<<30 | 7<<27 | 1<<26 | 0<<24 | 1<<22 + + case APRFM: + return 0xf9<<24 | 2<<22 + + } + + c.ctxt.Diag("bad opldr %v\n%v", a, p) + return 0 +} + +// olsxrr attaches register operands to a load/store opcode supplied in o. +// The result either encodes a load of r from (r1+r2) or a store of r to (r1+r2). +func (c *ctxt7) olsxrr(p *obj.Prog, o int32, r int, r1 int, r2 int) uint32 { + o |= int32(r1&31) << 5 + o |= int32(r2&31) << 16 + o |= int32(r & 31) + return uint32(o) +} + +// opldrr returns the ARM64 opcode encoding corresponding to the obj.As opcode +// for load instruction with register offset. +// The offset register can be (Rn)(Rm.UXTW<<2) or (Rn)(Rm<<2) or (Rn)(Rm). +func (c *ctxt7) opldrr(p *obj.Prog, a obj.As, extension bool) uint32 { + OptionS := uint32(0x1a) + if extension { + OptionS = uint32(0) // option value and S value have been encoded into p.From.Offset. + } + switch a { + case AMOVD: + return OptionS<<10 | 0x3<<21 | 0x1f<<27 + case AMOVW: + return OptionS<<10 | 0x5<<21 | 0x17<<27 + case AMOVWU: + return OptionS<<10 | 0x3<<21 | 0x17<<27 + case AMOVH: + return OptionS<<10 | 0x5<<21 | 0x0f<<27 + case AMOVHU: + return OptionS<<10 | 0x3<<21 | 0x0f<<27 + case AMOVB: + return OptionS<<10 | 0x5<<21 | 0x07<<27 + case AMOVBU: + return OptionS<<10 | 0x3<<21 | 0x07<<27 + case AFMOVS: + return OptionS<<10 | 0x3<<21 | 0x17<<27 | 1<<26 + case AFMOVD: + return OptionS<<10 | 0x3<<21 | 0x1f<<27 | 1<<26 + } + c.ctxt.Diag("bad opldrr %v\n%v", a, p) + return 0 +} + +// opstrr returns the ARM64 opcode encoding corresponding to the obj.As opcode +// for store instruction with register offset. +// The offset register can be (Rn)(Rm.UXTW<<2) or (Rn)(Rm<<2) or (Rn)(Rm). 
+func (c *ctxt7) opstrr(p *obj.Prog, a obj.As, extension bool) uint32 { + OptionS := uint32(0x1a) + if extension { + OptionS = uint32(0) // option value and S value have been encoded into p.To.Offset. + } + switch a { + case AMOVD: + return OptionS<<10 | 0x1<<21 | 0x1f<<27 + case AMOVW, AMOVWU: + return OptionS<<10 | 0x1<<21 | 0x17<<27 + case AMOVH, AMOVHU: + return OptionS<<10 | 0x1<<21 | 0x0f<<27 + case AMOVB, AMOVBU: + return OptionS<<10 | 0x1<<21 | 0x07<<27 + case AFMOVS: + return OptionS<<10 | 0x1<<21 | 0x17<<27 | 1<<26 + case AFMOVD: + return OptionS<<10 | 0x1<<21 | 0x1f<<27 | 1<<26 + } + c.ctxt.Diag("bad opstrr %v\n%v", a, p) + return 0 +} + +func (c *ctxt7) oaddi(p *obj.Prog, o1 int32, v int32, r int, rt int) uint32 { + if (v & 0xFFF000) != 0 { + if v&0xFFF != 0 { + c.ctxt.Diag("%v misuses oaddi", p) + } + v >>= 12 + o1 |= 1 << 22 + } + + o1 |= ((v & 0xFFF) << 10) | (int32(r&31) << 5) | int32(rt&31) + return uint32(o1) +} + +/* + * load a literal value into dr + */ +func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 { + var o1 int32 + if p.Pool == nil { /* not in literal pool */ + c.aclass(a) + c.ctxt.Logf("omovlit add %d (%#x)\n", c.instoffset, uint64(c.instoffset)) + + /* TODO: could be clever, and use general constant builder */ + o1 = int32(c.opirr(p, AADD)) + + v := int32(c.instoffset) + if v != 0 && (v&0xFFF) == 0 { + v >>= 12 + o1 |= 1 << 22 /* shift, by 12 */ + } + + o1 |= ((v & 0xFFF) << 10) | (REGZERO & 31 << 5) | int32(dr&31) + } else { + fp, w := 0, 0 + switch as { + case AFMOVS: + fp = 1 + w = 0 /* 32-bit SIMD/FP */ + + case AFMOVD: + fp = 1 + w = 1 /* 64-bit SIMD/FP */ + + case AFMOVQ: + fp = 1 + w = 2 /* 128-bit SIMD/FP */ + + case AMOVD: + if p.Pool.As == ADWORD { + w = 1 /* 64-bit */ + } else if p.Pool.To.Offset < 0 { + w = 2 /* 32-bit, sign-extended to 64-bit */ + } else if p.Pool.To.Offset >= 0 { + w = 0 /* 32-bit, zero-extended to 64-bit */ + } else { + c.ctxt.Diag("invalid operand %v in %v", a, p) + } + + case AMOVBU, AMOVHU, AMOVWU: + w = 0 /* 32-bit, zero-extended to 64-bit */ + + case AMOVB, AMOVH, AMOVW: + w = 2 /* 32-bit, sign-extended to 64-bit */ + + default: + c.ctxt.Diag("invalid operation %v in %v", as, p) + } + + v := int32(c.brdist(p, 0, 19, 2)) + o1 = (int32(w) << 30) | (int32(fp) << 26) | (3 << 27) + o1 |= (v & 0x7FFFF) << 5 + o1 |= int32(dr & 31) + } + + return uint32(o1) +} + +// load a constant (MOVCON or BITCON) in a into rt +func (c *ctxt7) omovconst(as obj.As, p *obj.Prog, a *obj.Addr, rt int) (o1 uint32) { + if cls := oclass(a); cls == C_BITCON || cls == C_ABCON || cls == C_ABCON0 { + // or $bitcon, REGZERO, rt + mode := 64 + var as1 obj.As + switch as { + case AMOVW: + as1 = AORRW + mode = 32 + case AMOVD: + as1 = AORR + } + o1 = c.opirr(p, as1) + o1 |= bitconEncode(uint64(a.Offset), mode) | uint32(REGZERO&31)<<5 | uint32(rt&31) + return o1 + } + + if as == AMOVW { + d := uint32(a.Offset) + s := movcon(int64(d)) + if s < 0 || 16*s >= 32 { + d = ^d + s = movcon(int64(d)) + if s < 0 || 16*s >= 32 { + c.ctxt.Diag("impossible 32-bit move wide: %#x\n%v", uint32(a.Offset), p) + } + o1 = c.opirr(p, AMOVNW) + } else { + o1 = c.opirr(p, AMOVZW) + } + o1 |= MOVCONST(int64(d), s, rt) + } + if as == AMOVD { + d := a.Offset + s := movcon(d) + if s < 0 || 16*s >= 64 { + d = ^d + s = movcon(d) + if s < 0 || 16*s >= 64 { + c.ctxt.Diag("impossible 64-bit move wide: %#x\n%v", uint64(a.Offset), p) + } + o1 = c.opirr(p, AMOVN) + } else { + o1 = c.opirr(p, AMOVZ) + } + o1 |= MOVCONST(d, s, rt) + } + return o1 +} + +// load a 
32-bit/64-bit large constant (LCON or VCON) in a.Offset into rt +// put the instruction sequence in os and return the number of instructions. +func (c *ctxt7) omovlconst(as obj.As, p *obj.Prog, a *obj.Addr, rt int, os []uint32) (num uint8) { + switch as { + case AMOVW: + d := uint32(a.Offset) + // use MOVZW and MOVKW to load a constant to rt + os[0] = c.opirr(p, AMOVZW) + os[0] |= MOVCONST(int64(d), 0, rt) + os[1] = c.opirr(p, AMOVKW) + os[1] |= MOVCONST(int64(d), 1, rt) + return 2 + + case AMOVD: + d := a.Offset + dn := ^d + var immh [4]uint64 + var i int + zeroCount := int(0) + negCount := int(0) + for i = 0; i < 4; i++ { + immh[i] = uint64((d >> uint(i*16)) & 0xffff) + if immh[i] == 0 { + zeroCount++ + } else if immh[i] == 0xffff { + negCount++ + } + } + + if zeroCount == 4 || negCount == 4 { + c.ctxt.Diag("the immediate should be MOVCON: %v", p) + } + switch { + case zeroCount == 3: + // one MOVZ + for i = 0; i < 4; i++ { + if immh[i] != 0 { + os[0] = c.opirr(p, AMOVZ) + os[0] |= MOVCONST(d, i, rt) + break + } + } + return 1 + + case negCount == 3: + // one MOVN + for i = 0; i < 4; i++ { + if immh[i] != 0xffff { + os[0] = c.opirr(p, AMOVN) + os[0] |= MOVCONST(dn, i, rt) + break + } + } + return 1 + + case zeroCount == 2: + // one MOVZ and one MOVK + for i = 0; i < 4; i++ { + if immh[i] != 0 { + os[0] = c.opirr(p, AMOVZ) + os[0] |= MOVCONST(d, i, rt) + i++ + break + } + } + for ; i < 4; i++ { + if immh[i] != 0 { + os[1] = c.opirr(p, AMOVK) + os[1] |= MOVCONST(d, i, rt) + } + } + return 2 + + case negCount == 2: + // one MOVN and one MOVK + for i = 0; i < 4; i++ { + if immh[i] != 0xffff { + os[0] = c.opirr(p, AMOVN) + os[0] |= MOVCONST(dn, i, rt) + i++ + break + } + } + for ; i < 4; i++ { + if immh[i] != 0xffff { + os[1] = c.opirr(p, AMOVK) + os[1] |= MOVCONST(d, i, rt) + } + } + return 2 + + case zeroCount == 1: + // one MOVZ and two MOVKs + for i = 0; i < 4; i++ { + if immh[i] != 0 { + os[0] = c.opirr(p, AMOVZ) + os[0] |= MOVCONST(d, i, rt) + i++ + break + } + } + + for j := 1; i < 4; i++ { + if immh[i] != 0 { + os[j] = c.opirr(p, AMOVK) + os[j] |= MOVCONST(d, i, rt) + j++ + } + } + return 3 + + case negCount == 1: + // one MOVN and two MOVKs + for i = 0; i < 4; i++ { + if immh[i] != 0xffff { + os[0] = c.opirr(p, AMOVN) + os[0] |= MOVCONST(dn, i, rt) + i++ + break + } + } + + for j := 1; i < 4; i++ { + if immh[i] != 0xffff { + os[j] = c.opirr(p, AMOVK) + os[j] |= MOVCONST(d, i, rt) + j++ + } + } + return 3 + + default: + // one MOVZ and three MOVKs + os[0] = c.opirr(p, AMOVZ) + os[0] |= MOVCONST(d, 0, rt) + for i = 1; i < 4; i++ { + os[i] = c.opirr(p, AMOVK) + os[i] |= MOVCONST(d, i, rt) + } + return 4 + } + default: + return 0 + } +}
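+ +// opbfm returns the encoding of a bitfield instruction: immr (r) is placed in +// bits 16-21, imms (s) in bits 10-15, and rf/rt fill the usual Rn/Rd slots; both +// bit numbers are range-checked against the 32- or 64-bit register width implied by a.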
+func (c *ctxt7) opbfm(p *obj.Prog, a obj.As, r int, s int, rf int, rt int) uint32 { + var b uint32 + o := c.opirr(p, a) + if (o & (1 << 31)) == 0 { + b = 32 + } else { + b = 64 + } + if r < 0 || uint32(r) >= b { + c.ctxt.Diag("illegal bit number\n%v", p) + } + o |= (uint32(r) & 0x3F) << 16 + if s < 0 || uint32(s) >= b { + c.ctxt.Diag("illegal bit number\n%v", p) + } + o |= (uint32(s) & 0x3F) << 10 + o |= (uint32(rf&31) << 5) | uint32(rt&31) + return o +} + +func (c *ctxt7) opextr(p *obj.Prog, a obj.As, v int32, rn int, rm int, rt int) uint32 { + var b uint32 + o := c.opirr(p, a) + if (o & (1 << 31)) != 0 { + b = 63 + } else { + b = 31 + } + if v < 0 || uint32(v) > b { + c.ctxt.Diag("illegal bit number\n%v", p) + } + o |= uint32(v) << 10 + o |= uint32(rn&31) << 5 + o |= uint32(rm&31) << 16 + o |= uint32(rt & 31) + return o +} + +/* generate instruction encoding for LDP/LDPW/LDPSW/STP/STPW */ +func (c *ctxt7) opldpstp(p *obj.Prog, o *Optab, vo int32, rbase, rl, rh, ldp uint32) uint32 { + wback := false + if o.scond == C_XPOST || o.scond == C_XPRE { + wback = true + } + switch p.As { + case ALDP, ALDPW, ALDPSW: + c.checkUnpredictable(p, true, wback, p.From.Reg, p.To.Reg, int16(p.To.Offset)) + case ASTP, ASTPW: + if wback { + c.checkUnpredictable(p, false, true, p.To.Reg, p.From.Reg, int16(p.From.Offset)) + } + case AFLDPD, AFLDPS: + c.checkUnpredictable(p, true, false, p.From.Reg, p.To.Reg, int16(p.To.Offset)) + } + var ret uint32 + // check offset + switch p.As { + case AFLDPD, AFSTPD: + if vo < -512 || vo > 504 || vo%8 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) + } + vo /= 8 + ret = 1<<30 | 1<<26 + case ALDP, ASTP: + if vo < -512 || vo > 504 || vo%8 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) + } + vo /= 8 + ret = 2 << 30 + case AFLDPS, AFSTPS: + if vo < -256 || vo > 252 || vo%4 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) + } + vo /= 4 + ret = 1 << 26 + case ALDPW, ASTPW: + if vo < -256 || vo > 252 || vo%4 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) + } + vo /= 4 + ret = 0 + case ALDPSW: + if vo < -256 || vo > 252 || vo%4 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) + } + vo /= 4 + ret = 1 << 30 + default: + c.ctxt.Diag("invalid instruction %v\n", p) + } + // check register pair + switch p.As { + case AFLDPD, AFLDPS, AFSTPD, AFSTPS: + if rl < REG_F0 || REG_F31 < rl || rh < REG_F0 || REG_F31 < rh { + c.ctxt.Diag("invalid register pair %v\n", p) + } + case ALDP, ALDPW, ALDPSW: + if rl < REG_R0 || REG_R30 < rl || rh < REG_R0 || REG_R30 < rh { + c.ctxt.Diag("invalid register pair %v\n", p) + } + case ASTP, ASTPW: + if rl < REG_R0 || REG_R31 < rl || rh < REG_R0 || REG_R31 < rh { + c.ctxt.Diag("invalid register pair %v\n", p) + } + } + // other conditional flag bits + switch o.scond { + case C_XPOST: + ret |= 1 << 23 + case C_XPRE: + ret |= 3 << 23 + default: + ret |= 2 << 23 + } + ret |= 5<<27 | (ldp&1)<<22 | uint32(vo&0x7f)<<15 | (rh&31)<<10 | (rbase&31)<<5 | (rl & 31) + return ret +}
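+ +// maskOpvldvst rewrites the 4-bit opcode field (bits 12-15) of a multi-register +// VLD/VST instruction to match p.As; VLD1/VST1 are returned unchanged because o1 +// already carries the correct opcode.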
+func (c *ctxt7) maskOpvldvst(p *obj.Prog, o1 uint32) uint32 { + if p.As == AVLD1 || p.As == AVST1 { + return o1 + } + + o1 &^= 0xf000 // mask out "opcode" field (bits 12-15) + switch p.As { + case AVLD1R, AVLD2R: + o1 |= 0xC << 12 + case AVLD3R, AVLD4R: + o1 |= 0xE << 12 + case AVLD2, AVST2: + o1 |= 8 << 12 + case AVLD3, AVST3: + o1 |= 4 << 12 + case AVLD4, AVST4: + default: + c.ctxt.Diag("unsupported instruction:%v\n", p.As) + } + return o1 +} + +/* + * size in log2(bytes) + */ +func movesize(a obj.As) int { + switch a { + case AMOVD: + return 3 + + case AMOVW, AMOVWU: + return 2 + + case AMOVH, AMOVHU: + return 1 + + case AMOVB, AMOVBU: + return 0 + + case AFMOVS: + return 2 + + case AFMOVD: + return 3 + + default: + return -1 + } +} + +// rm is the Rm register value, o is the extension, amount is the left shift value. +func roff(rm int16, o uint32, amount int16) uint32 { + return uint32(rm&31)<<16 | o<<13 | uint32(amount)<<10 +} + +// encRegShiftOrExt returns the encoding of shifted/extended register, Rx<<n and Rx.UXTW<<n, etc. +func (c *ctxt7) encRegShiftOrExt(a *obj.Addr, r int16) uint32 { + var num, rm int16 + num = (r >> 5) & 7 + rm = r & 31 + switch { + case REG_UXTB <= r && r < REG_UXTH: + return roff(rm, 0, num) + case REG_UXTH <= r && r < REG_UXTW: + return roff(rm, 1, num) + case REG_UXTW <= r && r < REG_UXTX: + if a.Type == obj.TYPE_MEM { + if num == 0 { + return roff(rm, 2, 2) + } else { + return roff(rm, 2, 6) + } + } else { + return roff(rm, 2, num) + } + case REG_UXTX <= r && r < REG_SXTB: + return roff(rm, 3, num) + case REG_SXTB <= r && r < REG_SXTH: + return roff(rm, 4, num) + case REG_SXTH <= r && r < REG_SXTW: + return roff(rm, 5, num) + case REG_SXTW <= r && r < REG_SXTX: + if a.Type == obj.TYPE_MEM { + if num == 0 { + return roff(rm, 6, 2) + } else { + return roff(rm, 6, 6) + } + } else { + return roff(rm, 6, num) + } + case REG_SXTX <= r && r < REG_SPECIAL: + if a.Type == obj.TYPE_MEM { + if num == 0 { + return roff(rm, 7, 2) + } else { + return roff(rm, 7, 6) + } + } else { + return roff(rm, 7, num) + } + case REG_LSL <= r && r < (REG_LSL+1<<8): + return roff(rm, 3, 6) + default: + c.ctxt.Diag("unsupported register extension type.") + } + + return 0 +} diff --git a/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/doc.go b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/doc.go new file mode 100644 index 0000000..7515217 --- /dev/null +++ b/vendor/github.com/twitchyliquid64/golang-asm/obj/arm64/doc.go @@ -0,0 +1,249 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package arm64 implements an ARM64 assembler. Go assembly syntax is different from GNU ARM64 +syntax, but we can still follow the general rules to map between them. + +Instruction mnemonics mapping rules + +1. Most instructions use width suffixes of instruction names to indicate operand width rather than +using different register names. + + Examples: + ADC R24, R14, R12 <=> adc x12, x14, x24 + ADDW R26->24, R21, R15 <=> add w15, w21, w26, asr #24 + FCMPS F2, F3 <=> fcmp s3, s2 + FCMPD F2, F3 <=> fcmp d3, d2 + FCVTDH F2, F3 <=> fcvt h3, d2 + +2. Go uses .P and .W suffixes to indicate post-increment and pre-increment. + + Examples: + MOVD.P -8(R10), R8 <=> ldr x8, [x10],#-8 + MOVB.W 16(R16), R10 <=> ldrsb x10, [x16,#16]! + MOVBU.W 16(R16), R10 <=> ldrb x10, [x16,#16]! + +3. Go uses a series of MOV instructions as load and store. + +64-bit variant ldr, str, stur => MOVD; +32-bit variant str, stur, ldrsw => MOVW; +32-bit variant ldr => MOVWU; +ldrb => MOVBU; ldrh => MOVHU; +ldrsb, sturb, strb => MOVB; +ldrsh, sturh, strh => MOVH.
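+ +For instance, reading the table above gives mappings such as the following (illustrative +examples, derived from the table rather than taken from it): + + Examples: + MOVD (R2), R4 <=> ldr x4, [x2] + MOVWU (R2), R4 <=> ldr w4, [x2] + MOVH (R2), R4 <=> ldrsh x4, [x2] + MOVBU R4, (R2) <=> strb w4, [x2]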
+ +4. Go moves conditions into opcode suffix, like BLT. + +5. Go adds a V prefix for most floating-point and SIMD instructions, except cryptographic extension +instructions and floating-point (scalar) instructions. + + Examples: + VADD V5.H8, V18.H8, V9.H8 <=> add v9.8h, v18.8h, v5.8h + VLD1.P (R6)(R11), [V31.D1] <=> ld1 {v31.1d}, [x6], x11 + VFMLA V29.S2, V20.S2, V14.S2 <=> fmla v14.2s, v20.2s, v29.2s + AESD V22.B16, V19.B16 <=> aesd v19.16b, v22.16b + SCVTFWS R3, F16 <=> scvtf s16, w3 + +6. Align directive + +Go asm supports the PCALIGN directive, which indicates that the next instruction should be aligned +to a specified boundary by padding with NOOP instructions. The alignment value supported on arm64 +must be a power of 2 and in the range of [8, 2048]. + + Examples: + PCALIGN $16 + MOVD $2, R0 // This instruction is aligned to 16 bytes. + PCALIGN $1024 + MOVD $3, R1 // This instruction is aligned to 1024 bytes. + +PCALIGN also changes the function alignment. If a function has one or more PCALIGN directives, +its address will be aligned to the same or coarser boundary, which is the maximum of all the +alignment values. + +In the following example, the function Add is aligned to 128 bytes. + Examples: + TEXT ·Add(SB),$40-16 + MOVD $2, R0 + PCALIGN $32 + MOVD $4, R1 + PCALIGN $128 + MOVD $8, R2 + RET + +On arm64, functions in Go are aligned to 16 bytes by default; PCALIGN can also be used to set the +function alignment. Functions that need to be aligned should preferably use NOFRAME and NOSPLIT +to avoid the prologue inserted by the assembler, so that the function address has the same +alignment as the first hand-written instruction. + +In the following example, PCALIGN at the entry of the function Add will align its address to 2048 bytes. + + Examples: + TEXT ·Add(SB),NOSPLIT|NOFRAME,$0 + PCALIGN $2048 + MOVD $1, R0 + MOVD $1, R1 + RET + +Special Cases. + +(1) umov is written as VMOV. + +(2) br is renamed JMP, blr is renamed CALL. + +(3) No need to add "W" suffix: LDARB, LDARH, LDAXRB, LDAXRH, LDTRH, LDXRB, LDXRH. + +(4) In Go assembly syntax, NOP is a zero-width pseudo-instruction that serves a generic purpose and is +not related to any real ARM64 instruction. NOOP stands for the hardware nop instruction; NOOP is an +alias of HINT $0. + + Examples: + VMOV V13.B[1], R20 <=> mov x20, v13.b[1] + VMOV V13.H[1], R20 <=> mov w20, v13.h[1] + JMP (R3) <=> br x3 + CALL (R17) <=> blr x17 + LDAXRB (R19), R16 <=> ldaxrb w16, [x19] + NOOP <=> nop + + +Register mapping rules + +1. All basic register names are written as Rn. + +2. Go uses ZR as the zero register and RSP as the stack pointer. + +3. Bn, Hn, Dn, Sn and Qn registers are written as Fn in floating-point instructions and as Vn +in SIMD instructions. + + +Argument mapping rules + +1. The operands appear in left-to-right assignment order. + +Go reverses the arguments of most instructions. + + Examples: + ADD R11.SXTB<<1, RSP, R25 <=> add x25, sp, w11, sxtb #1 + VADD V16, V19, V14 <=> add d14, d19, d16 + +Special Cases. + +(1) Argument order is the same as in the GNU ARM64 syntax: cbz, cbnz and some store instructions, +such as str, stur, strb, sturb, strh, sturh, stlr, stlrb, stlrh, st1. + + Examples: + MOVD R29, 384(R19) <=> str x29, [x19,#384] + MOVB.P R30, 30(R4) <=> strb w30, [x4],#30 + STLRH R21, (R19) <=> stlrh w21, [x19] + +(2) MADD, MADDW, MSUB, MSUBW, SMADDL, SMSUBL, UMADDL, UMSUBL <Rm>, <Ra>, <Rn>, <Rd> + + Examples: + MADD R2, R30, R22, R6 <=> madd x6, x22, x2, x30 + SMSUBL R10, R3, R17, R27 <=> smsubl x27, w17, w10, x3
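+ +That is, MADD <Rm>, <Ra>, <Rn>, <Rd> computes Rd = Ra + Rn*Rm; the equivalent GNU +operand order is madd Rd, Rn, Rm, Ra, as the examples above show.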
+ +(3) FMADDD, FMADDS, FMSUBD, FMSUBS, FNMADDD, FNMADDS, FNMSUBD, FNMSUBS <Fm>, <Fa>, <Fn>, <Fd> + + Examples: + FMADDD F30, F20, F3, F29 <=> fmadd d29, d3, d30, d20 + FNMSUBS F7, F25, F7, F22 <=> fnmsub s22, s7, s7, s25 + +(4) BFI, BFXIL, SBFIZ, SBFX, UBFIZ, UBFX $<lsb>, <Rn>, $<width>, <Rd> + + Examples: + BFIW $16, R20, $6, R0 <=> bfi w0, w20, #16, #6 + UBFIZ $34, R26, $5, R20 <=> ubfiz x20, x26, #34, #5 + +(5) FCCMPD, FCCMPS, FCCMPED, FCCMPES <cond>, Fm, Fn, $<nzcv> + + Examples: + FCCMPD AL, F8, F26, $0 <=> fccmp d26, d8, #0x0, al + FCCMPS VS, F29, F4, $4 <=> fccmp s4, s29, #0x4, vs + FCCMPED LE, F20, F5, $13 <=> fccmpe d5, d20, #0xd, le + FCCMPES NE, F26, F10, $0 <=> fccmpe s10, s26, #0x0, ne + +(6) CCMN, CCMNW, CCMP, CCMPW <cond>, <Rn>, $<imm>, $<nzcv> + + Examples: + CCMP MI, R22, $12, $13 <=> ccmp x22, #0xc, #0xd, mi + CCMNW AL, R1, $11, $8 <=> ccmn w1, #0xb, #0x8, al + +(7) CCMN, CCMNW, CCMP, CCMPW <cond>, <Rn>, <Rm>, $<nzcv> + + Examples: + CCMN VS, R13, R22, $10 <=> ccmn x13, x22, #0xa, vs + CCMPW HS, R19, R14, $11 <=> ccmp w19, w14, #0xb, cs + +(9) CSEL, CSELW, CSNEG, CSNEGW, CSINC, CSINCW <cond>, <Rn>, <Rm>, <Rd>; +FCSELD, FCSELS <cond>, <Fn>, <Fm>, <Fd> + + Examples: + CSEL GT, R0, R19, R1 <=> csel x1, x0, x19, gt + CSNEGW GT, R7, R17, R8 <=> csneg w8, w7, w17, gt + FCSELD EQ, F15, F18, F16 <=> fcsel d16, d15, d18, eq + +(10) TBNZ, TBZ $<imm>, <Rn>, <label>