处理AI胡乱生成的乱摊子

This commit is contained in:
2025-09-07 20:36:02 +08:00
parent ba513e0827
commit c4522b974b
403 changed files with 22915 additions and 44424 deletions

View File

@@ -80,6 +80,32 @@ Rich Feature Set includes:
rpc server/client codec to support msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
# Supported build tags
We gain performance by code-generating fast-paths for slices and maps of built-in types,
and monomorphizing generic code explicitly so we gain inlining and de-virtualization benefits.
The results are 20-40% performance improvements.
Building and running is configured using build tags as below.
At runtime:
- codec.safe: run in safe mode (not using unsafe optimizations)
- codec.notmono: use generics code (bypassing performance-boosting monomorphized code)
- codec.notfastpath: skip fast path code for slices and maps of built-in types (number, bool, string, bytes)
Each of these "runtime" tags has a convenience synonym, i.e. safe, notmono, notfastpath.
Please use these mostly during development - use codec.XXX in your go files.
Build only:
- codec.build: used to generate fastpath and monomorphization code
Test only:
- codec.notmammoth: skip the mammoth generated tests
# Extension Support
Users can register a function to handle the encoding or decoding of their custom
@@ -219,6 +245,12 @@ You can run the tag 'codec.safe' to run tests or build in safe mode. e.g.
go test -tags "alltests codec.safe" -run Suite
```
You can run the tag 'codec.notmono' to build bypassing the monomorphized code e.g.
```
go test -tags codec.notmono -run Json
```
# Running Benchmarks
```

File diff suppressed because it is too large Load Diff

View File

@@ -1,232 +1,61 @@
#!/bin/bash
# Run all the different permutations of all the tests and other things
# This helps ensure that nothing gets broken.
# Build and Run the different test permutations.
# This helps validate that nothing gets broken.
_tests() {
local vet="" # TODO: make it off
local gover=$( ${gocmd} version | cut -f 3 -d ' ' )
[[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0
[[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0
case $gover in
go1.[7-9]*|go1.1[0-9]*|go2.*|devel*) true ;;
*) return 1
esac
# note that codecgen requires fastpath, so you cannot do "codecgen codec.notfastpath"
# we test the following permutations which all execute different code paths as below.
echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe), (codecgen/unsafe)"
local echo=1
local nc=2 # count
local cpus="1,$(nproc)"
# if using the race detector, then set nc to
if [[ " ${zargs[@]} " =~ "-race" ]]; then
cpus="$(nproc)"
fi
local a=( "" "codec.notfastpath" "codec.safe" "codec.notfastpath codec.safe" "codecgen" )
local b=()
local c=()
for i in "${a[@]}"
do
local i2=${i:-default}
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'"
[[ "$zcover" == "1" ]] && c=( -coverprofile "${i2// /-}.cov.out" )
true &&
${gocmd} vet -printfuncs "errorf" "$@" &&
if [[ "$echo" == 1 ]]; then set -o xtrace; fi &&
${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "alltests $i" -count $nc -cpu $cpus -run "TestCodecSuite" "${c[@]}" "$@" &
if [[ "$echo" == 1 ]]; then set +o xtrace; fi
b+=("${i2// /-}.cov.out")
[[ "$zwait" == "1" ]] && wait
# if [[ "$?" != 0 ]]; then return 1; fi
_build_proceed() {
# return success (0) if we should, and 1 (fail) if not
if [[ "${zforce}" ]]; then return 0; fi
for a in "fastpath.generated.go" "json.mono.generated.go"; do
if [[ ! -e "$a" ]]; then return 0; fi
for b in `ls -1 *.go.tmpl gen.go gen_mono.go values_test.go`; do
if [[ "$a" -ot "$b" ]]; then return 0; fi
done
done
if [[ "$zextra" == "1" ]]; then
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'codec.notfastpath x'; RUN: 'Test.*X$'"
[[ "$zcover" == "1" ]] && c=( -coverprofile "x.cov.out" )
${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "codec.notfastpath x" -count $nc -run 'Test.*X$' "${c[@]}" &
b+=("x.cov.out")
[[ "$zwait" == "1" ]] && wait
fi
wait
# go tool cover is not supported for gccgo, gollvm, other non-standard go compilers
[[ "$zcover" == "1" ]] &&
command -v gocovmerge &&
gocovmerge "${b[@]}" > __merge.cov.out &&
${gocmd} tool cover -html=__merge.cov.out
return 1
}
# is a generation needed?
# _ng: decide whether generated file $1 needs (re)generation.
# Prints "1" when $1 is absent or older than any template/input file;
# prints nothing when $1 is up to date. Callers capture the output via $(_ng ...).
_ng() {
local target="$1"
[[ -e "$target" ]] || { echo 1; return; }
local dep
for dep in $(ls -1 *.go.tmpl gen.go values_test.go); do
[[ "$target" -ot "$dep" ]] && { echo 1; return; }
done
return 0
}
# _prependbt: create $2 consisting of a "// +build generated" build-tag header
# followed by the contents of $1, then remove $1.
# Fix: quote the parameter expansions so paths containing whitespace or glob
# characters do not break word-splitting (the original used bare ${1}/${2}).
_prependbt() {
printf '%s\n' '// +build generated' > "${2}"
cat "${1}" >> "${2}"
rm -f "${1}"
}
# _build generates fast-path.go and gen-helper.go.
# _build generates fastpath.go
_build() {
if ! [[ "${zforce}" || $(_ng "fast-path.generated.go") || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi
# if ! [[ "${zforce}" || $(_ng "fastpath.generated.go") || $(_ng "json.mono.generated.go") ]]; then return 0; fi
_build_proceed
if [ $? -eq 1 ]; then return 0; fi
if [ "${zbak}" ]; then
_zts=`date '+%m%d%Y_%H%M%S'`
_gg=".generated.go"
[ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak
[ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak
[ -e "fastpath${_gg}" ] && mv fastpath${_gg} fastpath${_gg}__${_zts}.bak
[ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak
fi
rm -f gen-helper.generated.go fast-path.generated.go gen.generated.go \
*safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
fi
rm -f fast*path.generated.go *mono*generated.go *_generated_test.go gen-from-tmpl*.generated.go
cat > gen.generated.go <<EOF
// +build codecgen.exec
local btags="codec.build codec.notmono codec.safe codec.notfastpath"
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
const genDecMapTmpl = \`
EOF
cat >> gen.generated.go < gen-dec-map.go.tmpl
cat >> gen.generated.go <<EOF
\`
const genDecListTmpl = \`
EOF
cat >> gen.generated.go < gen-dec-array.go.tmpl
cat >> gen.generated.go <<EOF
\`
const genEncChanTmpl = \`
EOF
cat >> gen.generated.go < gen-enc-chan.go.tmpl
cat >> gen.generated.go <<EOF
\`
EOF
cat > gen-from-tmpl.codec.generated.go <<EOF
package codec
func GenRunTmpl2Go(in, out string) { genRunTmpl2Go(in, out) }
func GenRunSortTmpl2Go(in, out string) { genRunSortTmpl2Go(in, out) }
EOF
# stub xxxRv and xxxRvSlice creation, before you create it
cat > gen-from-tmpl.sort-slice-stubs.generated.go <<EOF
// +build codecgen.sort_slice
package codec
import "reflect"
import "time"
func GenTmplRun2Go(in, out string) { genTmplRun2Go(in, out) }
func GenMonoAll() { genMonoAll() }
EOF
for i in string bool uint64 int64 float64 bytes time; do
local i2=$i
case $i in
'time' ) i2="time.Time";;
'bytes' ) i2="[]byte";;
esac
cat >> gen-from-tmpl.sort-slice-stubs.generated.go <<EOF
type ${i}Rv struct { v ${i2}; r reflect.Value }
type ${i}RvSlice []${i}Rv
func (${i}RvSlice) Len() int { return 0 }
func (${i}RvSlice) Less(i, j int) bool { return false }
func (${i}RvSlice) Swap(i, j int) {}
type ${i}Intf struct { v ${i2}; i interface{} }
type ${i}IntfSlice []${i}Intf
func (${i}IntfSlice) Len() int { return 0 }
func (${i}IntfSlice) Less(i, j int) bool { return false }
func (${i}IntfSlice) Swap(i, j int) {}
cat > gen-from-tmpl.generated.go <<EOF
//go:build ignore
package main
import "${zpkg}"
func main() {
codec.GenTmplRun2Go("fastpath.go.tmpl", "base.fastpath.generated.go")
codec.GenTmplRun2Go("fastpath.notmono.go.tmpl", "base.fastpath.notmono.generated.go")
codec.GenTmplRun2Go("mammoth_test.go.tmpl", "mammoth_generated_test.go")
codec.GenMonoAll()
}
EOF
done
sed -e 's+// __DO_NOT_REMOVE__NEEDED_FOR_REPLACING__IMPORT_PATH__FOR_CODEC_BENCH__+import . "github.com/ugorji/go/codec"+' \
shared_test.go > bench/shared_test.go
# explicitly return 0 if this passes, else return 1
local btags="codec.notfastpath codec.safe codecgen.exec"
rm -f sort-slice.generated.go fast-path.generated.go gen-helper.generated.go mammoth_generated_test.go mammoth2_generated_test.go
cat > gen-from-tmpl.sort-slice.generated.go <<EOF
// +build ignore
package main
import "${zpkg}"
func main() {
codec.GenRunSortTmpl2Go("sort-slice.go.tmpl", "sort-slice.generated.go")
}
EOF
${gocmd} run -tags "$btags codecgen.sort_slice" gen-from-tmpl.sort-slice.generated.go || return 1
rm -f gen-from-tmpl.sort-slice.generated.go
cat > gen-from-tmpl.generated.go <<EOF
// +build ignore
package main
import "${zpkg}"
func main() {
codec.GenRunTmpl2Go("fast-path.go.tmpl", "fast-path.generated.go")
codec.GenRunTmpl2Go("gen-helper.go.tmpl", "gen-helper.generated.go")
codec.GenRunTmpl2Go("mammoth-test.go.tmpl", "mammoth_generated_test.go")
codec.GenRunTmpl2Go("mammoth2-test.go.tmpl", "mammoth2_generated_test.go")
}
EOF
${gocmd} run -tags "$btags" gen-from-tmpl.generated.go || return 1
rm -f gen-from-tmpl.generated.go
rm -f gen-from-tmpl.*generated.go
rm -f gen-from-tmpl*.generated.go
return 0
}
# _codegenerators: run the external codecgen tool to produce the codecgen-based
# test files. Skipped entirely unless forced ($zforce) or _ng says the output
# is stale. Relies on caller-scoped vars: zforce, zargs, gocmd, zfin, zfin2.
_codegenerators() {
local c5="_generated_test.go"
local c7="$PWD/codecgen"
local c8="$c7/__codecgen"
local c9="codecgen-scratch.go"
if ! [[ $zforce || $(_ng "values_codecgen${c5}") ]]; then return 0; fi
# Note: ensure you run the codecgen for this codebase/directory i.e. ./codecgen/codecgen
true &&
echo "codecgen ... " &&
# rebuild the codecgen binary only if missing or older than its source
if [[ $zforce || ! -f "$c8" || "$c7/gen.go" -nt "$c8" ]]; then
echo "rebuilding codecgen ... " && ( cd codecgen && ${gocmd} build -o $c8 ${zargs[*]} . )
fi &&
$c8 -rt 'codecgen' -t 'codecgen generated' -o "values_codecgen${c5}" -d 19780 "$zfin" "$zfin2" &&
# scratch copy works around codecgen reading the file it is about to overwrite
cp mammoth2_generated_test.go $c9 &&
$c8 -t 'codecgen,!codec.notfastpath,!codec.notmammoth generated,!codec.notfastpath,!codec.notmammoth' -o "mammoth2_codecgen${c5}" -d 19781 "mammoth2_generated_test.go" &&
rm -f $c9 &&
echo "generators done!"
}
_prebuild() {
echo "prebuild: zforce: $zforce"
local d="$PWD"
local zfin="test_values.generated.go"
local zfin2="test_values_flex.generated.go"
@@ -236,13 +65,12 @@ _prebuild() {
# zpkg=${d##*/src/}
# zgobase=${d%%/src/*}
# rm -f *_generated_test.go
rm -f codecgen-*.go &&
# if [[ $zforce ]]; then ${gocmd} install ${zargs[*]} .; fi &&
true &&
_build &&
cp $d/values_test.go $d/$zfin &&
cp $d/values_flex_test.go $d/$zfin2 &&
_codegenerators &&
if [[ "$(type -t _codegenerators_external )" = "function" ]]; then _codegenerators_external ; fi &&
if [[ $zforce ]]; then ${gocmd} install ${zargs[*]} .; fi &&
returncode=0 &&
echo "prebuild done successfully"
rm -f $d/$zfin $d/$zfin2
@@ -251,54 +79,67 @@ _prebuild() {
}
# _make: force a full prebuild (codegen) pass and install the package.
# Temporarily sets zforce=1 so _prebuild regenerates everything, then restores it.
# NOTE(review): _prebuild and `go install` appear to run twice (once with force,
# once after restoring zforce). This looks like old and new diff lines merged by
# the page rendering -- confirm the intended single sequence before relying on it.
_make() {
local makeforce=${zforce}
zforce=1
(cd codecgen && ${gocmd} install ${zargs[*]} .) && _prebuild && ${gocmd} install ${zargs[*]} .
zforce=${makeforce}
_prebuild && ${gocmd} install ${zargs[*]} .
}
# _clean: delete generated scaffolding files left behind by build/prebuild runs.
_clean() {
local pat
for pat in "gen-from-tmpl.*generated.go" "codecgen-*.go" "test_values.generated.go" "test_values_flex.generated.go"; do
rm -f $pat
done
}
_release() {
local reply
read -p "Pre-release validation takes a few minutes and MUST be run from within GOPATH/src. Confirm y/n? " -n 1 -r reply
echo
if [[ ! $reply =~ ^[Yy]$ ]]; then return 1; fi
# _tests_run_one: launch one `go test` permutation in the background.
# Relies on bash dynamic scoping for the caller's (_tests') locals:
# i (tag set), nc, cpus, vet, covdir, plus globals zargs, ztestargs, zcover, gocmd.
# The sentinel value i=="x" switches to the extra Test.*X$ suite with its own tags.
_tests_run_one() {
local tt="alltests $i"
local rr="TestCodecSuite"
if [[ "x$i" == "xx" ]]; then tt="codec.notmono codec.notfastpath x"; rr='Test.*X$'; fi
local g=( ${zargs[*]} ${ztestargs[*]} -count $nc -cpu $cpus -vet "$vet" -tags "$tt" -run "$rr" )
[[ "$zcover" == "1" ]] && g+=( -cover )
# g+=( -ti "$k" )
g+=( -tdiff )
[[ "$zcover" == "1" ]] && g+=( -test.gocoverdir $covdir )
# `local -` makes the following `set -x` revert automatically when the function returns
local -
set -x
${gocmd} test "${g[@]}" &
}
# expects GOROOT, GOROOT_BOOTSTRAP to have been set.
if [[ -z "${GOROOT// }" || -z "${GOROOT_BOOTSTRAP// }" ]]; then return 1; fi
# (cd $GOROOT && git checkout -f master && git pull && git reset --hard)
(cd $GOROOT && git pull)
local f=`pwd`/make.release.out
cat > $f <<EOF
========== `date` ===========
EOF
# # go 1.6 and below kept giving memory errors on Mac OS X during SDK build or go run execution,
# # that is fine, as we only explicitly test the last 3 releases and tip (2 years).
local makeforce=${zforce}
zforce=1
for i in 1.10 1.11 1.12 master
do
echo "*********** $i ***********" >>$f
if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi
(false ||
(echo "===== BUILDING GO SDK for branch: $i ... =====" &&
cd $GOROOT &&
git checkout -f $i && git reset --hard && git clean -f . &&
cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) &&
echo "===== GO SDK BUILD DONE =====" &&
_prebuild &&
echo "===== PREBUILD DONE with exit: $? =====" &&
_tests "$@"
if [[ "$?" != 0 ]]; then return 1; fi
# _tests: build and run every supported test permutation (combinations of the
# codec.safe / codec.notfastpath / codec.notmono build tags), optionally with
# coverage collected into a covdata dir and rendered as HTML at the end.
# Requires go1.20+; disables coverage for gccgo/gollvm toolchains.
_tests() {
local vet="" # TODO: make it off
local gover=$( ${gocmd} version | cut -f 3 -d ' ' )
# go tool cover is not supported for gccgo, gollvm, other non-standard go compilers
[[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0
[[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0
case $gover in
go1.2[0-9]*|go2.*|devel*) true ;;
*) return 1
esac
# we test the following permutations which all execute different code paths as below.
echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe)"
local nc=2 # count
local cpus="1,$(nproc)"
# if using the race detector, then set nc to
if [[ " ${zargs[@]} " =~ "-race" ]]; then
cpus="$(nproc)"
fi
local covdir=""
# every combination of the three runtime build tags (8 permutations)
local a=( "" "codec.safe" "codec.notfastpath" "codec.safe codec.notfastpath"
"codec.notmono" "codec.notmono codec.safe"
"codec.notmono codec.notfastpath" "codec.notmono codec.safe codec.notfastpath" )
[[ "$zextra" == "1" ]] && a+=( "x" )
[[ "$zcover" == "1" ]] && covdir=`mktemp -d`
${gocmd} vet -printfuncs "errorf" "$@" || return 1
for i in "${a[@]}"; do
local j=${i:-default}; j="${j// /-}"; j="${j//codec./}"
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'"
_tests_run_one
[[ "$zwait" == "1" ]] && wait
# if [[ "$?" != 0 ]]; then return 1; fi
done
# NOTE(review): the next two lines reference makeforce and a RELEASE banner that
# belong to the (removed) _release function -- looks like diff-rendering residue; confirm.
zforce=${makeforce}
echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++"
wait
[[ "$zcover" == "1" ]] &&
echo "go tool covdata output" &&
${gocmd} tool covdata percent -i $covdir &&
${gocmd} tool covdata textfmt -i $covdir -o __cov.out &&
${gocmd} tool cover -html=__cov.out
}
_usage() {
@@ -306,11 +147,10 @@ _usage() {
# -pf [p=prebuild (f=force)]
cat <<EOF
primary usage: $0
primary usage: $0
-t[esow] -> t=tests [e=extra, s=short, o=cover, w=wait]
-[md] -> [m=make, d=race detector]
-[n l i] -> [n=inlining diagnostics, l=mid-stack inlining, i=check inlining for path (path)]
-v -> v=verbose
-v -> v=verbose (more v's to increase verbose level)
EOF
if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi
}
@@ -331,15 +171,15 @@ _main() {
local gocmd=${MYGOCMD:-go}
OPTIND=1
while getopts ":cetmnrgpfvldsowkxyzi" flag
while getopts ":cetmnrgpfvldsowikxyz" flag
do
case "x$flag" in
'xw') zwait=1 ;;
'xv') zverbose+=(1) ;;
'xo') zcover=1 ;;
'xe') zextra=1 ;;
'xw') zwait=1 ;;
'xf') zforce=1 ;;
'xs') ztestargs+=("-short") ;;
'xv') zverbose+=(1) ;;
'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;;
'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;;
'xd') zargs+=("-race") ;;
@@ -357,14 +197,23 @@ _main() {
'xg') _go ;;
'xp') _prebuild "$@" ;;
'xc') _clean "$@" ;;
esac
# handle from local run.sh
case "x$x" in
'xi') _check_inlining_one "$@" ;;
'xk') _go_compiler_validation_suite ;;
'xx') _analyze_checks "$@" ;;
'xy') _analyze_debug_types "$@" ;;
'xz') _analyze_do_inlining_and_more "$@" ;;
'xk') _go_compiler_validation_suite ;;
'xi') _check_inlining_one "$@" ;;
esac
# unset zforce zargs zbenchflags
}
[ "." = `dirname $0` ] && _main "$@"
# _xtrace() {
# local -
# set -x
# "${@}"
# }

File diff suppressed because it is too large Load Diff

View File

@@ -1,17 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build codecgen || generated
// +build codecgen generated
package codec
// this file sets the codecgen variable to true
// when the build tag codecgen is set.
//
// some tests depend on knowing whether in the context of codecgen or not.
// For example, some tests should be skipped during codecgen e.g. missing fields tests.
// init marks this build as a codecgen/generated build by setting the
// package-level codecgen flag (declared elsewhere in the package); tests
// consult it to skip cases that do not apply under codecgen.
func init() {
codecgen = true
}

View File

@@ -8,6 +8,19 @@ import (
"strconv"
)
// readFloatResult holds the intermediate state produced while scanning a
// decimal floating-point literal from a byte slice (see readFloat).
type readFloatResult struct {
mantissa uint64 // accumulated mantissa digits
exp int8 // decimal exponent -- NOTE(review): inferred from name; confirm against readFloat
neg bool // presumably set when a leading minus sign was seen -- confirm
trunc bool // presumably set when mantissa digits were dropped/truncated -- confirm
bad bool // bad decimal string
hardexp bool // exponent is hard to handle (> 2 digits, etc)
ok bool // parse completed successfully
// sawdot bool
// sawexp bool
//_ [2]bool // padding
}
// Per go spec, floats are represented in memory as
// IEEE single or double precision floating point values.
//
@@ -234,6 +247,10 @@ func parseFloat64_custom(b []byte) (f float64, err error) {
}
func parseUint64_simple(b []byte) (n uint64, ok bool) {
if len(b) > 1 && b[0] == '0' { // punt on numbers with leading zeros
return
}
var i int
var n1 uint64
var c uint8
@@ -356,19 +373,6 @@ func parseNumber(b []byte, z *fauxUnion, preferSignedInt bool) (err error) {
return
}
type readFloatResult struct {
mantissa uint64
exp int8
neg bool
trunc bool
bad bool // bad decimal string
hardexp bool // exponent is hard to handle (> 2 digits, etc)
ok bool
// sawdot bool
// sawexp bool
//_ [2]bool // padding
}
func readFloat(s []byte, y floatinfo) (r readFloatResult) {
var i uint // uint, so that we eliminate bounds checking
var slen = uint(len(s))
@@ -384,13 +388,23 @@ func readFloat(s []byte, y floatinfo) (r readFloatResult) {
i++
}
// we considered punting early if string has length > maxMantDigits, but this doesn't account
// considered punting early if string has length > maxMantDigits, but doesn't account
// for trailing 0's e.g. 700000000000000000000 can be encoded exactly as it is 7e20
var nd, ndMant, dp int8
var sawdot, sawexp bool
var xu uint64
if i+1 < slen && s[i] == '0' {
switch s[i+1] {
case '.', 'e', 'E':
// ok
default:
r.bad = true
return
}
}
LOOP:
for ; i < slen; i++ {
switch s[i] {

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,7 @@ Supported Serialization formats are:
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
- simple:
- simple: (unpublished)
This package will carefully use 'package unsafe' for performance reasons in specific places.
You can build without unsafe use by passing the safe or appengine tag
@@ -78,6 +78,32 @@ Rich Feature Set includes:
msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
# Supported build tags
We gain performance by code-generating fast-paths for slices and maps of built-in types,
and monomorphizing generic code explicitly so we gain inlining and de-virtualization benefits.
The results are 20-40% performance improvements.
Building and running is configured using build tags as below.
At runtime:
- codec.safe: run in safe mode (not using unsafe optimizations)
- codec.notmono: use generics code (bypassing performance-boosting monomorphized code)
- codec.notfastpath: skip fast path code for slices and maps of built-in types (number, bool, string, bytes)
Each of these "runtime" tags has a convenience synonym, i.e. safe, notmono, notfastpath.
Please use these mostly during development - use codec.XXX in your go files.
Build only:
- codec.build: used to generate fastpath and monomorphization code
Test only:
- codec.notmammoth: skip the mammoth generated tests
# Extension Support
Users can register a function to handle the encoding or decoding of
@@ -203,6 +229,10 @@ You can run the tag 'codec.safe' to run tests or build in safe mode. e.g.
go test -tags codec.safe -run Json
go test -tags "alltests codec.safe" -run Suite
You can run the tag 'codec.notmono' to build bypassing the monomorphized code e.g.
go test -tags codec.notmono -run Json
Running Benchmarks
cd bench
@@ -225,3 +255,87 @@ Embedded fields are encoded as if they exist in the top-level struct,
with some caveats. See Encode documentation.
*/
package codec
/*
Generics
Generics are used across the board to reduce boilerplate, and hopefully
improve performance by
- reducing need for interface calls (de-virtualization)
- resultant inlining of those calls
encoder/decoder --> Driver (json/cbor/...) --> input/output (bytes or io abstraction)
There are 2 * 5 * 2 (20) combinations of monomorphized values.
Key rules
- do not use top-level generic functions.
Due to type inference, monomorphizing them proves challenging
- only use generic methods.
Monomorphizing is done at the type once, and method names need not change
- do not have method calls have a parameter of an encWriter or decReader.
All those calls are handled directly by the driver.
- Include a helper type for each parameterized thing, and add all generic functions to them e.g.
helperEncWriter[T encWriter]
helperEncReader[T decReader]
helperEncDriver[T encDriver]
helperDecDriver[T decDriver]
- Always use T as the generic type name (when needed)
- No inline types
- No closures taking parameters of generic types
*/
/*
Naming convention:
Currently, as generic and non-generic types/functions/vars are put in the same files,
we suffer because:
- build takes longer as non-generic code is built when a build tag wants only monomorphised code
- files have many lines which are not used at runtime (due to type parameters)
- code coverage is inaccurate on a single run
To resolve this, we are streamlining our file naming strategy.
Basically, we will have the following nomenclature for filenames:
- fastpath (tag:notfastpath): *.notfastpath.*.go vs *.fastpath.*.go
- typed parameters (tag:notmono): *.notmono.*.go vs *.mono.*.go
- safe (tag:safe): *.safe.*.go vs *.unsafe.go
- generated files: *.generated.go
- all others (tags:N/A): *.go without safe/mono/fastpath/generated in the name
The following files will be affected and split/renamed accordingly
Base files:
- binc.go
- cbor.go
- json.go
- msgpack.go
- simple.go
- decode.go
- encode.go
For each base file, split into __file__.go (containing type parameters) and __file__.base.go.
__file__.go will only build with notmono.
Other files:
- fastpath.generated.go -> base.fastpath.generated.go and base.fastpath.notmono.generated.go
- fastpath.not.go -> base.notfastpath.go
- init.go -> init.notmono.go
Appropriate build tags will be included in the files, and the right ones only used for
monomorphization.
*/
/*
Caching Handle options for fast runtime use
If using cached values from Handle options, then
- re-cache them at each reset() call
- reset is always called at the start of each (Must)(En|De)code
- which calls (en|de)coder.reset([]byte|io.Reader|String)
- which calls (en|de)cDriver.reset()
- at reset, (en|de)c(oder|Driver) can re-cache Handle options before each run
Some examples:
- json: e.rawext,di,d,ks,is / d.rawext
- decode: (decoderBase) d.jsms,mtr,str,
*/

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,555 +0,0 @@
// +build !notfastpath
// +build !codec.notfastpath
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from fast-path.go.tmpl - DO NOT EDIT.
package codec
// Fast path functions try to create a fast path encode or decode implementation
// for common maps and slices.
//
// We define the functions and register them in this single file
// so as not to pollute the encode.go and decode.go, and create a dependency in there.
// This file can be omitted without causing a build failure.
//
// The advantage of fast paths is:
// - Many calls bypass reflection altogether
//
// Currently support
// - slice of all builtin types (numeric, bool, string, []byte)
// - maps of builtin types to builtin or interface{} type, EXCEPT FOR
// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{}
// AND values of type type int8/16/32, uint16/32
// This should provide adequate "typical" implementations.
//
// Note that fast track decode functions must handle values for which an address cannot be obtained.
// For example:
// m2 := map[string]int{}
// p2 := []interface{}{m2}
// // decoding into p2 will bomb if fast track functions do not treat like unaddressable.
//
{{/*
fastpathEncMapStringUint64R (called by fastpath...switch)
EncMapStringUint64V (called by codecgen)
fastpathEncSliceBoolR: (called by fastpath...switch) (checks f.ti.mbs and calls one of them below)
EncSliceBoolV (also called by codecgen)
EncAsMapSliceBoolV (delegate when mapbyslice=true)
fastpathDecSliceIntfR (called by fastpath...switch) (calls Y or N below depending on if it can be updated)
DecSliceIntfX (called by codecgen) (calls Y below)
DecSliceIntfY (delegate when slice CAN be updated)
DecSliceIntfN (delegate when slice CANNOT be updated e.g. from array or non-addressable slice)
fastpathDecMap...R (called by fastpath...switch) (calls L or X? below)
DecMap...X (called by codecgen)
DecMap...L (delegated to by both above)
*/ -}}
import (
"reflect"
"sort"
)
const fastpathEnabled = true
{{/*
const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v"
*/ -}}
type fastpathT struct {}
var fastpathTV fastpathT
type fastpathE struct {
{{/* rtid uintptr */ -}}
rt reflect.Type
encfn func(*Encoder, *codecFnInfo, reflect.Value)
decfn func(*Decoder, *codecFnInfo, reflect.Value)
}
type fastpathA [{{ .FastpathLen }}]fastpathE
type fastpathARtid [{{ .FastpathLen }}]uintptr
var fastpathAv fastpathA
var fastpathAvRtid fastpathARtid
type fastpathAslice struct{}
func (fastpathAslice) Len() int { return {{ .FastpathLen }} }
func (fastpathAslice) Less(i, j int) bool {
return fastpathAvRtid[uint(i)] < fastpathAvRtid[uint(j)]
}
func (fastpathAslice) Swap(i, j int) {
fastpathAvRtid[uint(i)], fastpathAvRtid[uint(j)] = fastpathAvRtid[uint(j)], fastpathAvRtid[uint(i)]
fastpathAv[uint(i)], fastpathAv[uint(j)] = fastpathAv[uint(j)], fastpathAv[uint(i)]
}
func fastpathAvIndex(rtid uintptr) int {
// use binary search to grab the index (adapted from sort/search.go)
// Note: we use goto (instead of for loop) so this can be inlined.
// h, i, j := 0, 0, {{ .FastpathLen }}
var h, i uint
var j uint = {{ .FastpathLen }}
LOOP:
if i < j {
h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
if fastpathAvRtid[h] < rtid {
i = h + 1
} else {
j = h
}
goto LOOP
}
if i < {{ .FastpathLen }} && fastpathAvRtid[i] == rtid {
return int(i)
}
return -1
}
// due to possible initialization loop error, make fastpath in an init()
func init() {
var i uint = 0
fn := func(v interface{},
fe func(*Encoder, *codecFnInfo, reflect.Value),
fd func(*Decoder, *codecFnInfo, reflect.Value)) {
xrt := reflect.TypeOf(v)
xptr := rt2id(xrt)
fastpathAvRtid[i] = xptr
fastpathAv[i] = fastpathE{xrt, fe, fd}
i++
}
{{/* do not register []byte in fast-path */}}
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R)
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R)
{{end}}{{end}}{{end}}
sort.Sort(fastpathAslice{})
}
// -- encode
// -- -- fast path type switch
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
case []{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
case *[]{{ .Elem }}:
if *v == nil {
e.e.EncodeNil()
} else {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
case map[{{ .MapKey }}]{{ .Elem }}:
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
case *map[{{ .MapKey }}]{{ .Elem }}:
if *v == nil {
e.e.EncodeNil()
} else {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
}
{{end}}{{end}}{{end -}}
default:
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
return false
}
return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
var v []{{ .Elem }}
if rv.Kind() == reflect.Array {
rvGetSlice4Array(rv, &v)
} else {
v = rv2i(rv).([]{{ .Elem }})
}
if f.ti.mbs {
fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(v, e)
} else {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
}
}
func (fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) {
{{/* if v == nil { e.e.EncodeNil(); return } */ -}}
{{ if eq .Elem "uint8" "byte" -}}
e.e.EncodeStringBytesRaw(v)
{{ else -}}
e.arrayStart(len(v))
for j := range v {
e.arrayElem()
{{ encmd .Elem "v[j]"}}
}
e.arrayEnd()
{{ end -}}
}
func (fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) {
{{/* if v == nil { e.e.EncodeNil() } else */ -}}
e.haltOnMbsOddLen(len(v))
{{/*
if len(v)&1 != 0 { // similar to &1==1 or %2 == 1
e.errorf(fastpathMapBySliceErrMsg, len(v))
}
*/ -}}
e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
for j := range v {
if j&1 == 0 { // if j%2 == 0 {
e.mapElemKey()
} else {
e.mapElemValue()
}
{{ encmd .Elem "v[j]"}}
}
e.mapEnd()
}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e)
}
func (fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) {
{{/* if v == nil { e.e.EncodeNil(); return } */ -}}
e.mapStart(len(v))
if e.h.Canonical { {{/* need to figure out .NoCanonical */}}
{{if eq .MapKey "interface{}"}}{{/* out of band */ -}}
var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
e2 := NewEncoderBytes(&mksv, e.hh)
v2 := make([]bytesIntf, len(v))
var i, l uint {{/* put loop variables outside. seems currently needed for better perf */}}
var vp *bytesIntf
for k2 := range v {
l = uint(len(mksv))
e2.MustEncode(k2)
vp = &v2[i]
vp.v = mksv[l:]
vp.i = k2
i++
}
sort.Sort(bytesIntfSlice(v2))
for j := range v2 {
e.mapElemKey()
e.asis(v2[j].v)
e.mapElemValue()
e.encode(v[v2[j].i])
} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
var i uint
for k := range v {
v2[i] = {{if eq $x .MapKey}}k{{else}}{{ $x }}(k){{end}}
i++
}
sort.Sort({{ sorttype .MapKey false}}(v2))
for _, k2 := range v2 {
e.mapElemKey()
{{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ $y := printf "%s(k2)" .MapKey }}{{if eq $x .MapKey }}{{ $y = "k2" }}{{end}}{{ encmd .MapKey $y }}{{end}}
e.mapElemValue()
{{ $y := printf "v[%s(k2)]" .MapKey }}{{if eq $x .MapKey }}{{ $y = "v[k2]" }}{{end}}{{ encmd .Elem $y }}
} {{end}}
} else {
for k2, v2 := range v {
e.mapElemKey()
{{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ encmd .MapKey "k2"}}{{end}}
e.mapElemValue()
{{ encmd .Elem "v2"}}
}
}
e.mapEnd()
}
{{end}}{{end}}{{end -}}
// -- decode
// -- -- fast path type switch
{{/* fastpathDecodeTypeSwitch: one generated type-switch over every fastpath
slice and map type, dispatching to the generated Dec functions.
Returns false when iv is not a fastpath type, so the caller falls back
to the reflection-based path. */ -}}
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
	var changed bool
	var containerLen int
	switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
	{{/* slice value: decode in place (N). *slice: use Y and only write back
	the new header when it actually changed. */ -}}
	case []{{ .Elem }}:
		fastpathTV.{{ .MethodNamePfx "Dec" false }}N(v, d)
	case *[]{{ .Elem }}:
		var v2 []{{ .Elem }}
		if v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}Y(*v, d); changed {
			*v = v2
		}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/*
	// maps only change if nil, and in that case, there's no point copying
*/ -}}
	case map[{{ .MapKey }}]{{ .Elem }}:
		containerLen = d.mapStart(d.d.ReadMapStart())
		if containerLen != containerLenNil {
			if containerLen != 0 {
				fastpathTV.{{ .MethodNamePfx "Dec" false }}L(v, containerLen, d)
			}
			d.mapEnd()
		}
	case *map[{{ .MapKey }}]{{ .Elem }}:
		{{/*
		containerLen = d.mapStart(d.d.ReadMapStart())
		if containerLen == 0 {
			d.mapEnd()
		} else if containerLen == containerLenNil {
			*v = nil
		} else {
			if *v == nil {
				*v = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
			}
			fastpathTV.{{ .MethodNamePfx "Dec" false }}L(*v, containerLen, d)
		}
		// consider delegating fully to X - encoding *map is uncommon, so ok to pay small function call cost
		*/ -}}
		fastpathTV.{{ .MethodNamePfx "Dec" false }}X(v, d)
{{end}}{{end}}{{end -}}
	default:
		_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
		return false
	}
	return true
}
{{/* fastpathDecodeSetZeroTypeSwitch: sets the zero value (nil) into any
supported fastpath *slice or *map target. Returns false when iv is not
a fastpath pointer type, so the caller uses the reflection path. */ -}}
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
	switch v := iv.(type) {
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
	case *[]{{ .Elem }}:
		*v = nil
{{end}}{{end}}{{end}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
	case *map[{{ .MapKey }}]{{ .Elem }}:
		*v = nil
{{end}}{{end}}{{end}}
	default:
		_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
		return false
	}
	return true
}
// -- -- fast path functions
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
{{/*
Slices can change if they
- did not come from an array
- are addressable (from a ptr)
- are settable (e.g. contained in an interface{})
*/}}
{{/* R: reflect-based entry point registered in the fastpath table.
Dispatches on the kind of rv: pointer (may write back), array view, or plain slice. */ -}}
func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
	{{/*
	// seqTypeArray=true means that we are not getting a pointer, so no need to check that.
	if f.seq != seqTypeArray && rv.Kind() == reflect.Ptr {
	*/ -}}
	var v []{{ .Elem }}
	switch rv.Kind() {
	case reflect.Ptr:
		vp := rv2i(rv).(*[]{{ .Elem }})
		var changed bool
		if v, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed {
			*vp = v
		}
	case reflect.Array:
		rvGetSlice4Array(rv, &v)
		fastpathTV.{{ .MethodNamePfx "Dec" false }}N(v, d)
	default:
		fastpathTV.{{ .MethodNamePfx "Dec" false }}N(rv2i(rv).([]{{ .Elem }}), d)
	}
}
{{/* X: decode into *slice, writing the header back only when it changed. */ -}}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) {
	if v, changed := f.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed { *vp = v }
}
{{/* Y: decode into a slice that may grow or shrink; reports whether the caller
must store a new slice header (length, capacity or backing array changed). */ -}}
func (fastpathT) {{ .MethodNamePfx "Dec" false }}Y(v []{{ .Elem }}, d *Decoder) (v2 []{{ .Elem }}, changed bool) {
	{{ if eq .Elem "uint8" "byte" -}}
	switch d.d.ContainerType() {
	case valueTypeNil, valueTypeMap:
		break
	default:
		v2 = d.decodeBytesInto(v[:len(v):len(v)])
		changed = !(len(v2) > 0 && len(v2) == len(v) && &v2[0] == &v[0]) // not same slice
		return
	}
	{{ end -}}
	slh, containerLenS := d.decSliceHelperStart()
	if slh.IsNil {
		if v == nil { return }
		return nil, true
	}
	if containerLenS == 0 {
		if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] }
		slh.End()
		return v, true
	}
	hasLen := containerLenS > 0
	var xlen int
	if hasLen {
		if containerLenS > cap(v) {
			xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
			if xlen <= cap(v) {
				v = v[:uint(xlen)]
			} else {
				v = make([]{{ .Elem }}, uint(xlen))
			}
			changed = true
		} else if containerLenS != len(v) {
			v = v[:containerLenS]
			changed = true
		}
	}
	var j int
	for j = 0; d.containerNext(j, containerLenS, hasLen); j++ {
		if j == 0 && len(v) == 0 { // means hasLen == false
			xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) {{/* xlen = decDefSliceCap */}}
			v = make([]{{ .Elem }}, uint(xlen))
			changed = true
		}
		{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
		if j >= len(v) {
			v = append(v, {{ zerocmd .Elem }})
			changed = true
		}
		slh.ElemContainerState(j)
		{{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem false }}{{ end }}
	}
	if j < len(v) {
		v = v[:uint(j)]
		changed = true
	} else if j == 0 && v == nil {
		v = []{{ .Elem }}{}
		changed = true
	}
	slh.End()
	return v, changed
}
{{/* N: decode in place into a fixed-length view (no write-back);
elements beyond len(v) cannot be stored and the decode stops after reporting. */ -}}
func (fastpathT) {{ .MethodNamePfx "Dec" false }}N(v []{{ .Elem }}, d *Decoder) {
	{{ if eq .Elem "uint8" "byte" -}}
	switch d.d.ContainerType() {
	case valueTypeNil, valueTypeMap:
		break
	default:
		v2 := d.decodeBytesInto(v[:len(v):len(v)])
		if !(len(v2) > 0 && len(v2) == len(v) && &v2[0] == &v[0]) { // not same slice
			copy(v, v2)
		}
		return
	}
	{{ end -}}
	slh, containerLenS := d.decSliceHelperStart()
	if slh.IsNil {
		return
	}
	if containerLenS == 0 {
		slh.End()
		return
	}
	hasLen := containerLenS > 0
	for j := 0; d.containerNext(j, containerLenS, hasLen); j++ {
		{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
		if j >= len(v) {
			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
			return
		}
		slh.ElemContainerState(j)
		{{ if eq .Elem "interface{}" -}}
		d.decode(&v[uint(j)])
		{{- else -}}
		v[uint(j)] = {{ decmd .Elem false }}
		{{- end }}
	}
	slh.End()
}
{{end}}{{end}}{{end -}}
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
{{/*
Maps can change if they are
- addressable (from a ptr)
- settable (e.g. contained in an interface{})
Also, these methods are called by decodeValue directly, after handling a TryNil.
Consequently, there's no need to check for containerLenNil here.
*/ -}}
{{/* R: reflect-based entry point. Through a pointer, a nil map is allocated
before filling; a non-pointer map is filled in place. */ -}}
func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
	containerLen := d.mapStart(d.d.ReadMapStart())
	{{/*
	if containerLen == containerLenNil {
		if rv.Kind() == reflect.Ptr {
			*(rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})) = nil
		}
		return
	}
	*/ -}}
	if rv.Kind() == reflect.Ptr {
		vp, _ := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
		if *vp == nil {
			*vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
		}
		if containerLen != 0 {
			fastpathTV.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
		}
	} else if containerLen != 0 {
		fastpathTV.{{ .MethodNamePfx "Dec" false }}L(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), containerLen, d)
	}
	d.mapEnd()
}
{{/* X: decode into *map. A nil stream sets the map to nil (and no mapEnd is
needed); otherwise allocate if needed, fill via L, then close the map. */ -}}
func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) {
	containerLen := d.mapStart(d.d.ReadMapStart())
	if containerLen == containerLenNil {
		*vp = nil
	} else {
		if *vp == nil {
			*vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
		}
		if containerLen != 0 {
			f.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
		}
		d.mapEnd()
	}
}
{{/* L: decode containerLen (or indefinite, when containerLen < 0) key/value
pairs into a non-nil map. Nil maps are a caller error reported via errorf. */ -}}
func (fastpathT) {{ .MethodNamePfx "Dec" false }}L(v map[{{ .MapKey }}]{{ .Elem }}, containerLen int, d *Decoder) {
	{{/* No need to check if containerLen == containerLenNil, as that is checked by R and L above */ -}}
	if v == nil {
		d.errorf("cannot decode into nil map[{{ .MapKey }}]{{ .Elem }} given stream length: %v", containerLen)
		{{/* d.swallowMapContents(containerLen) */ -}}
		return
	}
	{{if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
	{{else if eq .Elem "bytes" "[]byte" }}mapGet := v != nil && !d.h.MapValueReset
	{{end -}}
	var mk {{ .MapKey }}
	var mv {{ .Elem }}
	hasLen := containerLen > 0
	for j := 0; d.containerNext(j, containerLen, hasLen); j++ {
		d.mapElemKey()
		{{ if eq .MapKey "interface{}" }}mk = nil
		d.decode(&mk)
		if bv, bok := mk.([]byte); bok {
			mk = d.stringZC(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
		}{{ else }}mk = {{ decmd .MapKey true }}{{ end }}
		d.mapElemValue()
		{{ if eq .Elem "interface{}" "[]byte" "bytes" -}}
		if mapGet { mv = v[mk] } else { mv = nil }
		{{ end -}}
		{{ if eq .Elem "interface{}" -}}
		d.decode(&mv)
		{{ else if eq .Elem "[]byte" "bytes" -}}
		mv = d.decodeBytesInto(mv)
		{{ else -}}
		mv = {{ decmd .Elem false }}
		{{ end -}}
		v[mk] = mv
	}
}
{{end}}{{end}}{{end}}

View File

@@ -1,41 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

//go:build notfastpath || codec.notfastpath
// +build notfastpath codec.notfastpath

package codec

import "reflect"

// fastpathEnabled reports (at compile time) that the generated fast-path
// code is absent in this build.
const fastpathEnabled = false

// The generated fast-path code is very large, and adds a few seconds to the build time.
// This causes test execution, execution of small tools which use codec, etc
// to take a long time.
//
// To mitigate, we now support the notfastpath tag.
// This tag disables fastpath during build, allowing for faster build, test execution,
// short-program runs, etc.

// No-op stand-ins for the generated type switches: each reports "not handled",
// so callers always fall back to the reflection-based code paths.
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }

func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }

// func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
// func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }

func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false }

// fastpathT carries the (empty) method set referenced by generated helper code.
type fastpathT struct{}

// fastpathE mirrors the entry layout of the generated fastpath table.
type fastpathE struct {
	rtid  uintptr
	rt    reflect.Type
	encfn func(*Encoder, *codecFnInfo, reflect.Value)
	decfn func(*Decoder, *codecFnInfo, reflect.Value)
}

// fastpathA is a zero-length table: no types have fast paths in this build.
type fastpathA [0]fastpathE

// fastpathAvIndex always misses, since the table is empty.
func fastpathAvIndex(rtid uintptr) int { return -1 }

var fastpathAv fastpathA
var fastpathTV fastpathT

View File

@@ -1,90 +0,0 @@
{{/* ------------------------------------------------------------------------
Decode into a slice, array or channel ({{ .Varname }}).
- nil stream: slices/channels are set to nil
- len == 0: normalize to an empty (non-nil) value
- else: size (or grow) the destination and decode each element, tracking
  the "changed" flag so the caller's value is only written back on change.
NOTE(review): DecArrayCannotExpand previously received len(v) — a bare `v`
that does not exist in the generated code, where the local is the hygienic
name produced by {{ printf "%s" "{{var \"v\"}}" }} (e.g. yyv1). Fixed to use the generated name,
consistent with every other reference in this template.
------------------------------------------------------------------------ */ -}}
{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
{{if not isArray -}}
var {{var "c"}} bool {{/* // changed */}}
_ = {{var "c"}}
if {{var "h"}}.IsNil {
	if {{var "v"}} != nil {
		{{var "v"}} = nil
		{{var "c"}} = true
	}
} else {{end -}}
if {{var "l"}} == 0 {
	{{if isSlice -}}
	if {{var "v"}} == nil {
		{{var "v"}} = []{{ .Typ }}{}
		{{var "c"}} = true
	} else if len({{var "v"}}) != 0 {
		{{var "v"}} = {{var "v"}}[:0]
		{{var "c"}} = true
	} {{else if isChan }}if {{var "v"}} == nil {
		{{var "v"}} = make({{ .CTyp }}, 0)
		{{var "c"}} = true
	}
	{{end -}}
} else {
	{{var "hl"}} := {{var "l"}} > 0
	var {{var "rl"}} int
	_ = {{var "rl"}}
	{{if isSlice }} if {{var "hl"}} {
		if {{var "l"}} > cap({{var "v"}}) {
			{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
			if {{var "rl"}} <= cap({{var "v"}}) {
				{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
			} else {
				{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
			}
			{{var "c"}} = true
		} else if {{var "l"}} != len({{var "v"}}) {
			{{var "v"}} = {{var "v"}}[:{{var "l"}}]
			{{var "c"}} = true
		}
	}
	{{end -}}
	var {{var "j"}} int
	{{/* // var {{var "dn"}} bool */ -}}
	for {{var "j"}} = 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ {
		{{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
			if {{var "hl"}} {
				{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
			} else {
				{{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
			}
			{{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
			{{var "c"}} = true
		}
		{{end -}}
		{{var "h"}}.ElemContainerState({{var "j"}})
		{{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */ -}}
		{{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
		{{ decLineVar $x -}}
		{{var "v"}} <- {{ $x }}
		{{else}}{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
		var {{var "db"}} bool
		if {{var "j"}} >= len({{var "v"}}) {
			{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
			{{var "c"}} = true
			{{/* arrays cannot expand: record the fact and swallow the remaining stream elements */ -}}
			{{else}} z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1); {{var "db"}} = true
			{{end -}}
		}
		if {{var "db"}} {
			z.DecSwallow()
		} else {
			{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x -}}
		}
		{{end -}}
	}
	{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
		{{var "v"}} = {{var "v"}}[:{{var "j"}}]
		{{var "c"}} = true
	} else if {{var "j"}} == 0 && {{var "v"}} == nil {
		{{var "v"}} = []{{ .Typ }}{}
		{{var "c"}} = true
	}
	{{end -}}
}
{{var "h"}}.End()
{{if not isArray }}if {{var "c"}} {
	*{{ .Varname }} = {{var "v"}}
}
{{end -}}

View File

@@ -1,58 +0,0 @@
{{/* Decode into a map (*{{ .Varname }}): a nil stream sets the map to nil;
otherwise allocate if needed and decode key/value pairs in place. */ -}}
{{var "v"}} := *{{ .Varname }}
{{var "l"}} := z.DecReadMapStart()
if {{var "l"}} == codecSelferDecContainerLenNil{{xs}} {
	*{{ .Varname }} = nil
} else {
	if {{var "v"}} == nil {
		{{var "rl"}} := z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
		{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
		*{{ .Varname }} = {{var "v"}}
	}
	{{ $mk := var "mk" -}}
	var {{ $mk }} {{ .KTyp }}
	var {{var "mv"}} {{ .Typ }}
	{{/* mg: fetch the existing value before decoding into it (when MapValueReset
	is off); mdn: value decoded as nil; ms/mok (pointer elems only): whether
	the decoded value must be stored back into the map. */ -}}
	var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
	if z.DecBasicHandle().MapValueReset {
		{{if decElemKindPtr}}{{var "mg"}} = true
		{{else if decElemKindIntf}}if !z.DecBasicHandle().InterfaceReset { {{var "mg"}} = true }
		{{else if not decElemKindImmutable}}{{var "mg"}} = true
		{{end}} }
	if {{var "l"}} != 0 {
		{{var "hl"}} := {{var "l"}} > 0
		for {{var "j"}} := 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ {
			z.DecReadMapElemKey()
			{{ if eq .KTyp "string" -}}
			{{ decLineVarK $mk -}}{{- /* decLineVarKStrZC $mk */ -}}
			{{ else -}}
			{{ decLineVarK $mk -}}
			{{ end -}}
			{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */ -}}
			if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
				{{var "mk"}} = z.DecStringZC({{var "bv"}})
			}
			{{ end -}}
			{{if decElemKindPtr -}}
			{{var "ms"}} = true
			{{end -}}
			if {{var "mg"}} {
				{{if decElemKindPtr -}}
				{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{ $mk }}]
				if {{var "mok"}} {
					{{var "ms"}} = false
				}
				{{else -}}
				{{var "mv"}} = {{var "v"}}[{{ $mk }}]
				{{end -}}
			} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
			z.DecReadMapElemValue()
			{{var "mdn"}} = false
			{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y -}}
			if {{var "mdn"}} {
				{{var "v"}}[{{ $mk }}] = {{decElemZero}}
			} else {{if decElemKindPtr}} if {{var "ms"}} {{end}} {
				{{var "v"}}[{{ $mk }}] = {{var "mv"}}
			}
		}
	} // else len==0: leave as-is (do not clear map entries)
	z.DecReadMapEnd()
}

View File

@@ -1,27 +0,0 @@
{{/* Drain channel {{ printf "%s" "{{.Chan}}" }} into a slice before encoding.
EncBasicHandle().ChanRecvTimeout selects the strategy:
== 0: take only what is immediately available (non-blocking);
 > 0: receive until the timeout fires;
 < 0 (default case): receive until the channel is closed. */ -}}
{{.Label}}:
switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
case timeout{{.Sfx}} == 0: // only consume available
	for {
		select {
		case b{{.Sfx}} := <-{{.Chan}}:
			{{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
		default:
			break {{.Label}}
		}
	}
case timeout{{.Sfx}} > 0: // consume until timeout
	tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
	for {
		select {
		case b{{.Sfx}} := <-{{.Chan}}:
			{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
		case <-tt{{.Sfx}}.C:
			// close(tt.C)
			break {{.Label}}
		}
	}
default: // consume until close
	for b{{.Sfx}} := range {{.Chan}} {
		{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
	}
}

View File

@@ -1,294 +0,0 @@
// comment this out // + build ignore

// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

// Code generated from gen-helper.go.tmpl - DO NOT EDIT.

package codec

import (
	"encoding"
	"reflect"
)

// GenVersion is the current version of codecgen.
const GenVersion = 28

// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice.

// GenHelperEncoder is exported so that it can be used externally by codecgen.
//
// Library users: DO NOT USE IT DIRECTLY or INDIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
func GenHelper() (g genHelper) { return }

// genHelper is the sole entry point through which codecgen-generated code
// obtains the encoder/decoder helper pairs below.
type genHelper struct{}

func (genHelper) Encoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
	ge = genHelperEncoder{e: e}
	ee = genHelperEncDriver{encDriver: e.e}
	return
}

func (genHelper) Decoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
	gd = genHelperDecoder{d: d}
	dd = genHelperDecDriver{decDriver: d.d}
	return
}

// genHelperEncDriver embeds the encDriver to expose its methods to generated code.
type genHelperEncDriver struct {
	encDriver
}

// genHelperDecDriver embeds the decDriver to expose its methods to generated code.
type genHelperDecDriver struct {
	decDriver
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
	M mustHdl
	F fastpathT
	e *Encoder
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct {
	C checkOverflow
	F fastpathT
	d *Decoder
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
	return f.e.h
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWr() *encWr {
	return f.e.w()
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
	return f.e.be // f.e.hh.isBinaryEncoding()
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
	return f.e.js
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
	// f.e.encodeI(iv, false, false)
	f.e.encodeValue(reflect.ValueOf(iv), nil)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
	bs, fnerr := iv.MarshalText()
	f.e.marshalUtf8(bs, fnerr)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
	bs, fnerr := iv.MarshalJSON()
	f.e.marshalAsis(bs, fnerr)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
	bs, fnerr := iv.MarshalBinary()
	f.e.marshalRaw(bs, fnerr)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) Extension(v interface{}) (xfn *extTypeTagFn) {
	return f.e.h.getExtForI(v)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
	f.e.e.EncodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapEnd() { f.e.mapEnd() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayStart(length int) { f.e.arrayStart(length) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayEnd() { f.e.arrayEnd() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayElem() { f.e.arrayElem() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapElemKey() { f.e.mapElemKey() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapElemValue() { f.e.mapElemValue() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeComplex64(v complex64) { f.e.encodeComplex64(v) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeComplex128(v complex128) { f.e.encodeComplex128(v) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncode(v interface{}) { f.e.encode(v) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFnGivenAddr(v interface{}) *codecFn {
	return f.e.h.fn(reflect.TypeOf(v).Elem())
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeNumBoolStrKindGivenAddr(v interface{}, encFn *codecFn) {
	f.e.encodeValueNonNil(reflect.ValueOf(v).Elem(), encFn)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeMapNonNil(v interface{}) {
	if skipFastpathTypeSwitchInDirectCall || !fastpathEncodeTypeSwitch(v, f.e) {
		f.e.encodeValueNonNil(reflect.ValueOf(v), nil)
	}
}

// ---------------- DECODER FOLLOWS -----------------

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
	return f.d.h
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinary() bool {
	return f.d.be // f.d.hh.isBinaryEncoding()
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() { f.d.swallow() }

// // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperDecoder) DecScratchBuffer() []byte {
// 	return f.d.b[:]
// }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
	return &f.d.b
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
	rv := reflect.ValueOf(iv)
	if chkPtr {
		if x, _ := isDecodeable(rv); !x {
			f.d.haltAsNotDecodeable(rv)
		}
	}
	f.d.decodeValue(rv, nil)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
	return f.d.decSliceHelperStart()
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
	f.d.structFieldNotFound(index, name)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
	f.d.arrayCannotExpand(sliceLen, streamLen)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
	halt.onerror(tm.UnmarshalText(f.d.d.DecodeStringAsBytes()))
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
	f.d.jsonUnmarshalV(tm)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
	halt.onerror(bm.UnmarshalBinary(f.d.d.DecodeBytes(nil)))
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) IsJSONHandle() bool {
	return f.d.js
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) Extension(v interface{}) (xfn *extTypeTagFn) {
	return f.d.h.getExtForI(v)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
	f.d.d.DecodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
	return decInferLen(clen, maxlen, unit)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapStart() int { return f.d.mapStart(f.d.d.ReadMapStart()) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapEnd() { f.d.mapEnd() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayStart() int { return f.d.arrayStart(f.d.d.ReadArrayStart()) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayElem() { f.d.arrayElem() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapElemKey() { f.d.mapElemKey() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapElemValue() { f.d.mapElemValue() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecDecodeFloat32() float32 { return f.d.decodeFloat32() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStringZC(v []byte) string { return f.d.stringZC(v) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecodeBytesInto(v []byte) []byte { return f.d.decodeBytesInto(v) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerNext(j, containerLen int, hasLen bool) bool {
	// return f.d.containerNext(j, containerLen, hasLen)
	// rewriting so it can be inlined
	if hasLen {
		return j < containerLen
	}
	return !f.d.checkBreak()
}

View File

@@ -1,273 +0,0 @@
// comment this out // + build ignore

// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

// Code generated from gen-helper.go.tmpl - DO NOT EDIT.

package codec

import (
	"encoding"
	"reflect"
)

// GenVersion is the current version of codecgen.
const GenVersion = {{ .Version }}

// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice.

{{/*
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).
//
// When static codecs are created for types, they will use this value
// to perform encoding or decoding of primitives or known slice or map types.
*/ -}}
// GenHelperEncoder is exported so that it can be used externally by codecgen.
//
// Library users: DO NOT USE IT DIRECTLY or INDIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
func GenHelper() (g genHelper) { return }

// genHelper is the sole entry point through which codecgen-generated code
// obtains the encoder/decoder helper pairs below.
type genHelper struct {}

func (genHelper) Encoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
	ge = genHelperEncoder{e: e}
	ee = genHelperEncDriver{encDriver: e.e}
	return
}

func (genHelper) Decoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
	gd = genHelperDecoder{d: d}
	dd = genHelperDecDriver{decDriver: d.d}
	return
}

// genHelperEncDriver embeds the encDriver to expose its methods to generated code.
type genHelperEncDriver struct {
	encDriver
}

// genHelperDecDriver embeds the decDriver to expose its methods to generated code.
type genHelperDecDriver struct {
	decDriver
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
	M mustHdl
	F fastpathT
	e *Encoder
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct {
	C checkOverflow
	F fastpathT
	d *Decoder
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
	return f.e.h
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWr() *encWr {
	return f.e.w()
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
	return f.e.be // f.e.hh.isBinaryEncoding()
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
	return f.e.js
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
	// f.e.encodeI(iv, false, false)
	f.e.encodeValue(reflect.ValueOf(iv), nil)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
	bs, fnerr := iv.MarshalText()
	f.e.marshalUtf8(bs, fnerr)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
	bs, fnerr := iv.MarshalJSON()
	f.e.marshalAsis(bs, fnerr)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
	bs, fnerr := iv.MarshalBinary()
	f.e.marshalRaw(bs, fnerr)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) Extension(v interface{}) (xfn *extTypeTagFn) {
	return f.e.h.getExtForI(v)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
	f.e.e.EncodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapEnd() { f.e.mapEnd() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayStart(length int) { f.e.arrayStart(length) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayEnd() { f.e.arrayEnd() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteArrayElem() { f.e.arrayElem() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapElemKey() { f.e.mapElemKey() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncWriteMapElemValue() { f.e.mapElemValue() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeComplex64(v complex64) { f.e.encodeComplex64(v) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeComplex128(v complex128) { f.e.encodeComplex128(v) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncode(v interface{}) { f.e.encode(v) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFnGivenAddr(v interface{}) *codecFn { return f.e.h.fn(reflect.TypeOf(v).Elem()) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeNumBoolStrKindGivenAddr(v interface{}, encFn *codecFn) {
	f.e.encodeValueNonNil(reflect.ValueOf(v).Elem(), encFn)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncEncodeMapNonNil(v interface{}) {
	if skipFastpathTypeSwitchInDirectCall || !fastpathEncodeTypeSwitch(v, f.e) {
		f.e.encodeValueNonNil(reflect.ValueOf(v), nil)
	}
}

// ---------------- DECODER FOLLOWS -----------------

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
	return f.d.h
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinary() bool {
	return f.d.be // f.d.hh.isBinaryEncoding()
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() { f.d.swallow() }

// // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperDecoder) DecScratchBuffer() []byte {
// 	return f.d.b[:]
// }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
	return &f.d.b
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
	rv := reflect.ValueOf(iv)
	if chkPtr {
		if x, _ := isDecodeable(rv); !x {
			f.d.haltAsNotDecodeable(rv)
		}
	}
	f.d.decodeValue(rv, nil)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
	return f.d.decSliceHelperStart()
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
	f.d.structFieldNotFound(index, name)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
	f.d.arrayCannotExpand(sliceLen, streamLen)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
	halt.onerror(tm.UnmarshalText(f.d.d.DecodeStringAsBytes()))
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
	f.d.jsonUnmarshalV(tm)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
	halt.onerror(bm.UnmarshalBinary(f.d.d.DecodeBytes(nil)))
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) IsJSONHandle() bool {
	return f.d.js
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) Extension(v interface{}) (xfn *extTypeTagFn) {
	return f.d.h.getExtForI(v)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
	f.d.d.DecodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
	return decInferLen(clen, maxlen, unit)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapStart() int { return f.d.mapStart(f.d.d.ReadMapStart()) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapEnd() { f.d.mapEnd() }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayStart() int { return f.d.arrayStart(f.d.d.ReadArrayStart()) }

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadArrayElem() { f.d.arrayElem() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapElemKey() { f.d.mapElemKey() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecReadMapElemValue() { f.d.mapElemValue() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecDecodeFloat32() float32 { return f.d.decodeFloat32() }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStringZC(v []byte) string { return f.d.stringZC(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecodeBytesInto(v []byte) []byte { return f.d.decodeBytesInto(v) }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// DecContainerNext reports whether element j of a container should be
// read: against containerLen when the stream declared a length, else by
// checking for the stream's break/terminator token.
func (f genHelperDecoder) DecContainerNext(j, containerLen int, hasLen bool) bool {
    // equivalent to f.d.containerNext(j, containerLen, hasLen),
    // rewritten (branch-simple) so it can be inlined
    if !hasLen {
        return !f.d.checkBreak()
    }
    return j < containerLen
}
{{/*
// MARKER: remove WriteStr, as it cannot be inlined as of 20230201.
// Instead, generated code calls (*encWr).WriteStr directly.
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
// func (f genHelperEncoder) WriteStr(s string) {
// f.e.encWr.writestr(s)
// }
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
return i2rtid(v)
}
*/ -}}

View File

@@ -1,192 +0,0 @@
// +build codecgen.exec
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
// genDecMapTmpl generates the code that decodes a stream map into a
// map[{{ .KTyp }}]{{ .Typ }}: it handles a nil stream map, sizes a new
// map via DecInferLen, and honors MapValueReset/InterfaceReset when
// re-reading values for existing keys.
const genDecMapTmpl = `
{{var "v"}} := *{{ .Varname }}
{{var "l"}} := z.DecReadMapStart()
if {{var "l"}} == codecSelferDecContainerLenNil{{xs}} {
*{{ .Varname }} = nil
} else {
if {{var "v"}} == nil {
{{var "rl"}} := z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
*{{ .Varname }} = {{var "v"}}
}
{{ $mk := var "mk" -}}
var {{ $mk }} {{ .KTyp }}
var {{var "mv"}} {{ .Typ }}
var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
if z.DecBasicHandle().MapValueReset {
{{if decElemKindPtr}}{{var "mg"}} = true
{{else if decElemKindIntf}}if !z.DecBasicHandle().InterfaceReset { {{var "mg"}} = true }
{{else if not decElemKindImmutable}}{{var "mg"}} = true
{{end}} }
if {{var "l"}} != 0 {
{{var "hl"}} := {{var "l"}} > 0
for {{var "j"}} := 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ {
z.DecReadMapElemKey()
{{ if eq .KTyp "string" -}}
{{ decLineVarK $mk -}}{{- /* decLineVarKStrZC $mk */ -}}
{{ else -}}
{{ decLineVarK $mk -}}
{{ end -}}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */ -}}
if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = z.DecStringZC({{var "bv"}})
}
{{ end -}}
{{if decElemKindPtr -}}
{{var "ms"}} = true
{{end -}}
if {{var "mg"}} {
{{if decElemKindPtr -}}
{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{ $mk }}]
if {{var "mok"}} {
{{var "ms"}} = false
}
{{else -}}
{{var "mv"}} = {{var "v"}}[{{ $mk }}]
{{end -}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
z.DecReadMapElemValue()
{{var "mdn"}} = false
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y -}}
if {{var "mdn"}} {
{{var "v"}}[{{ $mk }}] = {{decElemZero}}
} else {{if decElemKindPtr}} if {{var "ms"}} {{end}} {
{{var "v"}}[{{ $mk }}] = {{var "mv"}}
}
}
} // else len==0: leave as-is (do not clear map entries)
z.DecReadMapEnd()
}
`
// genDecListTmpl generates the code that decodes a stream array into a
// slice, array or channel: it handles nil and zero-length streams,
// capacity inference via DecInferLen, slice growth/truncation, and
// swallowing elements a fixed-size array cannot hold.
// NOTE: the array cannot-expand branch must reference the generated
// variable {{var "v"}}; a bare `v` does not exist in generated code
// (compare the `len({{var "v"}})` check a few lines above it).
const genDecListTmpl = `
{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
{{if not isArray -}}
var {{var "c"}} bool {{/* // changed */}}
_ = {{var "c"}}
if {{var "h"}}.IsNil {
if {{var "v"}} != nil {
{{var "v"}} = nil
{{var "c"}} = true
}
} else {{end -}}
if {{var "l"}} == 0 {
{{if isSlice -}}
if {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}
{{var "c"}} = true
} else if len({{var "v"}}) != 0 {
{{var "v"}} = {{var "v"}}[:0]
{{var "c"}} = true
} {{else if isChan }}if {{var "v"}} == nil {
{{var "v"}} = make({{ .CTyp }}, 0)
{{var "c"}} = true
}
{{end -}}
} else {
{{var "hl"}} := {{var "l"}} > 0
var {{var "rl"}} int
_ = {{var "rl"}}
{{if isSlice }} if {{var "hl"}} {
if {{var "l"}} > cap({{var "v"}}) {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
if {{var "rl"}} <= cap({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
} else {
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
}
{{var "c"}} = true
} else if {{var "l"}} != len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
{{var "c"}} = true
}
}
{{end -}}
var {{var "j"}} int
{{/* // var {{var "dn"}} bool */ -}}
for {{var "j"}} = 0; z.DecContainerNext({{var "j"}}, {{var "l"}}, {{var "hl"}}); {{var "j"}}++ {
{{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
if {{var "hl"}} {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
} else {
{{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
}
{{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
{{var "c"}} = true
}
{{end -}}
{{var "h"}}.ElemContainerState({{var "j"}})
{{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */ -}}
{{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
{{ decLineVar $x -}}
{{var "v"}} <- {{ $x }}
{{else}}{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
var {{var "db"}} bool
if {{var "j"}} >= len({{var "v"}}) {
{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
{{var "c"}} = true
{{else}} z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1); {{var "db"}} = true
{{end -}}
}
if {{var "db"}} {
z.DecSwallow()
} else {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x -}}
}
{{end -}}
}
{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "j"}}]
{{var "c"}} = true
} else if {{var "j"}} == 0 && {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}
{{var "c"}} = true
}
{{end -}}
}
{{var "h"}}.End()
{{if not isArray }}if {{var "c"}} {
*{{ .Varname }} = {{var "v"}}
}
{{end -}}
`
// genEncChanTmpl generates code that drains a channel into a slice
// prior to encoding, honoring z.EncBasicHandle().ChanRecvTimeout:
//   == 0: consume only immediately-available values
//    > 0: consume until the timeout elapses
//   else: consume until the channel is closed
const genEncChanTmpl = `
{{.Label}}:
switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
case timeout{{.Sfx}} == 0: // only consume available
for {
select {
case b{{.Sfx}} := <-{{.Chan}}:
{{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
default:
break {{.Label}}
}
}
case timeout{{.Sfx}} > 0: // consume until timeout
tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
for {
select {
case b{{.Sfx}} := <-{{.Chan}}:
{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
case <-tt{{.Sfx}}.C:
// close(tt.C)
break {{.Label}}
}
}
default: // consume until close
for b{{.Sfx}} := range {{.Chan}} {
{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
}
}
`

File diff suppressed because it is too large Load Diff

View File

@@ -1,15 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.5
// +build go1.5
package codec
import "reflect"
const reflectArrayOfSupported = true

// reflectArrayOf returns the array type holding count elements of type
// elem, delegating to reflect.ArrayOf (available from go1.5).
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
    return reflect.ArrayOf(count, elem)
}

View File

@@ -1,20 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.5
// +build !go1.5
package codec
import (
"errors"
"reflect"
)
const reflectArrayOfSupported = false

var errNoReflectArrayOf = errors.New("codec: reflect.ArrayOf unsupported by this go version")

// reflectArrayOf always panics on this (pre-go1.5) build, where
// reflect.ArrayOf does not exist; see reflectArrayOfSupported.
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
    panic(errNoReflectArrayOf)
}

View File

@@ -1,13 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.5
// +build go1.5
package codec
import "time"
// fmtTime appends t, formatted per the given layout, to b and returns
// the extended slice (thin wrapper over time.Time.AppendFormat).
func fmtTime(t time.Time, fmt string, b []byte) []byte {
    return t.AppendFormat(b, fmt)
}

View File

@@ -1,16 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.5
// +build !go1.5
package codec
import "time"
func fmtTime(t time.Time, fmt string, b []byte) []byte {
s := t.Format(fmt)
b = b[:len(s)]
copy(b, s)
return b
}

View File

@@ -1,28 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.20 && !safe && !codec.safe && !appengine
// +build go1.20,!safe,!codec.safe,!appengine
package codec
import (
_ "reflect" // needed for go linkname(s)
"unsafe"
)
// growslice returns a slice with capacity grown to hold num additional
// elements beyond the original length, preserving old.Len; the actual
// reallocation is performed by runtime.growslice (see rtgrowslice).
func growslice(typ unsafe.Pointer, old unsafeSlice, num int) (s unsafeSlice) {
    // culled from GOROOT/runtime/slice.go
    // num is first reduced by the existing slack (Cap-Len), so the
    // requested new length old.Cap+num equals old.Len+num(original).
    num -= old.Cap - old.Len
    s = rtgrowslice(old.Data, old.Cap+num, old.Cap, num, typ)
    s.Len = old.Len
    return
}

// rtgrowslice links to runtime.growslice (go1.20+ signature).
//
//go:linkname rtgrowslice runtime.growslice
//go:noescape
func rtgrowslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, typ unsafe.Pointer) unsafeSlice

// //go:linkname growslice reflect.growslice
// //go:noescape
// func growslice(typ unsafe.Pointer, old unsafeSlice, cap int) unsafeSlice

View File

@@ -1,16 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.9 && !go1.20 && !safe && !codec.safe && !appengine
// +build go1.9,!go1.20,!safe,!codec.safe,!appengine
package codec
import (
_ "runtime" // needed for go linkname(s)
"unsafe"
)
// growslice links directly to runtime.growslice (the pre-go1.20
// signature, per this file's build constraints).
//
//go:linkname growslice runtime.growslice
//go:noescape
func growslice(typ unsafe.Pointer, old unsafeSlice, num int) unsafeSlice

View File

@@ -1,13 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.10
// +build !go1.10
package codec
import "reflect"
// makeMapReflect creates a map of the given type. The size hint is
// ignored: reflect.MakeMapWithSize is not available before go1.10.
func makeMapReflect(t reflect.Type, size int) reflect.Value {
    return reflect.MakeMap(t)
}

View File

@@ -1,14 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.10 && (safe || codec.safe || appengine)
// +build go1.10
// +build safe codec.safe appengine
package codec
import "reflect"
// makeMapReflect creates a map of the given type, pre-sized to hold
// approximately size elements.
func makeMapReflect(t reflect.Type, size int) reflect.Value {
    return reflect.MakeMapWithSize(t, size)
}

View File

@@ -1,25 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.10 && !safe && !codec.safe && !appengine
// +build go1.10,!safe,!codec.safe,!appengine
package codec
import (
"reflect"
"unsafe"
)
// makeMapReflect creates a map of type typ with a size hint, bypassing
// reflect.MakeMapWithSize: it calls runtime.makemap directly and
// assembles the resulting reflect.Value by hand.
func makeMapReflect(typ reflect.Type, size int) (rv reflect.Value) {
    t := (*unsafeIntf)(unsafe.Pointer(&typ)).ptr
    urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    urv.typ = t
    urv.flag = uintptr(reflect.Map)
    urv.ptr = makemap(t, size, nil)
    return
}

//go:linkname makemap runtime.makemap
//go:noescape
func makemap(typ unsafe.Pointer, size int, h unsafe.Pointer) unsafe.Pointer

View File

@@ -1,41 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.12 && (safe || codec.safe || appengine)
// +build go1.12
// +build safe codec.safe appengine
package codec
import "reflect"
type mapIter struct {
t *reflect.MapIter
m reflect.Value
values bool
}
func (t *mapIter) Next() (r bool) {
return t.t.Next()
}
func (t *mapIter) Key() reflect.Value {
return t.t.Key()
}
func (t *mapIter) Value() (r reflect.Value) {
if t.values {
return t.t.Value()
}
return
}
func (t *mapIter) Done() {}
func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
*t = mapIter{
m: m,
t: m.MapRange(),
values: values,
}
}

View File

@@ -1,45 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.7 && !go1.12 && (safe || codec.safe || appengine)
// +build go1.7
// +build !go1.12
// +build safe codec.safe appengine
package codec
import "reflect"
type mapIter struct {
m reflect.Value
keys []reflect.Value
j int
values bool
}
func (t *mapIter) Next() (r bool) {
t.j++
return t.j < len(t.keys)
}
func (t *mapIter) Key() reflect.Value {
return t.keys[t.j]
}
func (t *mapIter) Value() (r reflect.Value) {
if t.values {
return t.m.MapIndex(t.keys[t.j])
}
return
}
func (t *mapIter) Done() {}
func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
*t = mapIter{
m: m,
keys: m.MapKeys(),
values: values,
j: -1,
}
}

View File

@@ -1,9 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.10
// +build go1.10
package codec
const allowSetUnexportedEmbeddedPtr = false

View File

@@ -1,9 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.10
// +build !go1.10
package codec
const allowSetUnexportedEmbeddedPtr = true

View File

@@ -1,22 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.4
// +build !go1.4
package codec
import "errors"
// This codec package will only work for go1.4 and above.
// This is for the following reasons:
//   - go 1.4 was released in 2014
//   - go runtime is written fully in go
//   - interface only holds pointers
//   - reflect.Value is stabilized as 3 words
var errCodecSupportedOnlyFromGo14 = errors.New("codec: go 1.3 and below are not supported")

// init fails fast at program start: this file is only compiled under
// the !go1.4 build constraint, so reaching it means an unsupported
// toolchain.
func init() {
    panic(errCodecSupportedOnlyFromGo14)
}

View File

@@ -1,11 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.5 && !go1.6
// +build go1.5,!go1.6
package codec
import "os"
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"

View File

@@ -1,11 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.6 && !go1.7
// +build go1.6,!go1.7
package codec
import "os"
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"

View File

@@ -1,9 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build go1.7
// +build go1.7
package codec
const genCheckVendor = true

View File

@@ -1,9 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.5
// +build !go1.5
package codec
var genCheckVendor = false

File diff suppressed because it is too large Load Diff

View File

@@ -1,147 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// maxArrayLen is the size of uint, which determines
// the maximum length of any array.
// (32 << (^uint(0) >> 63)) is 32 on 32-bit and 64 on 64-bit platforms,
// so this evaluates to 1<<31 - 1 or 1<<63 - 1 respectively.
const maxArrayLen = 1<<((32<<(^uint(0)>>63))-1) - 1
// All non-std package dependencies live in this file,
// so porting to different environment is easy (just update functions).
// pruneSignExt returns the number of leading sign-extension bytes that
// can be dropped from the big-endian integer v without changing its
// value: leading 0x00 bytes for a positive number (pos) whose next byte
// has a clear top bit, or leading 0xff bytes for a negative one whose
// next byte has its top bit set.
func pruneSignExt(v []byte, pos bool) (n int) {
    if len(v) < 2 {
        return 0
    }
    switch {
    case pos && v[0] == 0:
        for n+1 < len(v) && v[n] == 0 && v[n+1]&0x80 == 0 {
            n++
        }
    case !pos && v[0] == 0xff:
        for n+1 < len(v) && v[n] == 0xff && v[n+1]&0x80 != 0 {
            n++
        }
    }
    return
}
// halfFloatToFloatBits converts the IEEE 754 binary16 bit pattern h to
// the equivalent binary32 (float32) bit pattern, handling signed zero,
// subnormals, infinities and NaN.
// retrofitted from:
//   - OGRE (Object-Oriented Graphics Rendering Engine)
//     function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html
func halfFloatToFloatBits(h uint16) uint32 {
    sign := uint32(h>>15) << 31
    exp := int32(h>>10) & 0x1f
    mant := uint32(h & 0x03ff)
    switch {
    case exp == 0:
        if mant == 0 { // plus or minus zero
            return sign
        }
        // subnormal: renormalize the mantissa
        for mant&0x0400 == 0 {
            mant <<= 1
            exp--
        }
        exp++
        mant &^= 0x0400
    case exp == 31:
        if mant == 0 { // infinity
            return sign | 0x7f800000
        }
        return sign | 0x7f800000 | (mant << 13) // NaN
    }
    // rebias exponent (15 -> 127) and widen the mantissa (10 -> 23 bits)
    return sign | uint32(exp+112)<<23 | mant<<13
}
// floatToHalfFloatBits converts the IEEE 754 binary32 (float32) bit
// pattern i to the equivalent binary16 bit pattern, flushing values
// below half range to signed zero and saturating overflow to infinity.
// retrofitted from:
//   - OGRE (Object-Oriented Graphics Rendering Engine)
//     function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html
//   - http://www.java2s.com/example/java-utility-method/float-to/floattohalf-float-f-fae00.html
func floatToHalfFloatBits(i uint32) uint16 {
    sign := (i >> 16) & 0x8000
    exp := int32((i>>23)&0xff) - 112 // rebias exponent (127 -> 15)
    mant := i & 0x7fffff
    var out uint32
    switch {
    case exp <= 0:
        if exp < -10 { // too small for a half subnormal: signed zero
            out = sign
        } else { // encode as a half subnormal
            mant = (mant | 0x800000) >> uint32(1-exp)
            out = sign | mant>>13
        }
    case exp == 143: // source exponent 0xff: Inf or NaN
        if mant == 0 { // infinity
            out = sign | 0x7c00
        } else { // NaN
            mant >>= 13
            var sticky uint32
            if mant == 0 {
                sticky = 1 // keep the NaN from collapsing into Inf
            }
            out = sign | 0x7c00 | mant | sticky
        }
    default:
        if exp > 30 { // overflow: saturate to infinity
            out = sign | 0x7c00
        } else {
            out = sign | uint32(exp)<<10 | mant>>13
        }
    }
    return uint16(out)
}
// growCap returns a new capacity for a slice, given:
//   - oldCap: current capacity
//   - unit:   in-memory size of an element
//   - num:    number of elements to add
//
// Strategy (better than both plain append doubling and the
// bytes.Buffer 2*cap+n model): double while the capacity is at or
// below a unit-size-dependent threshold, grow 1.5x past it, then round
// the byte size up to a multiple of 64 (a cache line).
func growCap(oldCap, unit, num uint) (newCap uint) {
    maxCap := num + oldCap*3/2
    // unit can be 0 (e.g. struct{}{}); also guard wraparound/overflow
    if unit == 0 || maxCap > maxArrayLen || maxCap < oldCap {
        return maxArrayLen
    }

    // threshold at or below which we double instead of growing 1.5x;
    // smaller elements tolerate a larger doubling region
    var doubleThreshold uint = 1024
    switch {
    case unit <= 4:
        doubleThreshold = 8 * 1024
    case unit <= 16:
        doubleThreshold = 2 * 1024
    }

    newCap = 2 + num
    if oldCap > 0 {
        if oldCap <= doubleThreshold { // [0, threshold]
            newCap = num + oldCap*2
        } else { // (threshold, infinity)
            newCap = maxCap
        }
    }

    // round the byte size up to a multiple of 64 (cache line)
    if bsz := newCap * unit; bsz%64 != 0 {
        bsz += 64 - bsz%64
        newCap = bsz / unit
    }
    return
}

View File

@@ -2,7 +2,6 @@
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.9 || safe || codec.safe || appengine
// +build !go1.9 safe codec.safe appengine
package codec
@@ -19,8 +18,11 @@ import (
const safeMode = true
const transientSizeMax = 0
const transientValueHasStringSlice = true
func isTransientType4Size(size uint32) bool { return true }
type mapReqParams struct{}
func getMapReqParams(ti *typeInfo) (r mapReqParams) { return }
func byteAt(b []byte, index uint) byte {
return b[index]
@@ -30,14 +32,6 @@ func setByteAt(b []byte, index uint, val byte) {
b[index] = val
}
func byteSliceOf(b []byte, start, end uint) []byte {
return b[start:end]
}
// func byteSliceWithLen(b []byte, length uint) []byte {
// return b[:length]
// }
func stringView(v []byte) string {
return string(v)
}
@@ -50,34 +44,26 @@ func byteSliceSameData(v1 []byte, v2 []byte) bool {
return cap(v1) != 0 && cap(v2) != 0 && &(v1[:1][0]) == &(v2[:1][0])
}
func okBytes2(b []byte) (v [2]byte) {
copy(v[:], b)
return
}
func okBytes3(b []byte) (v [3]byte) {
copy(v[:], b)
return
}
func okBytes4(b []byte) (v [4]byte) {
copy(v[:], b)
return
}
func okBytes8(b []byte) (v [8]byte) {
copy(v[:], b)
return
}
func isNil(v interface{}) (rv reflect.Value, isnil bool) {
func isNil(v interface{}, checkPtr bool) (rv reflect.Value, b bool) {
b = v == nil
if b || !checkPtr {
return
}
rv = reflect.ValueOf(v)
if isnilBitset.isset(byte(rv.Kind())) {
isnil = rv.IsNil()
if rv.Kind() == reflect.Ptr {
b = rv.IsNil()
}
return
}
func ptrToLowLevel(v interface{}) interface{} {
return v
}
func lowLevelToPtr[T any](v interface{}) *T {
return v.(*T)
}
func eq4i(i0, i1 interface{}) bool {
return i0 == i1
}
@@ -85,17 +71,21 @@ func eq4i(i0, i1 interface{}) bool {
func rv4iptr(i interface{}) reflect.Value { return reflect.ValueOf(i) }
func rv4istr(i interface{}) reflect.Value { return reflect.ValueOf(i) }
// func rv4i(i interface{}) reflect.Value { return reflect.ValueOf(i) }
// func rv4iK(i interface{}, kind byte, isref bool) reflect.Value { return reflect.ValueOf(i) }
func rv2i(rv reflect.Value) interface{} {
return rv.Interface()
if rv.IsValid() {
return rv.Interface()
}
return nil
}
func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
return rv.Addr()
}
func rvPtrIsNil(rv reflect.Value) bool {
return rv.IsNil()
}
func rvIsNil(rv reflect.Value) bool {
return rv.IsNil()
}
@@ -131,6 +121,30 @@ func i2rtid(i interface{}) uintptr {
// --------------------------
// is this an empty interface/ptr/struct/map/slice/chan/array
func isEmptyContainerValue(v reflect.Value, tinfos *TypeInfos, recursive bool) (empty bool) {
switch v.Kind() {
case reflect.Array:
for i, vlen := 0, v.Len(); i < vlen; i++ {
if !isEmptyValue(v.Index(i), tinfos, false) {
return false
}
}
return true
case reflect.Map, reflect.Slice, reflect.Chan:
return v.IsNil() || v.Len() == 0
case reflect.Interface, reflect.Ptr:
empty = v.IsNil()
if recursive && !empty {
return isEmptyValue(v.Elem(), tinfos, recursive)
}
return empty
case reflect.Struct:
return isEmptyStruct(v, tinfos, recursive)
}
return false
}
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
switch v.Kind() {
case reflect.Invalid:
@@ -215,7 +229,7 @@ func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
// We only care about what we can encode/decode,
// so that is what we use to check omitEmpty.
for _, si := range ti.sfi.source() {
sfv := si.path.field(v)
sfv := si.fieldNoAlloc(v, true)
if sfv.IsValid() && !isEmptyValue(sfv, tinfos, recursive) {
return false
}
@@ -223,6 +237,10 @@ func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
return true
}
func makeMapReflect(t reflect.Type, size int) reflect.Value {
return reflect.MakeMapWithSize(t, size)
}
// --------------------------
type perTypeElem struct {
@@ -247,13 +265,9 @@ type perType struct {
v []perTypeElem
}
type decPerType struct {
perType
}
type decPerType = perType
type encPerType struct {
perType
}
type encPerType = perType
func (x *perType) elem(t reflect.Type) *perTypeElem {
rtid := rt2id(t)
@@ -295,10 +309,44 @@ func (x *perType) AddressableRO(v reflect.Value) (rv reflect.Value) {
return
}
// --------------------------
type mapIter struct {
t *reflect.MapIter
m reflect.Value
values bool
}
func (t *mapIter) Next() (r bool) {
return t.t.Next()
}
func (t *mapIter) Key() reflect.Value {
return t.t.Key()
}
func (t *mapIter) Value() (r reflect.Value) {
if t.values {
return t.t.Value()
}
return
}
func (t *mapIter) Done() {}
func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
*t = mapIter{
m: m,
t: m.MapRange(),
values: values,
}
}
// --------------------------
type structFieldInfos struct {
c []*structFieldInfo
s []*structFieldInfo
t uint8To32TrieNode
// byName map[string]*structFieldInfo // find sfi given a name
}
func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
@@ -306,55 +354,24 @@ func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
x.s = sorted
}
func (x *structFieldInfos) sorted() (v []*structFieldInfo) { return x.s }
// func (x *structFieldInfos) count() int { return len(x.c) }
func (x *structFieldInfos) source() (v []*structFieldInfo) { return x.c }
type atomicClsErr struct {
v atomic.Value
}
func (x *atomicClsErr) load() (e clsErr) {
if i := x.v.Load(); i != nil {
e = i.(clsErr)
}
return
}
func (x *atomicClsErr) store(p clsErr) {
x.v.Store(p)
}
func (x *structFieldInfos) sorted() (v []*structFieldInfo) { return x.s }
// --------------------------
type atomicTypeInfoSlice struct {
v atomic.Value
type uint8To32TrieNodeNoKids struct {
key uint8
valid bool // the value marks the end of a full stored string
_ [2]byte // padding
value uint32
}
func (x *atomicTypeInfoSlice) load() (e []rtid2ti) {
if i := x.v.Load(); i != nil {
e = i.([]rtid2ti)
}
return
}
type uint8To32TrieNodeKids = []uint8To32TrieNode
func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
x.v.Store(p)
}
// --------------------------
type atomicRtidFnSlice struct {
v atomic.Value
}
func (x *atomicRtidFnSlice) load() (e []codecRtidFn) {
if i := x.v.Load(); i != nil {
e = i.([]codecRtidFn)
}
return
}
func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
x.v.Store(p)
}
func (x *uint8To32TrieNode) setKids(kids []uint8To32TrieNode) { x.kids = kids }
func (x *uint8To32TrieNode) getKids() []uint8To32TrieNode { return x.kids }
func (x *uint8To32TrieNode) truncKids() { x.kids = x.kids[:0] } // set len to 0
// --------------------------
func (n *fauxUnion) ru() reflect.Value {
@@ -501,13 +518,13 @@ func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value
// ----------------
func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value {
func rvArrayIndex(rv reflect.Value, i int, _ *typeInfo, _ bool) reflect.Value {
return rv.Index(i)
}
func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value {
return rv.Index(i)
}
// func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value {
// return rv.Index(i)
// }
func rvSliceZeroCap(t reflect.Type) (v reflect.Value) {
return reflect.MakeSlice(t, 0, 0)
@@ -523,7 +540,7 @@ func rvCapSlice(rv reflect.Value) int {
func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
l := rv.Len()
if scratch == nil || rv.CanAddr() {
if scratch == nil && rv.CanAddr() {
return rv.Slice(0, l).Bytes()
}
@@ -537,7 +554,7 @@ func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
}
func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
v = rvZeroAddrK(reflectArrayOf(rvLenSlice(rv), rv.Type().Elem()), reflect.Array)
v = rvZeroAddrK(reflect.ArrayOf(rvLenSlice(rv), rv.Type().Elem()), reflect.Array)
reflect.Copy(v, rv)
return
}
@@ -647,60 +664,43 @@ func rvLenMap(rv reflect.Value) int {
return rv.Len()
}
// func copybytes(to, from []byte) int {
// return copy(to, from)
// }
// func copybytestr(to []byte, from string) int {
// return copy(to, from)
// }
// func rvLenArray(rv reflect.Value) int { return rv.Len() }
// ------------ map range and map indexing ----------
func mapStoresElemIndirect(elemsize uintptr) bool { return false }
func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, _, _ bool) {
func mapSet(m, k, v reflect.Value, _ mapReqParams) {
m.SetMapIndex(k, v)
}
func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, _, _ bool) (vv reflect.Value) {
func mapGet(m, k, v reflect.Value, _ mapReqParams) (vv reflect.Value) {
return m.MapIndex(k)
}
// func mapDelete(m, k reflect.Value) {
// m.SetMapIndex(k, reflect.Value{})
// }
func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (r reflect.Value) {
return // reflect.New(t).Elem()
}
// ---------- ENCODER optimized ---------------
func (e *Encoder) jsondriver() *jsonEncDriver {
return e.e.(*jsonEncDriver)
}
// ---------- DECODER optimized ---------------
func (d *Decoder) jsondriver() *jsonDecDriver {
return d.d.(*jsonDecDriver)
}
func (d *Decoder) stringZC(v []byte) (s string) {
return d.string(v)
}
func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string {
return d.string(*kstr2bs)
func (d *decoderBase) bytes2Str(in []byte, att dBytesAttachState) (s string, mutable bool) {
return d.detach2Str(in, att), false
}
// ---------- structFieldInfo optimized ---------------
func (n *structFieldInfoPathNode) rvField(v reflect.Value) reflect.Value {
func (n *structFieldInfoNode) rvField(v reflect.Value) reflect.Value {
return v.Field(int(n.index))
}
// ---------- others ---------------
// --------------------------
type atomicRtidFnSlice struct {
v atomic.Value
}
func (x *atomicRtidFnSlice) load() interface{} {
return x.v.Load()
}
func (x *atomicRtidFnSlice) store(p interface{}) {
x.v.Store(p)
}

View File

@@ -1,21 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !go1.9 || safe || codec.safe || appengine || !gc
// +build !go1.9 safe codec.safe appengine !gc
package codec
// import "reflect"
// This files contains safe versions of the code where the unsafe versions are not supported
// in either gccgo or gollvm.
//
// - rvType:
// reflect.toType is not supported in gccgo, gollvm.
// func rvType(rv reflect.Value) reflect.Type {
// return rv.Type()
// }
var _ = 0

View File

@@ -1,12 +1,15 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !safe && !codec.safe && !appengine && go1.9
// +build !safe,!codec.safe,!appengine,go1.9
//go:build !safe && !codec.safe && !appengine && go1.21
// minimum of go 1.9 is needed, as that is the minimum for all features and linked functions we need
// - typedmemclr was introduced in go 1.8
// - mapassign_fastXXX was introduced in go 1.9
// minimum of go 1.21 is needed, as that is the minimum for all features and linked functions we need
// - typedmemclr : go1.8
// - mapassign_fastXXX: go1.9
// - clear was added in go1.21
// - unsafe.String(Data): go1.20
// - unsafe.Add: go1.17
// - generics/any: go1.18
// etc
package codec
@@ -21,7 +24,7 @@ import (
// This file has unsafe variants of some helper functions.
// MARKER: See helper_unsafe.go for the usage documentation.
//
// There are a number of helper_*unsafe*.go files.
//
// - helper_unsafe
@@ -41,19 +44,32 @@ import (
// As of March 2021, we cannot differentiate whether running with gccgo or gollvm
// using a build constraint, as both satisfy 'gccgo' build tag.
// Consequently, we must use the lowest common denominator to support both.
//
// For reflect.Value code, we decided to do the following:
// - if we know the kind, we can elide conditional checks for
// - SetXXX (Int, Uint, String, Bool, etc)
// - SetLen
//
// We can also optimize
// - IsNil
// We can also optimize many others, incl IsNil, etc
//
// MARKER: Some functions here will not be hit during code coverage runs due to optimizations, e.g.
// - rvCopySlice: called by decode if rvGrowSlice did not set new slice into pointer to orig slice.
// however, helper_unsafe sets it, so no need to call rvCopySlice later
// - rvSlice: same as above
//
// MARKER: Handling flagIndir ----
//
// flagIndir means that the reflect.Value holds a pointer to the data itself.
//
// flagIndir can be set for:
// - references
// Here, type.IfaceIndir() --> false
// flagIndir is usually false (except when the value is addressable, where in flagIndir may be true)
// - everything else (numbers, bools, string, slice, struct, etc).
// Here, type.IfaceIndir() --> true
// flagIndir is always true
//
// This knowlege is used across this file, e.g. in rv2i and rvRefPtr
const safeMode = false
@@ -88,7 +104,9 @@ const (
const transientSizeMax = 64
// should struct/array support internal strings and slices?
const transientValueHasStringSlice = false
// const transientValueHasStringSlice = false
func isTransientType4Size(size uint32) bool { return size <= transientSizeMax }
type unsafeString struct {
Data unsafe.Pointer
@@ -144,7 +162,8 @@ func (x *unsafePerTypeElem) addrFor(k reflect.Kind) unsafe.Pointer {
x.slice = unsafeSlice{} // memclr
return unsafe.Pointer(&x.slice)
}
x.arr = [transientSizeMax]byte{} // memclr
clear(x.arr[:])
// x.arr = [transientSizeMax]byte{} // memclr
return unsafe.Pointer(&x.arr)
}
@@ -152,9 +171,7 @@ type perType struct {
elems [2]unsafePerTypeElem
}
type decPerType struct {
perType
}
type decPerType = perType
type encPerType struct{}
@@ -183,19 +200,6 @@ func byteAt(b []byte, index uint) byte {
return *(*byte)(unsafe.Pointer(uintptr((*unsafeSlice)(unsafe.Pointer(&b)).Data) + uintptr(index)))
}
func byteSliceOf(b []byte, start, end uint) []byte {
s := (*unsafeSlice)(unsafe.Pointer(&b))
s.Data = unsafe.Pointer(uintptr(s.Data) + uintptr(start))
s.Len = int(end - start)
s.Cap -= int(start)
return b
}
// func byteSliceWithLen(b []byte, length uint) []byte {
// (*unsafeSlice)(unsafe.Pointer(&b)).Len = int(length)
// return b
// }
func setByteAt(b []byte, index uint, val byte) {
// b[index] = val
*(*byte)(unsafe.Pointer(uintptr((*unsafeSlice)(unsafe.Pointer(&b)).Data) + uintptr(index))) = val
@@ -222,49 +226,26 @@ func byteSliceSameData(v1 []byte, v2 []byte) bool {
return (*unsafeSlice)(unsafe.Pointer(&v1)).Data == (*unsafeSlice)(unsafe.Pointer(&v2)).Data
}
// MARKER: okBytesN functions will copy N bytes into the top slots of the return array.
// These functions expect that the bound check already occured and are are valid.
// copy(...) does a number of checks which are unnecessary in this situation when in bounds.
func okBytes2(b []byte) [2]byte {
return *((*[2]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}
func okBytes3(b []byte) [3]byte {
return *((*[3]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}
func okBytes4(b []byte) [4]byte {
return *((*[4]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}
func okBytes8(b []byte) [8]byte {
return *((*[8]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}
// isNil says whether the value v is nil.
// This applies to references like map/ptr/unsafepointer/chan/func,
// and non-reference values like interface/slice.
func isNil(v interface{}) (rv reflect.Value, isnil bool) {
var ui = (*unsafeIntf)(unsafe.Pointer(&v))
isnil = ui.ptr == nil
if !isnil {
rv, isnil = unsafeIsNilIntfOrSlice(ui, v)
}
return
}
func unsafeIsNilIntfOrSlice(ui *unsafeIntf, v interface{}) (rv reflect.Value, isnil bool) {
rv = reflect.ValueOf(v) // reflect.ValueOf is currently not inline'able - so call it directly
tk := rv.Kind()
isnil = (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.ptr) == nil
return
}
// return the pointer for a reference (map/chan/func/pointer/unsafe.Pointer).
// true references (map, func, chan, ptr - NOT slice) may be double-referenced? as flagIndir
// isNil checks - without much effort - if an interface is nil.
//
// Assumes that v is a reference (map/func/chan/ptr/func)
// returned rv is not guaranteed to be valid (e.g. if v == nil).
//
// Note that this will handle all pointer-sized types e.g.
// pointer, map, chan, func, etc.
func isNil(v interface{}, checkPtr bool) (rv reflect.Value, b bool) {
b = ((*unsafeIntf)(unsafe.Pointer(&v))).ptr == nil
return
}
func ptrToLowLevel[T any](ptr *T) unsafe.Pointer {
return unsafe.Pointer(ptr)
}
func lowLevelToPtr[T any](v unsafe.Pointer) *T {
return (*T)(v)
}
// Given that v is a reference (map/func/chan/ptr/unsafepointer) kind, return the pointer
func rvRefPtr(v *unsafeReflectValue) unsafe.Pointer {
if v.flag&unsafeFlagIndir != 0 {
return *(*unsafe.Pointer)(v.ptr)
@@ -295,13 +276,6 @@ func rv4istr(i interface{}) (v reflect.Value) {
}
func rv2i(rv reflect.Value) (i interface{}) {
// We tap into implememtation details from
// the source go stdlib reflect/value.go, and trims the implementation.
//
// e.g.
// - a map/ptr is a reference, thus flagIndir is not set on it
// - an int/slice is not a reference, thus flagIndir is set on it
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
if refBitset.isset(byte(rv.Kind())) && urv.flag&unsafeFlagIndir != 0 {
urv.ptr = *(*unsafe.Pointer)(urv.ptr)
@@ -316,12 +290,22 @@ func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
return rv
}
// return true if this rv - got from a pointer kind - is nil.
// For now, only use for struct fields of pointer types, as we're guaranteed
// that flagIndir will never be set.
func rvPtrIsNil(rv reflect.Value) bool {
return rvIsNil(rv)
}
// checks if a nil'able value is nil
func rvIsNil(rv reflect.Value) bool {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
if urv.flag&unsafeFlagIndir != 0 {
return *(*unsafe.Pointer)(urv.ptr) == nil
if urv.flag&unsafeFlagIndir == 0 {
return urv.ptr == nil
}
return urv.ptr == nil
// flagIndir is set for a reference (ptr/map/func/unsafepointer/chan)
// OR kind is slice/interface
return *(*unsafe.Pointer)(urv.ptr) == nil
}
func rvSetSliceLen(rv reflect.Value, length int) {
@@ -499,29 +483,62 @@ func isEmptyValueFallbackRecur(urv *unsafeReflectValue, v reflect.Value, tinfos
return false
}
// is this an empty interface/ptr/struct/map/slice/chan/array
func isEmptyContainerValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
switch v.Kind() {
case reflect.Slice:
return (*unsafeSlice)(urv.ptr).Len == 0
case reflect.Struct:
if tinfos == nil {
tinfos = defTypeInfos
}
ti := tinfos.find(uintptr(urv.typ))
if ti == nil {
ti = tinfos.load(v.Type())
}
return unsafeCmpZero(urv.ptr, int(ti.size))
case reflect.Interface, reflect.Ptr:
// isnil := urv.ptr == nil // (not sufficient, as a pointer value encodes the type)
isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
if recursive && !isnil {
return isEmptyValue(v.Elem(), tinfos, recursive)
}
return isnil
case reflect.Chan:
return urv.ptr == nil || len_chan(rvRefPtr(urv)) == 0
case reflect.Map:
return urv.ptr == nil || len_map(rvRefPtr(urv)) == 0
case reflect.Array:
return v.Len() == 0 ||
urv.ptr == nil ||
urv.typ == nil ||
rtsize2(urv.typ) == 0 ||
unsafeCmpZero(urv.ptr, int(rtsize2(urv.typ)))
}
return false
}
// --------------------------
type structFieldInfos struct {
c unsafe.Pointer // source
s unsafe.Pointer // sorted
c unsafe.Pointer // source
s unsafe.Pointer // sorted
t uint8To32TrieNode
length int
// byName map[string]*structFieldInfo // find sfi given a name
}
// func (x *structFieldInfos) load(source, sorted []*structFieldInfo, sourceNames, sortedNames []string) {
func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
s := (*unsafeSlice)(unsafe.Pointer(&sorted))
x.s = s.Data
x.length = s.Len
var s *unsafeSlice
s = (*unsafeSlice)(unsafe.Pointer(&source))
x.c = s.Data
}
func (x *structFieldInfos) sorted() (v []*structFieldInfo) {
*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length}
// s := (*unsafeSlice)(unsafe.Pointer(&v))
// s.Data = x.sorted0
// s.Len = x.length
// s.Cap = s.Len
return
x.length = s.Len
s = (*unsafeSlice)(unsafe.Pointer(&sorted))
x.s = s.Data
}
func (x *structFieldInfos) source() (v []*structFieldInfo) {
@@ -529,66 +546,48 @@ func (x *structFieldInfos) source() (v []*structFieldInfo) {
return
}
// atomicXXX is expected to be 2 words (for symmetry with atomic.Value)
//
// Note that we do not atomically load/store length and data pointer separately,
// as this could lead to some races. Instead, we atomically load/store cappedSlice.
//
// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly.
// ----------------------
type atomicTypeInfoSlice struct {
v unsafe.Pointer // *[]rtid2ti
}
func (x *atomicTypeInfoSlice) load() (s []rtid2ti) {
x2 := atomic.LoadPointer(&x.v)
if x2 != nil {
s = *(*[]rtid2ti)(x2)
}
func (x *structFieldInfos) sorted() (v []*structFieldInfo) {
*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length}
return
}
func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
atomic.StorePointer(&x.v, unsafe.Pointer(&p))
// --------------------------
type uint8To32TrieNodeNoKids struct {
key uint8
valid bool // the value marks the end of a full stored string
numkids uint8
_ byte // padding
value uint32
}
// MARKER: in safe mode, atomicXXX are atomic.Value, which contains an interface{}.
// This is 2 words.
// consider padding atomicXXX here with a uintptr, so they fit into 2 words also.
type uint8To32TrieNodeKids = *uint8To32TrieNode
func (x *uint8To32TrieNode) setKids(kids []uint8To32TrieNode) {
x.numkids = uint8(len(kids))
x.kids = &kids[0]
}
func (x *uint8To32TrieNode) getKids() (v []uint8To32TrieNode) {
*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{unsafe.Pointer(x.kids), int(x.numkids), int(x.numkids)}
return
}
func (x *uint8To32TrieNode) truncKids() { x.numkids = 0 }
// --------------------------
// Note that we do not atomically load/store length and data pointer separately,
// as this could lead to some races. Instead, we atomically load/store cappedSlice.
type atomicRtidFnSlice struct {
v unsafe.Pointer // *[]codecRtidFn
}
func (x *atomicRtidFnSlice) load() (s []codecRtidFn) {
x2 := atomic.LoadPointer(&x.v)
if x2 != nil {
s = *(*[]codecRtidFn)(x2)
}
return
func (x *atomicRtidFnSlice) load() (s unsafe.Pointer) {
return atomic.LoadPointer(&x.v)
}
func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}
// --------------------------
type atomicClsErr struct {
v unsafe.Pointer // *clsErr
}
func (x *atomicClsErr) load() (e clsErr) {
x2 := (*clsErr)(atomic.LoadPointer(&x.v))
if x2 != nil {
e = *x2
}
return
}
func (x *atomicClsErr) store(p clsErr) {
atomic.StorePointer(&x.v, unsafe.Pointer(&p))
func (x *atomicRtidFnSlice) store(p unsafe.Pointer) {
atomic.StorePointer(&x.v, p)
}
// --------------------------
@@ -660,98 +659,79 @@ func (n *fauxUnion) rb() (v reflect.Value) {
// --------------------------
func rvSetBytes(rv reflect.Value, v []byte) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*[]byte)(urv.ptr) = v
*(*[]byte)(rvPtr(rv)) = v
}
func rvSetString(rv reflect.Value, v string) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*string)(urv.ptr) = v
*(*string)(rvPtr(rv)) = v
}
func rvSetBool(rv reflect.Value, v bool) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*bool)(urv.ptr) = v
*(*bool)(rvPtr(rv)) = v
}
func rvSetTime(rv reflect.Value, v time.Time) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*time.Time)(urv.ptr) = v
*(*time.Time)(rvPtr(rv)) = v
}
func rvSetFloat32(rv reflect.Value, v float32) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float32)(urv.ptr) = v
*(*float32)(rvPtr(rv)) = v
}
func rvSetFloat64(rv reflect.Value, v float64) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float64)(urv.ptr) = v
*(*float64)(rvPtr(rv)) = v
}
func rvSetComplex64(rv reflect.Value, v complex64) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*complex64)(urv.ptr) = v
*(*complex64)(rvPtr(rv)) = v
}
func rvSetComplex128(rv reflect.Value, v complex128) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*complex128)(urv.ptr) = v
*(*complex128)(rvPtr(rv)) = v
}
func rvSetInt(rv reflect.Value, v int) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int)(urv.ptr) = v
*(*int)(rvPtr(rv)) = v
}
func rvSetInt8(rv reflect.Value, v int8) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int8)(urv.ptr) = v
*(*int8)(rvPtr(rv)) = v
}
func rvSetInt16(rv reflect.Value, v int16) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int16)(urv.ptr) = v
*(*int16)(rvPtr(rv)) = v
}
func rvSetInt32(rv reflect.Value, v int32) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int32)(urv.ptr) = v
*(*int32)(rvPtr(rv)) = v
}
func rvSetInt64(rv reflect.Value, v int64) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int64)(urv.ptr) = v
*(*int64)(rvPtr(rv)) = v
}
func rvSetUint(rv reflect.Value, v uint) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint)(urv.ptr) = v
*(*uint)(rvPtr(rv)) = v
}
func rvSetUintptr(rv reflect.Value, v uintptr) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uintptr)(urv.ptr) = v
*(*uintptr)(rvPtr(rv)) = v
}
func rvSetUint8(rv reflect.Value, v uint8) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint8)(urv.ptr) = v
*(*uint8)(rvPtr(rv)) = v
}
func rvSetUint16(rv reflect.Value, v uint16) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint16)(urv.ptr) = v
*(*uint16)(rvPtr(rv)) = v
}
func rvSetUint32(rv reflect.Value, v uint32) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint32)(urv.ptr) = v
*(*uint32)(rvPtr(rv)) = v
}
func rvSetUint64(rv reflect.Value, v uint64) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint64)(urv.ptr) = v
*(*uint64)(rvPtr(rv)) = v
}
// ----------------
@@ -775,12 +755,10 @@ func rvSetDirect(rv reflect.Value, v reflect.Value) {
uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
if uv.flag&unsafeFlagIndir == 0 {
*(*unsafe.Pointer)(urv.ptr) = uv.ptr
} else if uv.ptr == unsafeZeroAddr {
if urv.ptr != unsafeZeroAddr {
typedmemclr(urv.typ, urv.ptr)
}
} else {
} else if uv.ptr != unsafeZeroAddr {
typedmemmove(urv.typ, urv.ptr, uv.ptr)
} else if urv.ptr != unsafeZeroAddr {
typedmemclr(urv.typ, urv.ptr)
}
}
@@ -812,11 +790,9 @@ func rvMakeSlice(rv reflect.Value, ti *typeInfo, xlen, xcap int) (_ reflect.Valu
// It is typically called when we know that SetLen(...) cannot be done.
func rvSlice(rv reflect.Value, length int) reflect.Value {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
var x []struct{}
ux := (*unsafeSlice)(unsafe.Pointer(&x))
*ux = *(*unsafeSlice)(urv.ptr)
ux := *(*unsafeSlice)(urv.ptr) // copy slice header
ux.Len = length
urv.ptr = unsafe.Pointer(ux)
urv.ptr = unsafe.Pointer(&ux)
return rv
}
@@ -834,10 +810,16 @@ func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value
// ------------
func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo, isSlice bool) (v reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data) + uintptr(int(ti.elemsize)*i))
if isSlice {
uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data))
} else {
uv.ptr = unsafe.Pointer(uintptr(urv.ptr))
}
uv.ptr = unsafe.Add(uv.ptr, ti.elemsize*uint32(i))
// uv.ptr = unsafe.Pointer(ptr + uintptr(int(ti.elemsize)*i))
uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
return
@@ -861,19 +843,11 @@ func rvCapSlice(rv reflect.Value) int {
return (*unsafeSlice)(urv.ptr).Cap
}
func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
uv.ptr = unsafe.Pointer(uintptr(urv.ptr) + uintptr(int(ti.elemsize)*i))
uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
return
}
// if scratch is nil, then return a writable view (assuming canAddr=true)
func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
func rvGetArrayBytes(rv reflect.Value, _ []byte) (bs []byte) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
bx := (*unsafeSlice)(unsafe.Pointer(&bs))
// bx.Data, bx.Len, bx.Cap = urv.ptr, rv.Len(), bx.Len
bx.Data = urv.ptr
bx.Len = rv.Len()
bx.Cap = bx.Len
@@ -889,7 +863,7 @@ func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
//
// Consequently, we use rvLenSlice, not rvCapSlice.
t := reflectArrayOf(rvLenSlice(rv), rv.Type().Elem())
t := reflect.ArrayOf(rvLenSlice(rv), rv.Type().Elem())
// v = rvZeroAddrK(t, reflect.Array)
uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
@@ -921,99 +895,84 @@ func rvCopySlice(dest, src reflect.Value, elemType reflect.Type) {
// ------------
func rvPtr(rv reflect.Value) unsafe.Pointer {
return (*unsafeReflectValue)(unsafe.Pointer(&rv)).ptr
}
func rvGetBool(rv reflect.Value) bool {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*bool)(v.ptr)
return *(*bool)(rvPtr(rv))
}
func rvGetBytes(rv reflect.Value) []byte {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*[]byte)(v.ptr)
return *(*[]byte)(rvPtr(rv))
}
func rvGetTime(rv reflect.Value) time.Time {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*time.Time)(v.ptr)
return *(*time.Time)(rvPtr(rv))
}
func rvGetString(rv reflect.Value) string {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*string)(v.ptr)
return *(*string)(rvPtr(rv))
}
func rvGetFloat64(rv reflect.Value) float64 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*float64)(v.ptr)
return *(*float64)(rvPtr(rv))
}
func rvGetFloat32(rv reflect.Value) float32 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*float32)(v.ptr)
return *(*float32)(rvPtr(rv))
}
func rvGetComplex64(rv reflect.Value) complex64 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*complex64)(v.ptr)
return *(*complex64)(rvPtr(rv))
}
func rvGetComplex128(rv reflect.Value) complex128 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*complex128)(v.ptr)
return *(*complex128)(rvPtr(rv))
}
func rvGetInt(rv reflect.Value) int {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*int)(v.ptr)
return *(*int)(rvPtr(rv))
}
func rvGetInt8(rv reflect.Value) int8 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*int8)(v.ptr)
return *(*int8)(rvPtr(rv))
}
func rvGetInt16(rv reflect.Value) int16 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*int16)(v.ptr)
return *(*int16)(rvPtr(rv))
}
func rvGetInt32(rv reflect.Value) int32 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*int32)(v.ptr)
return *(*int32)(rvPtr(rv))
}
func rvGetInt64(rv reflect.Value) int64 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*int64)(v.ptr)
return *(*int64)(rvPtr(rv))
}
func rvGetUint(rv reflect.Value) uint {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uint)(v.ptr)
return *(*uint)(rvPtr(rv))
}
func rvGetUint8(rv reflect.Value) uint8 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uint8)(v.ptr)
return *(*uint8)(rvPtr(rv))
}
func rvGetUint16(rv reflect.Value) uint16 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uint16)(v.ptr)
return *(*uint16)(rvPtr(rv))
}
func rvGetUint32(rv reflect.Value) uint32 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uint32)(v.ptr)
return *(*uint32)(rvPtr(rv))
}
func rvGetUint64(rv reflect.Value) uint64 {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uint64)(v.ptr)
return *(*uint64)(rvPtr(rv))
}
func rvGetUintptr(rv reflect.Value) uintptr {
v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
return *(*uintptr)(v.ptr)
return *(*uintptr)(rvPtr(rv))
}
func rvLenMap(rv reflect.Value) int {
@@ -1027,32 +986,6 @@ func rvLenMap(rv reflect.Value) int {
return len_map(rvRefPtr((*unsafeReflectValue)(unsafe.Pointer(&rv))))
}
// copy is an intrinsic, which may use asm if length is small,
// or make a runtime call to runtime.memmove if length is large.
// Performance suffers when you always call runtime.memmove function.
//
// Consequently, there's no value in a copybytes call - just call copy() directly
// func copybytes(to, from []byte) (n int) {
// n = (*unsafeSlice)(unsafe.Pointer(&from)).Len
// memmove(
// (*unsafeSlice)(unsafe.Pointer(&to)).Data,
// (*unsafeSlice)(unsafe.Pointer(&from)).Data,
// uintptr(n),
// )
// return
// }
// func copybytestr(to []byte, from string) (n int) {
// n = (*unsafeSlice)(unsafe.Pointer(&from)).Len
// memmove(
// (*unsafeSlice)(unsafe.Pointer(&to)).Data,
// (*unsafeSlice)(unsafe.Pointer(&from)).Data,
// uintptr(n),
// )
// return
// }
// Note: it is hard to find len(...) of an array type,
// as that is a field in the arrayType representing the array, and hard to introspect.
//
@@ -1065,24 +998,26 @@ func rvLenMap(rv reflect.Value) int {
//
// It is more performant to provide a value that the map entry is set into,
// and that elides the allocation.
// go 1.4+ has runtime/hashmap.go or runtime/map.go which has a
// hIter struct with the first 2 values being key and value
// of the current iteration.
//
// go 1.4 through go 1.23 (in runtime/hashmap.go or runtime/map.go) has a hIter struct
// with the first 2 values being pointers for key and value of the current iteration.
// The next 6 values are pointers, followed by numeric types (uintptr, uint8, bool, etc).
// This *hIter is passed to mapiterinit, mapiternext, mapiterkey, mapiterelem.
// We bypass the reflect wrapper functions and just use the *hIter directly.
//
// Though *hIter has many fields, we only care about the first 2.
// In go 1.24, swissmap was introduced, and it provides a compatibility layer
// for hIter (called linknameIter). This has only 2 pointer fields after the key and value pointers.
//
// We directly embed this in unsafeMapIter below
// Note: We bypass the reflect wrapper functions and just use the *hIter directly.
//
// hiter is typically about 12 words, but we just fill up unsafeMapIter to 32 words,
// so it fills multiple cache lines and can give some extra space to accomodate small growth.
// When 'faking' these types with our own, we MUST ensure that the GC sees the pointers
// appropriately. These are reflected in goversion_(no)swissmap_unsafe.go files.
// In these files, we pad the extra spaces appropriately.
//
// Note: the faux hIter/linknameIter is directly embedded in unsafeMapIter below
type unsafeMapIter struct {
mtyp, mptr unsafe.Pointer
k, v reflect.Value
k, v unsafeReflectValue
kisref bool
visref bool
mapvalues bool
@@ -1092,7 +1027,7 @@ type unsafeMapIter struct {
it struct {
key unsafe.Pointer
value unsafe.Pointer
_ [20]uintptr // padding for other fields (to make up 32 words for enclosing struct)
_ unsafeMapIterPadding
}
}
@@ -1112,18 +1047,16 @@ func (t *unsafeMapIter) Next() (r bool) {
}
if helperUnsafeDirectAssignMapEntry || t.kisref {
(*unsafeReflectValue)(unsafe.Pointer(&t.k)).ptr = t.it.key
t.k.ptr = t.it.key
} else {
k := (*unsafeReflectValue)(unsafe.Pointer(&t.k))
typedmemmove(k.typ, k.ptr, t.it.key)
typedmemmove(t.k.typ, t.k.ptr, t.it.key)
}
if t.mapvalues {
if helperUnsafeDirectAssignMapEntry || t.visref {
(*unsafeReflectValue)(unsafe.Pointer(&t.v)).ptr = t.it.value
t.v.ptr = t.it.value
} else {
v := (*unsafeReflectValue)(unsafe.Pointer(&t.v))
typedmemmove(v.typ, v.ptr, t.it.value)
typedmemmove(t.v.typ, t.v.ptr, t.it.value)
}
}
@@ -1131,11 +1064,11 @@ func (t *unsafeMapIter) Next() (r bool) {
}
func (t *unsafeMapIter) Key() (r reflect.Value) {
return t.k
return *(*reflect.Value)(unsafe.Pointer(&t.k))
}
func (t *unsafeMapIter) Value() (r reflect.Value) {
return t.v
return *(*reflect.Value)(unsafe.Pointer(&t.v))
}
func (t *unsafeMapIter) Done() {}
@@ -1162,14 +1095,14 @@ func mapRange(t *mapIter, m, k, v reflect.Value, mapvalues bool) {
// t.it = (*unsafeMapHashIter)(reflect_mapiterinit(t.mtyp, t.mptr))
mapiterinit(t.mtyp, t.mptr, unsafe.Pointer(&t.it))
t.k = k
t.k = *(*unsafeReflectValue)(unsafe.Pointer(&k))
t.kisref = refBitset.isset(byte(k.Kind()))
if mapvalues {
t.v = v
t.v = *(*unsafeReflectValue)(unsafe.Pointer(&v))
t.visref = refBitset.isset(byte(v.Kind()))
} else {
t.v = reflect.Value{}
t.v = unsafeReflectValue{}
}
}
@@ -1182,13 +1115,6 @@ func unsafeMapKVPtr(urv *unsafeReflectValue) unsafe.Pointer {
return urv.ptr
}
// func mapDelete(m, k reflect.Value) {
// var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
// var kptr = unsafeMapKVPtr(urv)
// urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
// mapdelete(urv.typ, rv2ptr(urv), kptr)
// }
// return an addressable reflect value that can be used in mapRange and mapGet operations.
//
// all calls to mapGet or mapRange will call here to get an addressable reflect.Value.
@@ -1205,53 +1131,39 @@ func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
return
}
// ---------- ENCODER optimized ---------------
func (e *Encoder) jsondriver() *jsonEncDriver {
return (*jsonEncDriver)((*unsafeIntf)(unsafe.Pointer(&e.e)).ptr)
func makeMapReflect(typ reflect.Type, size int) (rv reflect.Value) {
t := (*unsafeIntf)(unsafe.Pointer(&typ)).ptr
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
urv.typ = t
urv.flag = uintptr(reflect.Map)
urv.ptr = makemap(t, size, nil)
return
}
func (d *Decoder) zerocopystate() bool {
return d.decByteState == decByteStateZerocopy && d.h.ZeroCopy
}
func (d *Decoder) stringZC(v []byte) (s string) {
// MARKER: inline zerocopystate directly so genHelper forwarding function fits within inlining cost
// if d.zerocopystate() {
if d.decByteState == decByteStateZerocopy && d.h.ZeroCopy {
return stringView(v)
}
return d.string(v)
}
func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string {
if !d.zerocopystate() {
*callFnRvk = true
if d.decByteState == decByteStateReuseBuf {
*kstrbs = append((*kstrbs)[:0], (*kstr2bs)...)
*kstr2bs = *kstrbs
}
}
return stringView(*kstr2bs)
}
// ---------- DECODER optimized ---------------
func (d *Decoder) jsondriver() *jsonDecDriver {
return (*jsonDecDriver)((*unsafeIntf)(unsafe.Pointer(&d.d)).ptr)
func (d *decoderBase) bytes2Str(in []byte, state dBytesAttachState) (s string, mutable bool) {
return stringView(in), state <= dBytesAttachBuffer
}
// ---------- structFieldInfo optimized ---------------
func (n *structFieldInfoPathNode) rvField(v reflect.Value) (rv reflect.Value) {
func (n *structFieldInfoNode) rvField(v reflect.Value) (rv reflect.Value) {
// we already know this is exported, and maybe embedded (based on what si says)
uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// clear flagEmbedRO if necessary, and inherit permission bits from v
urv.flag = uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind)
urv.typ = ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr
urv.ptr = unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset))
// *(*unsafeReflectValue)(unsafe.Pointer(&rv)) = unsafeReflectValue{
// unsafeIntf: unsafeIntf{
// typ: ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr,
// ptr: unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset)),
// },
// flag: uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind),
// }
return
}
@@ -1299,10 +1211,6 @@ func unsafeNew(typ unsafe.Pointer) unsafe.Pointer {
// failing with "error: undefined reference" error.
// however, runtime.{mallocgc, newarray} are supported, so use that instead.
//go:linkname memmove runtime.memmove
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)
//go:linkname mallocgc runtime.mallocgc
//go:noescape
func mallocgc(size uintptr, typ unsafe.Pointer, needzero bool) unsafe.Pointer
@@ -1319,10 +1227,6 @@ func mapiterinit(typ unsafe.Pointer, m unsafe.Pointer, it unsafe.Pointer)
//go:noescape
func mapiternext(it unsafe.Pointer) (key unsafe.Pointer)
//go:linkname mapdelete runtime.mapdelete
//go:noescape
func mapdelete(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer)
//go:linkname mapassign runtime.mapassign
//go:noescape
func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
@@ -1331,6 +1235,10 @@ func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.
//go:noescape
func mapaccess2(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer, ok bool)
//go:linkname makemap runtime.makemap
//go:noescape
func makemap(typ unsafe.Pointer, size int, h unsafe.Pointer) unsafe.Pointer
// reflect.typed{memmove, memclr, slicecopy} will handle checking if the type has pointers or not,
// and if a writeBarrier is needed, before delegating to the right method in the runtime.
//

View File

@@ -2,7 +2,6 @@
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !safe && !codec.safe && !appengine && go1.9 && gc
// +build !safe,!codec.safe,!appengine,go1.9,gc
package codec
@@ -24,8 +23,67 @@ const (
mapMaxElemSize = 128
)
func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (v unsafeSlice) {
return growslice(typ, old, cap+incr)
type mapKeyFastKind uint8
const (
mapKeyFastKindAny = iota + 1
mapKeyFastKind32
mapKeyFastKind32ptr
mapKeyFastKind64
mapKeyFastKind64ptr
mapKeyFastKindStr
)
var mapKeyFastKindVals [32]mapKeyFastKind
type mapReqParams struct {
kfast mapKeyFastKind
ref bool
indirect bool
}
func getMapReqParams(ti *typeInfo) (r mapReqParams) {
r.indirect = mapStoresElemIndirect(uintptr(ti.elemsize))
r.ref = refBitset.isset(ti.elemkind)
r.kfast = mapKeyFastKindFor(reflect.Kind(ti.keykind))
return
}
func init() {
xx := func(f mapKeyFastKind, k ...reflect.Kind) {
for _, v := range k {
mapKeyFastKindVals[byte(v)&31] = f // 'v % 32' equal to 'v & 31'
}
}
var f mapKeyFastKind
f = mapKeyFastKind64
if wordSizeBits == 32 {
f = mapKeyFastKind32
}
xx(f, reflect.Int, reflect.Uint, reflect.Uintptr)
f = mapKeyFastKind64ptr
if wordSizeBits == 32 {
f = mapKeyFastKind32ptr
}
xx(f, reflect.Ptr)
xx(mapKeyFastKindStr, reflect.String)
xx(mapKeyFastKind32, reflect.Uint32, reflect.Int32, reflect.Float32)
xx(mapKeyFastKind64, reflect.Uint64, reflect.Int64, reflect.Float64)
}
func mapKeyFastKindFor(k reflect.Kind) mapKeyFastKind {
return mapKeyFastKindVals[k&31]
}
func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (s unsafeSlice) {
// culled from GOROOT/runtime/slice.go
s = rtgrowslice(old.Data, old.Cap+incr, old.Cap, incr, typ)
s.Len = old.Len
return
}
// func rvType(rv reflect.Value) reflect.Type {
@@ -43,7 +101,7 @@ func mapStoresElemIndirect(elemsize uintptr) bool {
return elemsize > mapMaxElemSize
}
func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, valIsRef bool) {
func mapSet(m, k, v reflect.Value, p mapReqParams) { // valIsRef
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
var kptr = unsafeMapKVPtr(urv)
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
@@ -60,14 +118,15 @@ func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va
// Sometimes, we got vvptr == nil when we dereferenced vvptr (if valIsIndirect).
// Consequently, only use fastXXX functions if !valIsIndirect
if valIsIndirect {
if p.indirect {
vvptr = mapassign(urv.typ, mptr, kptr)
typedmemmove(vtyp, vvptr, vptr)
// reflect_mapassign(urv.typ, mptr, kptr, vptr)
return
// typedmemmove(vtyp, vvptr, vptr)
// // reflect_mapassign(urv.typ, mptr, kptr, vptr)
// return
goto END
}
switch keyFastKind {
switch p.kfast {
case mapKeyFastKind32:
vvptr = mapassign_fast32(urv.typ, mptr, *(*uint32)(kptr))
case mapKeyFastKind32ptr:
@@ -82,14 +141,14 @@ func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va
vvptr = mapassign(urv.typ, mptr, kptr)
}
// if keyFastKind != 0 && valIsIndirect {
// if p.kfast != 0 && valIsIndirect {
// vvptr = *(*unsafe.Pointer)(vvptr)
// }
END:
typedmemmove(vtyp, vvptr, vptr)
}
func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, valIsRef bool) (_ reflect.Value) {
func mapGet(m, k, v reflect.Value, p mapReqParams) (_ reflect.Value) {
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
var kptr = unsafeMapKVPtr(urv)
urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
@@ -101,7 +160,7 @@ func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va
// Note that mapaccess2_fastXXX functions do not check if the value needs to be copied.
// if they do, we should dereference the pointer and return that
switch keyFastKind {
switch p.kfast {
case mapKeyFastKind32, mapKeyFastKind32ptr:
vvptr, ok = mapaccess2_fast32(urv.typ, mptr, *(*uint32)(kptr))
case mapKeyFastKind64, mapKeyFastKind64ptr:
@@ -118,9 +177,9 @@ func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
if keyFastKind != 0 && valIsIndirect {
if p.kfast != 0 && p.indirect {
urv.ptr = *(*unsafe.Pointer)(vvptr)
} else if helperUnsafeDirectAssignMapEntry || valIsRef {
} else if helperUnsafeDirectAssignMapEntry || p.ref {
urv.ptr = vvptr
} else {
typedmemmove(urv.typ, urv.ptr, vvptr)
@@ -129,13 +188,11 @@ func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, va
return v
}
// ----
//go:linkname unsafeZeroArr runtime.zeroVal
var unsafeZeroArr [1024]byte
// //go:linkname rvPtrToType reflect.toType
// //go:noescape
// func rvPtrToType(typ unsafe.Pointer) reflect.Type
//go:linkname mapassign_fast32 runtime.mapassign_fast32
//go:noescape
func mapassign_fast32(typ unsafe.Pointer, m unsafe.Pointer, key uint32) unsafe.Pointer
@@ -167,3 +224,19 @@ func mapaccess2_fast64(typ unsafe.Pointer, m unsafe.Pointer, key uint64) (val un
//go:linkname mapaccess2_faststr runtime.mapaccess2_faststr
//go:noescape
func mapaccess2_faststr(typ unsafe.Pointer, m unsafe.Pointer, key string) (val unsafe.Pointer, ok bool)
//go:linkname rtgrowslice runtime.growslice
//go:noescape
func rtgrowslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, typ unsafe.Pointer) unsafeSlice
// ----
// //go:linkname rvPtrToType reflect.toType
// //go:noescape
// func rvPtrToType(typ unsafe.Pointer) reflect.Type
// //go:linkname growslice reflect.growslice
// //go:noescape
// func growslice(typ unsafe.Pointer, old unsafeSlice, cap int) unsafeSlice
// ----

View File

@@ -2,7 +2,6 @@
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !safe && !codec.safe && !appengine && go1.9 && !gc
// +build !safe,!codec.safe,!appengine,go1.9,!gc
package codec
@@ -14,6 +13,15 @@ import (
var unsafeZeroArr [1024]byte
type mapReqParams struct {
ref bool
}
func getMapReqParams(ti *typeInfo) (r mapReqParams) {
r.ref = refBitset.isset(ti.elemkind)
return
}
// runtime.growslice does not work with gccgo, failing with "growslice: cap out of range" error.
// consequently, we just call newarray followed by typedslicecopy directly.
@@ -31,18 +39,11 @@ func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (v unsa
return
}
// func unsafeNew(t reflect.Type, typ unsafe.Pointer) unsafe.Pointer {
// rv := reflect.New(t)
// return ((*unsafeReflectValue)(unsafe.Pointer(&rv))).ptr
// }
// runtime.{mapassign_fastXXX, mapaccess2_fastXXX} are not supported in gollvm,
// failing with "error: undefined reference" error.
// so we just use runtime.{mapassign, mapaccess2} directly
func mapStoresElemIndirect(elemsize uintptr) bool { return false }
func mapSet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) {
func mapSet(m, k, v reflect.Value, p mapReqParams) {
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
var kptr = unsafeMapKVPtr(urv)
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
@@ -56,7 +57,7 @@ func mapSet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) {
typedmemmove(vtyp, vvptr, vptr)
}
func mapGet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) (_ reflect.Value) {
func mapGet(m, k, v reflect.Value, p mapReqParams) (_ reflect.Value) {
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
var kptr = unsafeMapKVPtr(urv)
urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
@@ -70,7 +71,7 @@ func mapGet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) (_ reflec
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
if helperUnsafeDirectAssignMapEntry || valIsRef {
if helperUnsafeDirectAssignMapEntry || p.ref {
urv.ptr = vvptr
} else {
typedmemmove(urv.typ, urv.ptr, vvptr)

File diff suppressed because it is too large Load Diff

View File

@@ -1,235 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
//go:build !codec.notmammoth
// +build codec.notmammoth
// Code generated from mammoth-test.go.tmpl - DO NOT EDIT.
package codec
import "testing"
import "fmt"
import "reflect"
// TestMammoth has all the different paths optimized in fast-path
// It has all the primitives, slices and maps.
//
// For each of those types, it has a pointer and a non-pointer field.
func init() { _ = fmt.Printf } // so we can include fmt as needed
type TestMammoth struct {
{{range .Values }}{{if .Primitive -}}
{{ .MethodNamePfx "F" true }} {{ .Primitive }}
{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}}
{{ .MethodNamePfx "F" false }} []{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
{{ .MethodNamePfx "Farr4" false }} [4]{{ .Elem }}
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey -}}
{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
}
{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}}
type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }}
func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { }
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey -}}
type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
func __doTestMammothSlices(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey -}}
var v{{$i}}va [8]{{ .Elem }}
for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } {
{{/*
// fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v)
// - encode value to some []byte
// - decode into a length-wise-equal []byte
// - check if equal to initial slice
// - encode ptr to the value
// - check if encode bytes are same
// - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice
// - decode into non-addressable slice of equal length, then larger len
// - for each decode, compare elem-by-elem to the original slice
// -
// - rinse and repeat for a MapBySlice version
// -
*/ -}}
var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }}
var bs{{$i}} []byte
v{{$i}}v1 = v
bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}")
if v == nil {
v{{$i}}v2 = make([]{{ .Elem }}, 2)
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}") // should not change
testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change
v{{$i}}v2 = make([]{{ .Elem }}, 2)
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}-noaddr") // should not change
testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change
} else {
v{{$i}}v2 = make([]{{ .Elem }}, len(v))
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}")
v{{$i}}v2 = make([]{{ .Elem }}, len(v))
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr")
}
testReleaseBytes(bs{{$i}})
// ...
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr(&v{{$i}}va, bs{{$i}}, h, t, "dec-array-v{{$i}}-p-1")
if v{{$i}}v1 == nil && v{{$i}}v2 == nil { v{{$i}}v2 = []{{ .Elem }}{} } // so we can compare to zero len slice below
testDeepEqualErr(v{{$i}}va[:len(v{{$i}}v2)], v{{$i}}v2, t, "equal-array-v{{$i}}-p-1")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:1:1]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
v{{$i}}v2 = v{{$i}}va[:]
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap")
if len(v{{$i}}v1) > 1 {
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr")
testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr")
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr")
testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr")
}
testReleaseBytes(bs{{$i}})
// ...
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }}
v{{$i}}v2 = nil
if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1)
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
if v != nil {
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom")
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom")
testReleaseBytes(bs{{$i}})
}
bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p")
v{{$i}}v2 = nil
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p")
testReleaseBytes(bs{{$i}})
}
{{end}}{{end}}{{end}}
}
func __doTestMammothMaps(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey -}}
for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } {
// fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v)
var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }}
var bs{{$i}} []byte
v{{$i}}v1 = v
bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}")
if v != nil {
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}")
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr")
}
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p")
v{{$i}}v2 = nil
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil")
testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil")
testReleaseBytes(bs{{$i}})
// ...
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }}
v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1)
v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2)
if v != nil {
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom")
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len")
testReleaseBytes(bs{{$i}})
}
}
{{end}}{{end}}{{end}}
}
func doTestMammothMapsAndSlices(t *testing.T, h Handle) {
defer testSetup(t, &h)()
if mh, ok := h.(*MsgpackHandle); ok {
defer func(b bool) { mh.RawToString = b }(mh.RawToString)
mh.RawToString = true
}
__doTestMammothSlices(t, h)
__doTestMammothMaps(t, h)
}
func doTestMammoth(t *testing.T, h Handle) {
defer testSetup(t, &h)()
if mh, ok := h.(*MsgpackHandle); ok {
defer func(b bool) { mh.RawToString = b }(mh.RawToString)
mh.RawToString = true
}
name := h.Name()
var b []byte
var m, m2 TestMammoth
testRandomFillRV(reflect.ValueOf(&m).Elem())
b = testMarshalErr(&m, h, t, "mammoth-"+name)
testUnmarshalErr(&m2, b, h, t, "mammoth-"+name)
testDeepEqualErr(&m, &m2, t, "mammoth-"+name)
testReleaseBytes(b)
if testing.Short() {
t.Skipf("skipping rest of mammoth test in -short mode")
}
var mm, mm2 TestMammoth2Wrapper
testRandomFillRV(reflect.ValueOf(&mm).Elem())
b = testMarshalErr(&mm, h, t, "mammoth2-"+name)
// os.Stderr.Write([]byte("\n\n\n\n" + string(b) + "\n\n\n\n"))
testUnmarshalErr(&mm2, b, h, t, "mammoth2-"+name)
testDeepEqualErr(&mm, &mm2, t, "mammoth2-"+name)
// testMammoth2(t, name, h)
testReleaseBytes(b)
}
{{range $i, $e := .Formats -}}
func Test{{ . }}Mammoth(t *testing.T) {
doTestMammoth(t, test{{ . }}H)
}
{{end}}
{{range $i, $e := .Formats -}}
func Test{{ . }}MammothMapsAndSlices(t *testing.T) {
doTestMammothMapsAndSlices(t, test{{ . }}H)
}
{{end}}

View File

@@ -1,101 +0,0 @@
// +build !codec.notmammoth
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT.
package codec
// Increase codecoverage by covering all the codecgen paths, in fast-path and gen-helper.go....
//
// Note: even though this is built based on fast-path and gen-helper, we will run these tests
// in all modes, including notfastpath, etc.
//
// Add test file for creating a mammoth generated file as _mammoth_generated.go
// - generate a second mammoth files in a different file: mammoth2_generated_test.go
// mammoth-test.go.tmpl will do this
// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags)
// - as part of TestMammoth, run it also
// - this will cover all the codecgen, gen-helper, etc in one full run
// - check in mammoth* files into github also
//
// Now, add some types:
// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it
// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types
// - this wrapper object is what we work encode/decode (so that the codecgen methods are called)
// import "encoding/binary"
import "fmt"
type TestMammoth2 struct {
{{range .Values }}{{if .Primitive }}{{/*
*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }}
{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
{{end}}{{end}}{{end}}
{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
}
// -----------
type testMammoth2Binary uint64
func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) {
data = make([]byte, 8)
bigenstd.PutUint64(data, uint64(x))
return
}
func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) {
*x = testMammoth2Binary(bigenstd.Uint64(data))
return
}
type testMammoth2Text uint64
func (x testMammoth2Text) MarshalText() (data []byte, err error) {
data = []byte(fmt.Sprintf("%b", uint64(x)))
return
}
func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x))
return
}
type testMammoth2Json uint64
func (x testMammoth2Json) MarshalJSON() (data []byte, err error) {
data = []byte(fmt.Sprintf("%v", uint64(x)))
return
}
func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x))
return
}
type testMammoth2Basic [4]uint64
type TestMammoth2Wrapper struct {
V TestMammoth2
T testMammoth2Text
B testMammoth2Binary
J testMammoth2Json
C testMammoth2Basic
M map[testMammoth2Basic]TestMammoth2
L []TestMammoth2
A [4]int64
Tcomplex128 complex128
Tcomplex64 complex64
Tbytes []uint8
Tpbytes *[]uint8
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,38 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import "reflect"
// This file exists, so that the files for specific formats do not all import reflect.
// This just helps us ensure that reflect package is isolated to a few files.
// SetInterfaceExt sets an extension
func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// SetInterfaceExt sets an extension
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// SetBytesExt sets an extension
func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// SetBytesExt sets an extension
func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// SetBytesExt sets an extension
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, makeExt(ext))
}
// func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
// return h.SetExt(rt, tag, &interfaceExtWrapper{InterfaceExt: ext})
// }

View File

@@ -4,10 +4,11 @@
package codec
import (
"bufio"
"errors"
"io"
"net"
"net/rpc"
"sync/atomic"
)
var (
@@ -28,57 +29,44 @@ type RPCOptions struct {
// RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls.
//
// Set RPCNoBuffer=true to turn buffering off.
//
// Buffering can still be done if buffered connections are passed in, or
// buffering is configured on the handle.
//
// Deprecated: Buffering should be configured at the Handle or by using a buffer Reader.
// Setting this has no effect anymore (after v1.2.12 - authored 2025-05-06)
RPCNoBuffer bool
}
// rpcCodec defines the struct members and common methods.
type rpcCodec struct {
c io.Closer
r io.Reader
w io.Writer
f ioFlusher
c io.Closer
r io.Reader
w io.Writer
f ioFlusher
nc net.Conn
dec *Decoder
enc *Encoder
h Handle
cls atomicClsErr
cls atomic.Pointer[clsErr]
}
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
return newRPCCodec2(conn, conn, conn, h)
}
func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
bh := h.getBasicHandle()
// if the writer can flush, ensure we leverage it, else
// we may hang waiting on read if write isn't flushed.
// var f ioFlusher
f, ok := w.(ioFlusher)
if !bh.RPCNoBuffer {
if bh.WriterBufferSize <= 0 {
if !ok { // a flusher means there's already a buffer
bw := bufio.NewWriter(w)
f, w = bw, bw
}
}
if bh.ReaderBufferSize <= 0 {
if _, ok = w.(ioBuffered); !ok {
r = bufio.NewReader(r)
}
}
}
return rpcCodec{
c: c,
w: w,
r: r,
f: f,
func newRPCCodec(conn io.ReadWriteCloser, h Handle) *rpcCodec {
nc, _ := conn.(net.Conn)
f, _ := conn.(ioFlusher)
rc := &rpcCodec{
h: h,
enc: NewEncoder(w, h),
dec: NewDecoder(r, h),
c: conn,
w: conn,
r: conn,
f: f,
nc: nc,
enc: NewEncoder(conn, h),
dec: NewDecoder(conn, h),
}
rc.cls.Store(new(clsErr))
return rc
}
func (c *rpcCodec) write(obj ...interface{}) (err error) {
@@ -116,10 +104,16 @@ func (c *rpcCodec) write(obj ...interface{}) (err error) {
func (c *rpcCodec) read(obj interface{}) (err error) {
err = c.ready()
if err == nil {
//If nil is passed in, we should read and discard
// Setting ReadDeadline should not be necessary,
// especially since it only works for net.Conn (not generic ioReadCloser).
// if c.nc != nil {
// c.nc.SetReadDeadline(time.Now().Add(1 * time.Second))
// }
// Note: If nil is passed in, we should read and discard
if obj == nil {
// return c.dec.Decode(&obj)
err = c.dec.swallowErr()
err = panicToErr(c.dec, func() { c.dec.swallow() })
} else {
err = c.dec.Decode(obj)
}
@@ -129,11 +123,11 @@ func (c *rpcCodec) read(obj interface{}) (err error) {
func (c *rpcCodec) Close() (err error) {
if c.c != nil {
cls := c.cls.load()
cls := c.cls.Load()
if !cls.closed {
cls.err = c.c.Close()
cls.closed = true
c.cls.store(cls)
// writing to same pointer could lead to a data race (always make new one)
cls = &clsErr{closed: true, err: c.c.Close()}
c.cls.Store(cls)
}
err = cls.err
}
@@ -144,8 +138,8 @@ func (c *rpcCodec) ready() (err error) {
if c.c == nil {
err = errRpcNoConn
} else {
cls := c.cls.load()
if cls.closed {
cls := c.cls.Load()
if cls != nil && cls.closed {
if err = cls.err; err == nil {
err = errRpcIsClosed
}
@@ -161,7 +155,7 @@ func (c *rpcCodec) ReadResponseBody(body interface{}) error {
// -------------------------------------
type goRpcCodec struct {
rpcCodec
*rpcCodec
}
func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {

View File

@@ -1,111 +1,65 @@
//go:build notmono || codec.notmono
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"io"
"math"
"reflect"
"time"
)
const (
_ uint8 = iota
simpleVdNil = 1
simpleVdFalse = 2
simpleVdTrue = 3
simpleVdFloat32 = 4
simpleVdFloat64 = 5
// each lasts for 4 (ie n, n+1, n+2, n+3)
simpleVdPosInt = 8
simpleVdNegInt = 12
simpleVdTime = 24
// containers: each lasts for 4 (ie n, n+1, n+2, ... n+7)
simpleVdString = 216
simpleVdByteArray = 224
simpleVdArray = 232
simpleVdMap = 240
simpleVdExt = 248
)
var simpledescNames = map[byte]string{
simpleVdNil: "null",
simpleVdFalse: "false",
simpleVdTrue: "true",
simpleVdFloat32: "float32",
simpleVdFloat64: "float64",
simpleVdPosInt: "+int",
simpleVdNegInt: "-int",
simpleVdTime: "time",
simpleVdString: "string",
simpleVdByteArray: "binary",
simpleVdArray: "array",
simpleVdMap: "map",
simpleVdExt: "ext",
}
func simpledesc(bd byte) (s string) {
s = simpledescNames[bd]
if s == "" {
s = "unknown"
}
return
}
type simpleEncDriver struct {
type simpleEncDriver[T encWriter] struct {
noBuiltInTypes
encDriverNoopContainerWriter
encDriverNoState
encDriverContainerNoTrackerT
encInit2er
h *SimpleHandle
e *encoderBase
// b [8]byte
e Encoder
w T
}
func (e *simpleEncDriver) encoder() *Encoder {
return &e.e
func (e *simpleEncDriver[T]) EncodeNil() {
e.w.writen1(simpleVdNil)
}
func (e *simpleEncDriver) EncodeNil() {
e.e.encWr.writen1(simpleVdNil)
}
func (e *simpleEncDriver) EncodeBool(b bool) {
func (e *simpleEncDriver[T]) EncodeBool(b bool) {
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && !b {
e.EncodeNil()
return
}
if b {
e.e.encWr.writen1(simpleVdTrue)
e.w.writen1(simpleVdTrue)
} else {
e.e.encWr.writen1(simpleVdFalse)
e.w.writen1(simpleVdFalse)
}
}
func (e *simpleEncDriver) EncodeFloat32(f float32) {
func (e *simpleEncDriver[T]) EncodeFloat32(f float32) {
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 {
e.EncodeNil()
return
}
e.e.encWr.writen1(simpleVdFloat32)
bigen.writeUint32(e.e.w(), math.Float32bits(f))
e.w.writen1(simpleVdFloat32)
e.w.writen4(bigen.PutUint32(math.Float32bits(f)))
}
func (e *simpleEncDriver) EncodeFloat64(f float64) {
func (e *simpleEncDriver[T]) EncodeFloat64(f float64) {
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 {
e.EncodeNil()
return
}
e.e.encWr.writen1(simpleVdFloat64)
bigen.writeUint64(e.e.w(), math.Float64bits(f))
e.w.writen1(simpleVdFloat64)
e.w.writen8(bigen.PutUint64(math.Float64bits(f)))
}
func (e *simpleEncDriver) EncodeInt(v int64) {
func (e *simpleEncDriver[T]) EncodeInt(v int64) {
if v < 0 {
e.encUint(uint64(-v), simpleVdNegInt)
} else {
@@ -113,62 +67,62 @@ func (e *simpleEncDriver) EncodeInt(v int64) {
}
}
func (e *simpleEncDriver) EncodeUint(v uint64) {
func (e *simpleEncDriver[T]) EncodeUint(v uint64) {
e.encUint(v, simpleVdPosInt)
}
func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
func (e *simpleEncDriver[T]) encUint(v uint64, bd uint8) {
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == 0 {
e.EncodeNil()
return
}
if v <= math.MaxUint8 {
e.e.encWr.writen2(bd, uint8(v))
e.w.writen2(bd, uint8(v))
} else if v <= math.MaxUint16 {
e.e.encWr.writen1(bd + 1)
bigen.writeUint16(e.e.w(), uint16(v))
e.w.writen1(bd + 1)
e.w.writen2(bigen.PutUint16(uint16(v)))
} else if v <= math.MaxUint32 {
e.e.encWr.writen1(bd + 2)
bigen.writeUint32(e.e.w(), uint32(v))
e.w.writen1(bd + 2)
e.w.writen4(bigen.PutUint32(uint32(v)))
} else { // if v <= math.MaxUint64 {
e.e.encWr.writen1(bd + 3)
bigen.writeUint64(e.e.w(), v)
e.w.writen1(bd + 3)
e.w.writen8(bigen.PutUint64(v))
}
}
func (e *simpleEncDriver) encLen(bd byte, length int) {
func (e *simpleEncDriver[T]) encLen(bd byte, length int) {
if length == 0 {
e.e.encWr.writen1(bd)
e.w.writen1(bd)
} else if length <= math.MaxUint8 {
e.e.encWr.writen1(bd + 1)
e.e.encWr.writen1(uint8(length))
e.w.writen1(bd + 1)
e.w.writen1(uint8(length))
} else if length <= math.MaxUint16 {
e.e.encWr.writen1(bd + 2)
bigen.writeUint16(e.e.w(), uint16(length))
e.w.writen1(bd + 2)
e.w.writen2(bigen.PutUint16(uint16(length)))
} else if int64(length) <= math.MaxUint32 {
e.e.encWr.writen1(bd + 3)
bigen.writeUint32(e.e.w(), uint32(length))
e.w.writen1(bd + 3)
e.w.writen4(bigen.PutUint32(uint32(length)))
} else {
e.e.encWr.writen1(bd + 4)
bigen.writeUint64(e.e.w(), uint64(length))
e.w.writen1(bd + 4)
e.w.writen8(bigen.PutUint64(uint64(length)))
}
}
func (e *simpleEncDriver) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
func (e *simpleEncDriver[T]) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
var bs0, bs []byte
if ext == SelfExt {
bs0 = e.e.blist.get(1024)
bs = bs0
e.e.sideEncode(v, basetype, &bs)
sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, true) })
} else {
bs = ext.WriteExt(v)
}
if bs == nil {
e.EncodeNil()
e.writeNilBytes()
goto END
}
e.encodeExtPreamble(uint8(xtag), len(bs))
e.e.encWr.writeb(bs)
e.w.writeb(bs)
END:
if ext == SelfExt {
e.e.blist.put(bs)
@@ -178,25 +132,35 @@ END:
}
}
func (e *simpleEncDriver) EncodeRawExt(re *RawExt) {
func (e *simpleEncDriver[T]) EncodeRawExt(re *RawExt) {
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
e.e.encWr.writeb(re.Data)
e.w.writeb(re.Data)
}
func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
func (e *simpleEncDriver[T]) encodeExtPreamble(xtag byte, length int) {
e.encLen(simpleVdExt, length)
e.e.encWr.writen1(xtag)
e.w.writen1(xtag)
}
func (e *simpleEncDriver) WriteArrayStart(length int) {
func (e *simpleEncDriver[T]) WriteArrayStart(length int) {
e.encLen(simpleVdArray, length)
}
func (e *simpleEncDriver) WriteMapStart(length int) {
func (e *simpleEncDriver[T]) WriteMapStart(length int) {
e.encLen(simpleVdMap, length)
}
func (e *simpleEncDriver) EncodeString(v string) {
func (e *simpleEncDriver[T]) WriteArrayEmpty() {
// e.WriteArrayStart(0) = e.encLen(simpleVdArray, 0)
e.w.writen1(simpleVdArray)
}
func (e *simpleEncDriver[T]) WriteMapEmpty() {
// e.WriteMapStart(0) = e.encLen(simpleVdMap, 0)
e.w.writen1(simpleVdMap)
}
func (e *simpleEncDriver[T]) EncodeString(v string) {
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == "" {
e.EncodeNil()
return
@@ -206,57 +170,88 @@ func (e *simpleEncDriver) EncodeString(v string) {
} else {
e.encLen(simpleVdString, len(v))
}
e.e.encWr.writestr(v)
e.w.writestr(v)
}
func (e *simpleEncDriver) EncodeStringBytesRaw(v []byte) {
func (e *simpleEncDriver[T]) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) }
func (e *simpleEncDriver[T]) EncodeStringBytesRaw(v []byte) {
// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil {
e.encLen(simpleVdByteArray, len(v))
e.w.writeb(v)
}
func (e *simpleEncDriver[T]) EncodeBytes(v []byte) {
if v == nil {
e.EncodeNil()
e.writeNilBytes()
return
}
e.encLen(simpleVdByteArray, len(v))
e.e.encWr.writeb(v)
e.EncodeStringBytesRaw(v)
}
func (e *simpleEncDriver) EncodeTime(t time.Time) {
func (e *simpleEncDriver[T]) encodeNilBytes() {
b := byte(simpleVdNil)
if e.h.NilCollectionToZeroLength {
b = simpleVdArray
}
e.w.writen1(b)
}
func (e *simpleEncDriver[T]) writeNilOr(v byte) {
if !e.h.NilCollectionToZeroLength {
v = simpleVdNil
}
e.w.writen1(v)
}
func (e *simpleEncDriver[T]) writeNilArray() {
e.writeNilOr(simpleVdArray)
}
func (e *simpleEncDriver[T]) writeNilMap() {
e.writeNilOr(simpleVdMap)
}
func (e *simpleEncDriver[T]) writeNilBytes() {
e.writeNilOr(simpleVdByteArray)
}
func (e *simpleEncDriver[T]) EncodeTime(t time.Time) {
// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() {
if t.IsZero() {
e.EncodeNil()
return
}
v, err := t.MarshalBinary()
e.e.onerror(err)
e.e.encWr.writen2(simpleVdTime, uint8(len(v)))
e.e.encWr.writeb(v)
halt.onerror(err)
e.w.writen2(simpleVdTime, uint8(len(v)))
e.w.writeb(v)
}
//------------------------------------
type simpleDecDriver struct {
type simpleDecDriver[T decReader] struct {
h *SimpleHandle
d *decoderBase
r T
bdAndBdread
_ bool
// bytes bool
noBuiltInTypes
// decDriverNoopNumberHelper
decDriverNoopContainerReader
decDriverNoopNumberHelper
d Decoder
decInit2er
// ds interface{} // must be *decoder[simpleDecDriverM[bytes...]]
}
func (d *simpleDecDriver) decoder() *Decoder {
return &d.d
}
func (d *simpleDecDriver) descBd() string {
return sprintf("%v (%s)", d.bd, simpledesc(d.bd))
}
func (d *simpleDecDriver) readNextBd() {
d.bd = d.d.decRd.readn1()
func (d *simpleDecDriver[T]) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
}
func (d *simpleDecDriver) advanceNil() (null bool) {
func (d *simpleDecDriver[T]) advanceNil() (null bool) {
if !d.bdRead {
d.readNextBd()
}
@@ -267,7 +262,7 @@ func (d *simpleDecDriver) advanceNil() (null bool) {
return
}
func (d *simpleDecDriver) ContainerType() (vt valueType) {
func (d *simpleDecDriver[T]) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
@@ -291,88 +286,90 @@ func (d *simpleDecDriver) ContainerType() (vt valueType) {
return valueTypeUnset
}
func (d *simpleDecDriver) TryNil() bool {
func (d *simpleDecDriver[T]) TryNil() bool {
return d.advanceNil()
}
func (d *simpleDecDriver) decFloat() (f float64, ok bool) {
func (d *simpleDecDriver[T]) decFloat() (f float64, ok bool) {
ok = true
switch d.bd {
case simpleVdFloat32:
f = float64(math.Float32frombits(bigen.Uint32(d.d.decRd.readn4())))
f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4())))
case simpleVdFloat64:
f = math.Float64frombits(bigen.Uint64(d.d.decRd.readn8()))
f = math.Float64frombits(bigen.Uint64(d.r.readn8()))
default:
ok = false
}
return
}
func (d *simpleDecDriver) decInteger() (ui uint64, neg, ok bool) {
func (d *simpleDecDriver[T]) decInteger() (ui uint64, neg, ok bool) {
ok = true
switch d.bd {
case simpleVdPosInt:
ui = uint64(d.d.decRd.readn1())
ui = uint64(d.r.readn1())
case simpleVdPosInt + 1:
ui = uint64(bigen.Uint16(d.d.decRd.readn2()))
ui = uint64(bigen.Uint16(d.r.readn2()))
case simpleVdPosInt + 2:
ui = uint64(bigen.Uint32(d.d.decRd.readn4()))
ui = uint64(bigen.Uint32(d.r.readn4()))
case simpleVdPosInt + 3:
ui = uint64(bigen.Uint64(d.d.decRd.readn8()))
ui = uint64(bigen.Uint64(d.r.readn8()))
case simpleVdNegInt:
ui = uint64(d.d.decRd.readn1())
ui = uint64(d.r.readn1())
neg = true
case simpleVdNegInt + 1:
ui = uint64(bigen.Uint16(d.d.decRd.readn2()))
ui = uint64(bigen.Uint16(d.r.readn2()))
neg = true
case simpleVdNegInt + 2:
ui = uint64(bigen.Uint32(d.d.decRd.readn4()))
ui = uint64(bigen.Uint32(d.r.readn4()))
neg = true
case simpleVdNegInt + 3:
ui = uint64(bigen.Uint64(d.d.decRd.readn8()))
ui = uint64(bigen.Uint64(d.r.readn8()))
neg = true
default:
ok = false
// d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
// halt.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
}
// DO NOT do this check below, because callers may only want the unsigned value:
//
// if ui > math.MaxInt64 {
// d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
// halt.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
// return
// }
return
}
func (d *simpleDecDriver) DecodeInt64() (i int64) {
func (d *simpleDecDriver[T]) DecodeInt64() (i int64) {
if d.advanceNil() {
return
}
i = decNegintPosintFloatNumberHelper{&d.d}.int64(d.decInteger())
v1, v2, v3 := d.decInteger()
i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, false)
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeUint64() (ui uint64) {
func (d *simpleDecDriver[T]) DecodeUint64() (ui uint64) {
if d.advanceNil() {
return
}
ui = decNegintPosintFloatNumberHelper{&d.d}.uint64(d.decInteger())
ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger())
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeFloat64() (f float64) {
func (d *simpleDecDriver[T]) DecodeFloat64() (f float64) {
if d.advanceNil() {
return
}
f = decNegintPosintFloatNumberHelper{&d.d}.float64(d.decFloat())
v1, v2 := d.decFloat()
f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, false)
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *simpleDecDriver) DecodeBool() (b bool) {
func (d *simpleDecDriver[T]) DecodeBool() (b bool) {
if d.advanceNil() {
return
}
@@ -380,13 +377,13 @@ func (d *simpleDecDriver) DecodeBool() (b bool) {
} else if d.bd == simpleVdTrue {
b = true
} else {
d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
halt.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
}
d.bdRead = false
return
}
func (d *simpleDecDriver) ReadMapStart() (length int) {
func (d *simpleDecDriver[T]) ReadMapStart() (length int) {
if d.advanceNil() {
return containerLenNil
}
@@ -394,7 +391,7 @@ func (d *simpleDecDriver) ReadMapStart() (length int) {
return d.decLen()
}
func (d *simpleDecDriver) ReadArrayStart() (length int) {
func (d *simpleDecDriver[T]) ReadArrayStart() (length int) {
if d.advanceNil() {
return containerLenNil
}
@@ -402,131 +399,128 @@ func (d *simpleDecDriver) ReadArrayStart() (length int) {
return d.decLen()
}
func (d *simpleDecDriver) uint2Len(ui uint64) int {
func (d *simpleDecDriver[T]) uint2Len(ui uint64) int {
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("overflow integer: %v", ui)
halt.errorf("overflow integer: %v", ui)
}
return int(ui)
}
func (d *simpleDecDriver) decLen() int {
func (d *simpleDecDriver[T]) decLen() int {
switch d.bd & 7 { // d.bd % 8 {
case 0:
return 0
case 1:
return int(d.d.decRd.readn1())
return int(d.r.readn1())
case 2:
return int(bigen.Uint16(d.d.decRd.readn2()))
return int(bigen.Uint16(d.r.readn2()))
case 3:
return d.uint2Len(uint64(bigen.Uint32(d.d.decRd.readn4())))
return d.uint2Len(uint64(bigen.Uint32(d.r.readn4())))
case 4:
return d.uint2Len(bigen.Uint64(d.d.decRd.readn8()))
return d.uint2Len(bigen.Uint64(d.r.readn8()))
}
d.d.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
halt.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
return -1
}
func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(nil)
func (d *simpleDecDriver[T]) DecodeStringAsBytes() ([]byte, dBytesAttachState) {
return d.DecodeBytes()
}
func (d *simpleDecDriver) DecodeBytes(bs []byte) (bsOut []byte) {
d.d.decByteState = decByteStateNone
func (d *simpleDecDriver[T]) DecodeBytes() (bs []byte, state dBytesAttachState) {
if d.advanceNil() {
return
}
var cond bool
// check if an "array" of uint8's (see ContainerType for how to infer if an array)
if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 {
if bs == nil {
d.d.decByteState = decByteStateReuseBuf
bs = d.d.b[:]
}
if d.bd >= simpleVdArray && d.bd <= simpleVdArray+4 {
slen := d.ReadArrayStart()
var changed bool
if bs, changed = usableByteSlice(bs, slen); changed {
d.d.decByteState = decByteStateNone
}
bs, cond = usableByteSlice(d.d.buf, slen)
for i := 0; i < len(bs); i++ {
bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8))
}
for i := len(bs); i < slen; i++ {
bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8)))
}
return bs
if cond {
d.d.buf = bs
}
state = dBytesAttachBuffer
return
}
clen := d.decLen()
d.bdRead = false
if d.d.zerocopy() {
d.d.decByteState = decByteStateZerocopy
return d.d.decRd.rb.readx(uint(clen))
}
if bs == nil {
d.d.decByteState = decByteStateReuseBuf
bs = d.d.b[:]
}
return decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, bs)
bs, cond = d.r.readxb(uint(clen))
state = d.d.attachState(cond)
return
}
func (d *simpleDecDriver) DecodeTime() (t time.Time) {
func (d *simpleDecDriver[T]) DecodeTime() (t time.Time) {
if d.advanceNil() {
return
}
if d.bd != simpleVdTime {
d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
halt.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
}
d.bdRead = false
clen := uint(d.d.decRd.readn1())
b := d.d.decRd.readx(clen)
d.d.onerror((&t).UnmarshalBinary(b))
clen := uint(d.r.readn1())
b := d.r.readx(clen)
halt.onerror((&t).UnmarshalBinary(b))
return
}
func (d *simpleDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
if xtag > 0xff {
d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
}
if d.advanceNil() {
func (d *simpleDecDriver[T]) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
xbs, _, _, ok := d.decodeExtV(ext != nil, xtag)
if !ok {
return
}
xbs, realxtag1, zerocopy := d.decodeExtV(ext != nil, uint8(xtag))
realxtag := uint64(realxtag1)
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
re.setData(xbs, zerocopy)
} else if ext == SelfExt {
d.d.sideDecode(rv, basetype, xbs)
if ext == SelfExt {
sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, true) })
} else {
ext.ReadExt(rv, xbs)
}
}
func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xbs []byte, xtag byte, zerocopy bool) {
func (d *simpleDecDriver[T]) DecodeRawExt(re *RawExt) {
xbs, realxtag, state, ok := d.decodeExtV(false, 0)
if !ok {
return
}
re.Tag = uint64(realxtag)
re.setData(xbs, state >= dBytesAttachViewZerocopy)
}
func (d *simpleDecDriver[T]) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) {
if xtagIn > 0xff {
halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn)
}
if d.advanceNil() {
return
}
tag := uint8(xtagIn)
switch d.bd {
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
l := d.decLen()
xtag = d.d.decRd.readn1()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
}
if d.d.bytes {
xbs = d.d.decRd.rb.readx(uint(l))
zerocopy = true
} else {
xbs = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
halt.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
}
xbs, ok = d.r.readxb(uint(l))
bstate = d.d.attachState(ok)
case simpleVdByteArray, simpleVdByteArray + 1,
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
xbs = d.DecodeBytes(nil)
xbs, bstate = d.DecodeBytes()
default:
d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
halt.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
}
d.bdRead = false
ok = true
return
}
func (d *simpleDecDriver) DecodeNaked() {
func (d *simpleDecDriver[T]) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
@@ -566,19 +560,20 @@ func (d *simpleDecDriver) DecodeNaked() {
case simpleVdString, simpleVdString + 1,
simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
n.v = valueTypeString
n.s = d.d.stringZC(d.DecodeStringAsBytes())
n.s = d.d.detach2Str(d.DecodeStringAsBytes())
case simpleVdByteArray, simpleVdByteArray + 1,
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
d.d.fauxUnionReadRawBytes(false)
d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) //, d.h.ZeroCopy)
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
n.v = valueTypeExt
l := d.decLen()
n.u = uint64(d.d.decRd.readn1())
if d.d.bytes {
n.l = d.d.decRd.rb.readx(uint(l))
} else {
n.l = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
}
n.u = uint64(d.r.readn1())
n.l = d.r.readx(uint(l))
// MARKER: not necessary to detach for extensions
// var useBuf bool
// n.l, useBuf = d.r.readxb(uint(l))
// n.a = d.d.attachState(useBuf)
// n.l = d.d.detach2Bytes(n.l, nil, n.a)
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2,
simpleVdArray + 3, simpleVdArray + 4:
n.v = valueTypeArray
@@ -587,7 +582,7 @@ func (d *simpleDecDriver) DecodeNaked() {
n.v = valueTypeMap
decodeFurther = true
default:
d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd)
halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd)
}
if !decodeFurther {
@@ -595,32 +590,18 @@ func (d *simpleDecDriver) DecodeNaked() {
}
}
func (d *simpleDecDriver) nextValueBytes(v0 []byte) (v []byte) {
func (d *simpleDecDriver[T]) nextValueBytes() (v []byte) {
if !d.bdRead {
d.readNextBd()
}
v = v0
var h = decNextValueBytesHelper{d: &d.d}
var cursor = d.d.rb.c - 1
h.append1(&v, d.bd)
v = d.nextValueBytesBdReadR(v)
d.r.startRecording()
d.nextValueBytesBdReadR()
v = d.r.stopRecording()
d.bdRead = false
h.bytesRdV(&v, cursor)
return
}
func (d *simpleDecDriver) nextValueBytesR(v0 []byte) (v []byte) {
d.readNextBd()
v = v0
var h = decNextValueBytesHelper{d: &d.d}
h.append1(&v, d.bd)
return d.nextValueBytesBdReadR(v)
}
func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
v = v0
var h = decNextValueBytesHelper{d: &d.d}
func (d *simpleDecDriver[T]) nextValueBytesBdReadR() {
c := d.bd
var length uint
@@ -629,38 +610,33 @@ func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
case simpleVdNil, simpleVdFalse, simpleVdTrue, simpleVdString, simpleVdByteArray:
// pass
case simpleVdPosInt, simpleVdNegInt:
h.append1(&v, d.d.decRd.readn1())
d.r.readn1()
case simpleVdPosInt + 1, simpleVdNegInt + 1:
h.appendN(&v, d.d.decRd.readx(2)...)
d.r.skip(2)
case simpleVdPosInt + 2, simpleVdNegInt + 2, simpleVdFloat32:
h.appendN(&v, d.d.decRd.readx(4)...)
d.r.skip(4)
case simpleVdPosInt + 3, simpleVdNegInt + 3, simpleVdFloat64:
h.appendN(&v, d.d.decRd.readx(8)...)
d.r.skip(8)
case simpleVdTime:
c = d.d.decRd.readn1()
h.append1(&v, c)
h.appendN(&v, d.d.decRd.readx(uint(c))...)
c = d.r.readn1()
d.r.skip(uint(c))
default:
switch c & 7 { // c % 8 {
case 0:
length = 0
case 1:
b := d.d.decRd.readn1()
b := d.r.readn1()
length = uint(b)
h.append1(&v, b)
case 2:
x := d.d.decRd.readn2()
x := d.r.readn2()
length = uint(bigen.Uint16(x))
h.appendN(&v, x[:]...)
case 3:
x := d.d.decRd.readn4()
x := d.r.readn4()
length = uint(bigen.Uint32(x))
h.appendN(&v, x[:]...)
case 4:
x := d.d.decRd.readn8()
x := d.r.readn8()
length = uint(bigen.Uint64(x))
h.appendN(&v, x[:]...)
}
bExt := c >= simpleVdExt && c <= simpleVdExt+7
@@ -670,11 +646,11 @@ func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
bMap := c >= simpleVdMap && c <= simpleVdMap+7
if !(bExt || bStr || bByteArray || bArray || bMap) {
d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, c)
halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, c)
}
if bExt {
h.append1(&v, d.d.decRd.readn1()) // tag
d.r.readn1() // tag
}
if length == 0 {
@@ -683,68 +659,91 @@ func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
if bArray {
for i := uint(0); i < length; i++ {
v = d.nextValueBytesR(v)
d.readNextBd()
d.nextValueBytesBdReadR()
}
} else if bMap {
for i := uint(0); i < length; i++ {
v = d.nextValueBytesR(v)
v = d.nextValueBytesR(v)
d.readNextBd()
d.nextValueBytesBdReadR()
d.readNextBd()
d.nextValueBytesBdReadR()
}
} else {
h.appendN(&v, d.d.decRd.readx(length)...)
d.r.skip(length)
}
}
return
}
//------------------------------------
// SimpleHandle is a Handle for a very simple encoding format.
// ----
//
// simple is a simplistic codec similar to binc, but not as compact.
// - Encoding of a value is always preceded by the descriptor byte (bd)
// - True, false, nil are encoded fully in 1 byte (the descriptor)
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
// - Length of containers (strings, bytes, array, map, extensions)
// are encoded in 0, 1, 2, 4 or 8 bytes.
// Zero-length containers have no length encoded.
// For others, the number of bytes is given by pow(2, bd%3)
// - maps are encoded as [bd] [length] [[key][value]]...
// - arrays are encoded as [bd] [length] [value]...
// - extensions are encoded as [bd] [length] [tag] [byte]...
// - strings/bytearrays are encoded as [bd] [length] [byte]...
// - time.Time are encoded as [bd] [length] [byte]...
// The following below are similar across all format files (except for the format name).
//
// The full spec will be published soon.
type SimpleHandle struct {
binaryEncodingType
BasicHandle
// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
EncZeroValuesAsNil bool
// We keep them together here, so that we can easily copy and compare.
// ----
func (d *simpleEncDriver[T]) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) {
callMake(&d.w)
d.h = hh.(*SimpleHandle)
d.e = shared
if shared.bytes {
fp = simpleFpEncBytes
} else {
fp = simpleFpEncIO
}
// d.w.init()
d.init2(enc)
return
}
// Name returns the name of the handle: simple
func (h *SimpleHandle) Name() string { return "simple" }
func (e *simpleEncDriver[T]) writeBytesAsis(b []byte) { e.w.writeb(b) }
func (h *SimpleHandle) desc(bd byte) string { return simpledesc(bd) }
func (e *simpleEncDriver[T]) writerEnd() { e.w.end() }
func (h *SimpleHandle) newEncDriver() encDriver {
var e = &simpleEncDriver{h: h}
e.e.e = e
e.e.init(h)
e.reset()
return e
func (e *simpleEncDriver[T]) resetOutBytes(out *[]byte) {
e.w.resetBytes(*out, out)
}
func (h *SimpleHandle) newDecDriver() decDriver {
d := &simpleDecDriver{h: h}
d.d.d = d
d.d.init(h)
d.reset()
return d
func (e *simpleEncDriver[T]) resetOutIO(out io.Writer) {
e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist)
}
var _ decDriver = (*simpleDecDriver)(nil)
var _ encDriver = (*simpleEncDriver)(nil)
// ----
func (d *simpleDecDriver[T]) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) {
callMake(&d.r)
d.h = hh.(*SimpleHandle)
d.d = shared
if shared.bytes {
fp = simpleFpDecBytes
} else {
fp = simpleFpDecIO
}
// d.r.init()
d.init2(dec)
return
}
func (d *simpleDecDriver[T]) NumBytesRead() int {
return int(d.r.numread())
}
func (d *simpleDecDriver[T]) resetInBytes(in []byte) {
d.r.resetBytes(in)
}
func (d *simpleDecDriver[T]) resetInIO(r io.Reader) {
d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist)
}
// ---- (custom stanza)
func (d *simpleDecDriver[T]) descBd() string {
return sprintf("%v (%s)", d.bd, simpledesc(d.bd))
}
func (d *simpleDecDriver[T]) DecodeFloat32() (f float32) {
return float32(chkOvf.Float32V(d.DecodeFloat64()))
}

View File

@@ -1,148 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from sort-slice.go.tmpl - DO NOT EDIT.
package codec
import (
"bytes"
"reflect"
"time"
)
type stringSlice []string
func (p stringSlice) Len() int { return len(p) }
func (p stringSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p stringSlice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type uint8Slice []uint8
func (p uint8Slice) Len() int { return len(p) }
func (p uint8Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uint8Slice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type uint64Slice []uint64
func (p uint64Slice) Len() int { return len(p) }
func (p uint64Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uint64Slice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type intSlice []int
func (p intSlice) Len() int { return len(p) }
func (p intSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p intSlice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type int32Slice []int32
func (p int32Slice) Len() int { return len(p) }
func (p int32Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p int32Slice) Less(i, j int) bool {
return p[uint(i)] < p[uint(j)]
}
type stringRv struct {
v string
r reflect.Value
}
type stringRvSlice []stringRv
func (p stringRvSlice) Len() int { return len(p) }
func (p stringRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p stringRvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type stringIntf struct {
v string
i interface{}
}
type stringIntfSlice []stringIntf
func (p stringIntfSlice) Len() int { return len(p) }
func (p stringIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p stringIntfSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type float64Rv struct {
v float64
r reflect.Value
}
type float64RvSlice []float64Rv
func (p float64RvSlice) Len() int { return len(p) }
func (p float64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p float64RvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v || isNaN64(p[uint(i)].v) && !isNaN64(p[uint(j)].v)
}
type uint64Rv struct {
v uint64
r reflect.Value
}
type uint64RvSlice []uint64Rv
func (p uint64RvSlice) Len() int { return len(p) }
func (p uint64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p uint64RvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type int64Rv struct {
v int64
r reflect.Value
}
type int64RvSlice []int64Rv
func (p int64RvSlice) Len() int { return len(p) }
func (p int64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p int64RvSlice) Less(i, j int) bool {
return p[uint(i)].v < p[uint(j)].v
}
type timeRv struct {
v time.Time
r reflect.Value
}
type timeRvSlice []timeRv
func (p timeRvSlice) Len() int { return len(p) }
func (p timeRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p timeRvSlice) Less(i, j int) bool {
return p[uint(i)].v.Before(p[uint(j)].v)
}
type bytesRv struct {
v []byte
r reflect.Value
}
type bytesRvSlice []bytesRv
func (p bytesRvSlice) Len() int { return len(p) }
func (p bytesRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p bytesRvSlice) Less(i, j int) bool {
return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1
}
type bytesIntf struct {
v []byte
i interface{}
}
type bytesIntfSlice []bytesIntf
func (p bytesIntfSlice) Len() int { return len(p) }
func (p bytesIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p bytesIntfSlice) Less(i, j int) bool {
return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1
}

View File

@@ -1,68 +0,0 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// Code generated from sort-slice.go.tmpl - DO NOT EDIT.
{{/*
xxxSlice
xxxIntf
xxxIntfSlice
xxxRv
xxxRvSlice
I'm now going to create them for
- sortables
- sortablesplus
With the parameters passed in sortables or sortablesplus,
'time, 'bytes' are special, and correspond to time.Time and []byte respectively.
*/}}
package codec
import (
"time"
"reflect"
"bytes"
)
{{/* func init() { _ = time.Unix } */}}
{{define "T"}}
func (p {{ .Type }}) Len() int { return len(p) }
func (p {{ .Type }}) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p {{ .Type }}) Less(i, j int) bool {
{{ if eq .Kind "bool" }} return !p[uint(i)]{{.V}} && p[uint(j)]{{.V}}
{{ else if eq .Kind "float32" }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}} || isNaN32(p[uint(i)]{{.V}}) && !isNaN32(p[uint(j)]{{.V}})
{{ else if eq .Kind "float64" }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}} || isNaN64(p[uint(i)]{{.V}}) && !isNaN64(p[uint(j)]{{.V}})
{{ else if eq .Kind "time" }} return p[uint(i)]{{.V}}.Before(p[uint(j)]{{.V}})
{{ else if eq .Kind "bytes" }} return bytes.Compare(p[uint(i)]{{.V}}, p[uint(j)]{{.V}}) == -1
{{ else }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}}
{{ end -}}
}
{{end}}
{{range $i, $v := sortables }}{{ $t := tshort $v }}
type {{ $v }}Slice []{{ $t }}
{{template "T" args "Kind" $v "Type" (print $v "Slice") "V" ""}}
{{end}}
{{range $i, $v := sortablesplus }}{{ $t := tshort $v }}
type {{ $v }}Rv struct {
v {{ $t }}
r reflect.Value
}
type {{ $v }}RvSlice []{{ $v }}Rv
{{template "T" args "Kind" $v "Type" (print $v "RvSlice") "V" ".v"}}
{{if eq $v "bytes" "string" -}}
type {{ $v }}Intf struct {
v {{ $t }}
i interface{}
}
type {{ $v }}IntfSlice []{{ $v }}Intf
{{template "T" args "Kind" $v "Type" (print $v "IntfSlice") "V" ".v"}}
{{end}}
{{end}}

View File

@@ -3,10 +3,14 @@
package codec
import "io"
import (
"io"
)
const maxConsecutiveEmptyWrites = 16 // 2 is sufficient, 16 is enough, 64 is optimal
// encWriter abstracts writing to a byte array or to an io.Writer.
type encWriter interface {
type encWriterI interface {
writeb([]byte)
writestr(string)
writeqstr(string) // write string wrapped in quotes ie "..."
@@ -17,7 +21,11 @@ type encWriter interface {
writen4([4]byte)
writen8([8]byte)
// isBytes() bool
end()
resetIO(w io.Writer, bufsize int, blist *bytesFreeList)
resetBytes(in []byte, out *[]byte)
}
// ---------------------------------------------
@@ -32,16 +40,18 @@ type bufioEncWriter struct {
b [16]byte // scratch buffer and padding (cache-aligned)
}
func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) {
// MARKER: use setByteAt/byteAt to elide the bounds-checks
// when we are sure that we don't go beyond the bounds.
func (z *bufioEncWriter) resetBytes(in []byte, out *[]byte) {
halt.errorStr("resetBytes is unsupported by bufioEncWriter")
}
func (z *bufioEncWriter) resetIO(w io.Writer, bufsize int, blist *bytesFreeList) {
z.w = w
z.n = 0
if bufsize <= 0 {
bufsize = defEncByteBufSize
}
// bufsize must be >= 8, to accomodate writen methods (where n <= 8)
if bufsize <= 8 {
bufsize = 8
}
// use minimum bufsize of 16, matching the array z.b and accomodating writen methods (where n <= 8)
bufsize = max(16, bufsize) // max(byteBufSize, bufsize)
if cap(z.buf) < bufsize {
if len(z.buf) > 0 && &z.buf[0] != &z.b[0] {
blist.put(z.buf)
@@ -56,17 +66,19 @@ func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) {
}
func (z *bufioEncWriter) flushErr() (err error) {
n, err := z.w.Write(z.buf[:z.n])
z.n -= n
if z.n > 0 {
if err == nil {
err = io.ErrShortWrite
var n int
for i := maxConsecutiveEmptyReads; i > 0; i-- {
n, err = z.w.Write(z.buf[:z.n])
z.n -= n
if z.n == 0 || err != nil {
return
}
// at this point: z.n > 0 && err == nil
if n > 0 {
copy(z.buf, z.buf[n:z.n+n])
}
}
return err
return io.ErrShortWrite // OR io.ErrNoProgress: not enough (or no) data written
}
func (z *bufioEncWriter) flush() {
@@ -131,6 +143,7 @@ func (z *bufioEncWriter) writen1(b1 byte) {
// z.buf[z.n] = b1
z.n++
}
func (z *bufioEncWriter) writen2(b1, b2 byte) {
if 2 > len(z.buf)-z.n {
z.flush()
@@ -169,8 +182,14 @@ func (z *bufioEncWriter) endErr() (err error) {
return
}
func (z *bufioEncWriter) end() {
halt.onerror(z.endErr())
}
// ---------------------------------------------
var bytesEncAppenderDefOut = []byte{}
// bytesEncAppender implements encWriter and can write to an byte slice.
type bytesEncAppender struct {
b []byte
@@ -203,122 +222,18 @@ func (z *bytesEncAppender) writen4(b [4]byte) {
func (z *bytesEncAppender) writen8(b [8]byte) {
z.b = append(z.b, b[:]...)
// z.b = append(z.b, b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]) // prevents inlining encWr.writen4
// z.b = append(z.b, b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7])
}
func (z *bytesEncAppender) endErr() error {
func (z *bytesEncAppender) end() {
*(z.out) = z.b
return nil
}
func (z *bytesEncAppender) reset(in []byte, out *[]byte) {
func (z *bytesEncAppender) resetBytes(in []byte, out *[]byte) {
z.b = in[:0]
z.out = out
}
// --------------------------------------------------
type encWr struct {
wb bytesEncAppender
wf *bufioEncWriter
bytes bool // encoding to []byte
// MARKER: these fields below should belong directly in Encoder.
// we pack them here for space efficiency and cache-line optimization.
js bool // is json encoder?
be bool // is binary encoder?
c containerState
calls uint16
seq uint16 // sequencer (e.g. used by binc for symbols, etc)
func (z *bytesEncAppender) resetIO(w io.Writer, bufsize int, blist *bytesFreeList) {
halt.errorStr("resetIO is unsupported by bytesEncAppender")
}
// MARKER: manually inline bytesEncAppender.writenx/writeqstr methods,
// as calling them causes encWr.writenx/writeqstr methods to not be inlined (cost > 80).
//
// i.e. e.g. instead of writing z.wb.writen2(b1, b2), use z.wb.b = append(z.wb.b, b1, b2)
func (z *encWr) writeb(s []byte) {
if z.bytes {
z.wb.writeb(s)
} else {
z.wf.writeb(s)
}
}
func (z *encWr) writestr(s string) {
if z.bytes {
z.wb.writestr(s)
} else {
z.wf.writestr(s)
}
}
// MARKER: Add WriteStr to be called directly by generated code without a genHelper forwarding function.
// Go's inlining model adds cost for forwarding functions, preventing inlining (cost goes above 80 budget).
func (z *encWr) WriteStr(s string) {
if z.bytes {
z.wb.writestr(s)
} else {
z.wf.writestr(s)
}
}
func (z *encWr) writen1(b1 byte) {
if z.bytes {
z.wb.writen1(b1)
} else {
z.wf.writen1(b1)
}
}
func (z *encWr) writen2(b1, b2 byte) {
if z.bytes {
// MARKER: z.wb.writen2(b1, b2)
z.wb.b = append(z.wb.b, b1, b2)
} else {
z.wf.writen2(b1, b2)
}
}
func (z *encWr) writen4(b [4]byte) {
if z.bytes {
// MARKER: z.wb.writen4(b1, b2, b3, b4)
z.wb.b = append(z.wb.b, b[:]...)
// z.wb.writen4(b)
} else {
z.wf.writen4(b)
}
}
func (z *encWr) writen8(b [8]byte) {
if z.bytes {
// z.wb.b = append(z.wb.b, b[:]...)
z.wb.writen8(b)
} else {
z.wf.writen8(b)
}
}
func (z *encWr) writeqstr(s string) {
if z.bytes {
// MARKER: z.wb.writeqstr(s)
z.wb.b = append(append(append(z.wb.b, '"'), s...), '"')
} else {
z.wf.writeqstr(s)
}
}
func (z *encWr) endErr() error {
if z.bytes {
return z.wb.endErr()
}
return z.wf.endErr()
}
func (z *encWr) end() {
halt.onerror(z.endErr())
}
var _ encWriter = (*encWr)(nil)