Update dependencies
parent 09a5fff3f5
commit c207500a38
go.mod (25 lines changed)

@@ -1,6 +1,6 @@
 module deevirt.fr/compute
 
-go 1.23
+go 1.23.0
+
+toolchain go1.24.0
@@ -11,7 +11,7 @@ require (
 	github.com/hashicorp/raft v1.7.2
 	github.com/hashicorp/raft-boltdb/v2 v2.3.1
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.20.5
+	github.com/prometheus/client_golang v1.21.0
 	github.com/prometheus/common v0.62.0
 	github.com/rabbitmq/amqp091-go v1.10.0
 	go.etcd.io/etcd/client/v3 v3.5.18
@@ -28,34 +28,33 @@ require (
 	github.com/boltdb/bolt v1.3.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/coreos/go-semver v0.3.1 // indirect
-	github.com/fatih/color v1.15.0 // indirect
+	github.com/fatih/color v1.18.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
-	github.com/hashicorp/go-hclog v1.6.2 // indirect
+	github.com/hashicorp/go-hclog v1.6.3 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-metrics v0.5.4 // indirect
 	github.com/hashicorp/go-msgpack v1.1.5 // indirect
-	github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect
+	github.com/hashicorp/go-msgpack/v2 v2.1.3 // indirect
 	github.com/hashicorp/golang-lru v1.0.2 // indirect
-	github.com/hashicorp/raft-boltdb v0.0.0-20250113192317-e8660f88bcc9 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.11 // indirect
-	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
+	github.com/mattn/go-colorable v0.1.14 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
-	go.etcd.io/bbolt v1.3.5 // indirect
+	go.etcd.io/bbolt v1.4.0 // indirect
 	go.etcd.io/etcd/api/v3 v3.5.18 // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.18 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/net v0.35.0 // indirect
-	golang.org/x/oauth2 v0.26.0 // indirect
+	golang.org/x/oauth2 v0.27.0 // indirect
 	golang.org/x/sys v0.30.0 // indirect
 	golang.org/x/text v0.22.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250224174004-546df14abb99 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250224174004-546df14abb99 // indirect
 )
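A bump like this is typically produced with `go get -u ./...` followed by `go mod tidy` and `go mod vendor` (an assumption about the workflow; the commit does not record the commands), which is why every version change above is mirrored by matching edits under `vendor/` below.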
go.sum (60 lines changed)

@@ -1,12 +1,10 @@
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -30,8 +28,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMSRhl4D7AQ=
 github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
-github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -73,19 +71,17 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I=
-github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
 github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY=
 github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI=
 github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=
 github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
-github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0=
-github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4=
+github.com/hashicorp/go-msgpack/v2 v2.1.3 h1:cB1w4Zrk0O3jQBTcFMKqYQWRFfsSQ/TYKNyUUVyCP2c=
+github.com/hashicorp/go-msgpack/v2 v2.1.3/go.mod h1:SjlwKKFnwBXvxD/I1bEcfJIBbEJ+MCUn39TxymNR5ZU=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
@@ -94,11 +90,10 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
 github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
 github.com/hashicorp/raft v1.7.2 h1:pyvxhfJ4R8VIAlHKvLoKQWElZspsCVT6YWuxVxsPAgc=
 github.com/hashicorp/raft v1.7.2/go.mod h1:DfvCGFxpAUPE0L4Uc8JLlTPtc3GzSbdH0MTJCLgnmJQ=
-github.com/hashicorp/raft-boltdb v0.0.0-20250113192317-e8660f88bcc9 h1:DtRY4x+oreq0BTrrfF66XeCg6DPJuR2AL4Ejeipau/A=
-github.com/hashicorp/raft-boltdb v0.0.0-20250113192317-e8660f88bcc9/go.mod h1:FLQZr+lEOtW/5JZQCqRihQOrmyqWRqpJ+pP1gjb8XTE=
+github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ=
+github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0=
 github.com/hashicorp/raft-boltdb/v2 v2.3.1 h1:ackhdCNPKblmOhjEU9+4lHSJYFkJd6Jqyvj6eW9pwkc=
 github.com/hashicorp/raft-boltdb/v2 v2.3.1/go.mod h1:n4S+g43dXF1tqDT+yzcXHhXM6y7MrlUd3TTwGRcUvQE=
 github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
@@ -113,8 +108,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -125,13 +120,12 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -154,19 +148,17 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
-github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
+github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
@@ -174,7 +166,6 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
 github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
 github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@@ -197,8 +188,8 @@ github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf
 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk=
+go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk=
 go.etcd.io/etcd/api/v3 v3.5.18 h1:Q4oDAKnmwqTo5lafvB+afbgCDF7E35E4EYV2g+FNGhs=
 go.etcd.io/etcd/api/v3 v3.5.18/go.mod h1:uY03Ob2H50077J7Qq0DeehjM/A9S8PhVfbQ1mSaMopU=
 go.etcd.io/etcd/client/pkg/v3 v3.5.18 h1:mZPOYw4h8rTk7TeJ5+3udUkfVGBqc+GCjOJYd68QgNM=
@@ -229,7 +220,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -243,12 +233,16 @@ golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
-golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
+golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
 golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -257,7 +251,6 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -268,7 +261,6 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
 golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
@@ -287,10 +279,10 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6 h1:L9JNMl/plZH9wmzQUHleO/ZZDSN+9Gh41wPczNy+5Fk=
-google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6/go.mod h1:iYONQfRdizDB8JJBybql13nArx91jcUk7zCXEsOofM4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 h1:2duwAxN2+k0xLNpjnHTXoMUgnv6VPSp5fiqTuwSxjmI=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk=
+google.golang.org/genproto/googleapis/api v0.0.0-20250224174004-546df14abb99 h1:ilJhrCga0AptpJZXmUYG4MCrx/zf3l1okuYz7YK9PPw=
+google.golang.org/genproto/googleapis/api v0.0.0-20250224174004-546df14abb99/go.mod h1:Xsh8gBVxGCcbV8ZeTB9wI5XPyZ5RvC6V3CTeeplHbiA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250224174004-546df14abb99 h1:ZSlhAUqC4r8TPzqLXQ0m3upBNZeF+Y8jQ3c4CR3Ujms=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250224174004-546df14abb99/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
 google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
 google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
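Each module version in go.sum carries two hashes: an `h1:` line covering the module's full file tree and a `/go.mod` line covering only its go.mod file, which is why every version bump above touches entries in pairs.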
vendor/github.com/fatih/color/README.md (23 lines changed; generated, vendored)

@@ -9,7 +9,7 @@ suits you.
 
 ## Install
 
-```bash
+```
 go get github.com/fatih/color
 ```
 
@@ -30,6 +30,18 @@ color.Magenta("And many others ..")
 
 ```
 
+### RGB colors
+
+If your terminal supports 24-bit colors, you can use RGB color codes.
+
+```go
+color.RGB(255, 128, 0).Println("foreground orange")
+color.RGB(230, 42, 42).Println("foreground red")
+
+color.BgRGB(255, 128, 0).Println("background orange")
+color.BgRGB(230, 42, 42).Println("background red")
+```
+
 ### Mix and reuse colors
 
 ```go
@@ -49,6 +61,11 @@ boldRed.Println("This will print text in bold red.")
 
 whiteBackground := red.Add(color.BgWhite)
 whiteBackground.Println("Red text with white background.")
+
+// Mix with RGB color codes
+color.RGB(255, 128, 0).AddBgRGB(0, 0, 0).Println("orange with black background")
+
+color.BgRGB(255, 128, 0).AddRGB(255, 255, 255).Println("orange background with white foreground")
 ```
 
 ### Use your own output (io.Writer)
@@ -161,10 +178,6 @@ c.Println("This prints again cyan...")
 
 To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams.
 
-## Todo
-
-* Save/Return previous values
-* Evaluate fmt.Formatter interface
-
 ## Credits
 
vendor/github.com/fatih/color/color.go (91 lines changed; generated, vendored)

@@ -65,6 +65,29 @@ const (
 	CrossedOut
 )
 
+const (
+	ResetBold Attribute = iota + 22
+	ResetItalic
+	ResetUnderline
+	ResetBlinking
+	_
+	ResetReversed
+	ResetConcealed
+	ResetCrossedOut
+)
+
+var mapResetAttributes map[Attribute]Attribute = map[Attribute]Attribute{
+	Bold:         ResetBold,
+	Faint:        ResetBold,
+	Italic:       ResetItalic,
+	Underline:    ResetUnderline,
+	BlinkSlow:    ResetBlinking,
+	BlinkRapid:   ResetBlinking,
+	ReverseVideo: ResetReversed,
+	Concealed:    ResetConcealed,
+	CrossedOut:   ResetCrossedOut,
+}
+
 // Foreground text colors
 const (
 	FgBlack Attribute = iota + 30
@@ -75,6 +98,9 @@ const (
 	FgMagenta
 	FgCyan
 	FgWhite
+
+	// used internally for 256 and 24-bit coloring
+	foreground
 )
 
 // Foreground Hi-Intensity text colors
@@ -99,6 +125,9 @@ const (
 	BgMagenta
 	BgCyan
 	BgWhite
+
+	// used internally for 256 and 24-bit coloring
+	background
 )
 
 // Background Hi-Intensity text colors
@@ -127,6 +156,30 @@ func New(value ...Attribute) *Color {
 	return c
 }
 
+// RGB returns a new foreground color in 24-bit RGB.
+func RGB(r, g, b int) *Color {
+	return New(foreground, 2, Attribute(r), Attribute(g), Attribute(b))
+}
+
+// BgRGB returns a new background color in 24-bit RGB.
+func BgRGB(r, g, b int) *Color {
+	return New(background, 2, Attribute(r), Attribute(g), Attribute(b))
+}
+
+// AddRGB is used to chain foreground RGB SGR parameters. Use as many as parameters to combine
+// and create custom color objects. Example: .Add(34, 0, 12).Add(255, 128, 0).
+func (c *Color) AddRGB(r, g, b int) *Color {
+	c.params = append(c.params, foreground, 2, Attribute(r), Attribute(g), Attribute(b))
+	return c
+}
+
+// AddBgRGB is used to chain background RGB SGR parameters. Use as many as parameters to combine
+// and create custom color objects. Example: .Add(34, 0, 12).Add(255, 128, 0).
+func (c *Color) AddBgRGB(r, g, b int) *Color {
+	c.params = append(c.params, background, 2, Attribute(r), Attribute(g), Attribute(b))
+	return c
+}
+
 // Set sets the given parameters immediately. It will change the color of
 // output with the given SGR parameters until color.Unset() is called.
 func Set(p ...Attribute) *Color {
@@ -246,10 +299,7 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
 // On Windows, users should wrap w with colorable.NewColorable() if w is of
 // type *os.File.
 func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
-	c.SetWriter(w)
-	defer c.UnsetWriter(w)
-
-	return fmt.Fprintln(w, a...)
+	return fmt.Fprintln(w, c.wrap(sprintln(a...)))
 }
 
 // Println formats using the default formats for its operands and writes to
@@ -258,10 +308,7 @@ func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
 // encountered. This is the standard fmt.Print() method wrapped with the given
 // color.
 func (c *Color) Println(a ...interface{}) (n int, err error) {
-	c.Set()
-	defer c.unset()
-
-	return fmt.Fprintln(Output, a...)
+	return fmt.Fprintln(Output, c.wrap(sprintln(a...)))
 }
 
 // Sprint is just like Print, but returns a string instead of printing it.
@@ -271,7 +318,7 @@ func (c *Color) Sprint(a ...interface{}) string {
 
 // Sprintln is just like Println, but returns a string instead of printing it.
 func (c *Color) Sprintln(a ...interface{}) string {
-	return c.wrap(fmt.Sprintln(a...))
+	return c.wrap(sprintln(a...)) + "\n"
 }
 
 // Sprintf is just like Printf, but returns a string instead of printing it.
@@ -353,7 +400,7 @@ func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
 // string. Windows users should use this in conjunction with color.Output.
 func (c *Color) SprintlnFunc() func(a ...interface{}) string {
 	return func(a ...interface{}) string {
-		return c.wrap(fmt.Sprintln(a...))
+		return c.wrap(sprintln(a...)) + "\n"
 	}
 }
 
@@ -383,7 +430,18 @@ func (c *Color) format() string {
 }
 
 func (c *Color) unformat() string {
-	return fmt.Sprintf("%s[%dm", escape, Reset)
+	//return fmt.Sprintf("%s[%dm", escape, Reset)
+	//for each element in sequence let's use the specific reset escape, or the generic one if not found
+	format := make([]string, len(c.params))
+	for i, v := range c.params {
+		format[i] = strconv.Itoa(int(Reset))
+		ra, ok := mapResetAttributes[v]
+		if ok {
+			format[i] = strconv.Itoa(int(ra))
+		}
+	}
+
+	return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";"))
 }
 
 // DisableColor disables the color output. Useful to not change any existing
@@ -411,6 +469,12 @@ func (c *Color) isNoColorSet() bool {
 
 // Equals returns a boolean value indicating whether two colors are equal.
 func (c *Color) Equals(c2 *Color) bool {
+	if c == nil && c2 == nil {
+		return true
+	}
+	if c == nil || c2 == nil {
+		return false
+	}
 	if len(c.params) != len(c2.params) {
 		return false
 	}
@@ -614,3 +678,8 @@ func HiCyanString(format string, a ...interface{}) string { return colorString(f
 func HiWhiteString(format string, a ...interface{}) string {
 	return colorString(format, FgHiWhite, a...)
 }
+
+// sprintln is a helper function to format a string with fmt.Sprintln and trim the trailing newline.
+func sprintln(a ...interface{}) string {
+	return strings.TrimSuffix(fmt.Sprintln(a...), "\n")
+}
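The new `sprintln` helper and the explicit `+ "\n"` fix the placement of the ANSI reset sequence: `fmt.Sprintln` appends a newline, which previously got wrapped inside the color codes so the reset landed after the line break. A minimal sketch of the before/after ordering; `wrap` here is a stand-in for the package's internal wrapping, not the real implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// wrap stands in for color.Color's internal wrapping: it surrounds s with a
// "set red" SGR sequence and the generic reset.
func wrap(s string) string { return "\x1b[31m" + s + "\x1b[0m" }

func main() {
	// Old behaviour: fmt.Sprintln's trailing newline ends up inside the wrap,
	// so the reset code lands after the line break.
	before := wrap(fmt.Sprintln("hello"))

	// New behaviour: trim the newline, wrap, then re-append it, so the reset
	// code stays on the same line as the text.
	after := wrap(strings.TrimSuffix(fmt.Sprintln("hello"), "\n")) + "\n"

	fmt.Printf("%q\n%q\n", before, after)
}
```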
vendor/github.com/hashicorp/go-hclog/intlogger.go (22 lines changed; generated, vendored)

@@ -80,12 +80,13 @@ var _ Logger = &intLogger{}
 // intLogger is an internal logger implementation. Internal in that it is
 // defined entirely by this package.
 type intLogger struct {
-	json         bool
-	callerOffset int
-	name         string
-	timeFormat   string
-	timeFn       TimeFunction
-	disableTime  bool
+	json              bool
+	jsonEscapeEnabled bool
+	callerOffset      int
+	name              string
+	timeFormat        string
+	timeFn            TimeFunction
+	disableTime       bool
 
 	// This is an interface so that it's shared by any derived loggers, since
 	// those derived loggers share the bufio.Writer as well.
@@ -173,6 +174,7 @@ func newLogger(opts *LoggerOptions) *intLogger {
 
 	l := &intLogger{
 		json:              opts.JSONFormat,
+		jsonEscapeEnabled: !opts.JSONEscapeDisabled,
 		name:              opts.Name,
 		timeFormat:        TimeFormat,
 		timeFn:            time.Now,
@@ -667,13 +669,17 @@ func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, a
 		}
 	}
 
-	err := json.NewEncoder(l.writer).Encode(vals)
+	encoder := json.NewEncoder(l.writer)
+	encoder.SetEscapeHTML(l.jsonEscapeEnabled)
+	err := encoder.Encode(vals)
 	if err != nil {
 		if _, ok := err.(*json.UnsupportedTypeError); ok {
 			plainVal := l.jsonMapEntry(t, name, level, msg)
 			plainVal["@warn"] = errJsonUnsupportedTypeMsg
 
-			json.NewEncoder(l.writer).Encode(plainVal)
+			errEncoder := json.NewEncoder(l.writer)
+			errEncoder.SetEscapeHTML(l.jsonEscapeEnabled)
+			errEncoder.Encode(plainVal)
 		}
 	}
 }
vendor/github.com/hashicorp/go-hclog/logger.go (3 lines changed; generated, vendored)

@@ -264,6 +264,9 @@ type LoggerOptions struct {
 	// Control if the output should be in JSON.
 	JSONFormat bool
 
+	// Control the escape switch of json.Encoder
+	JSONEscapeDisabled bool
+
 	// Include file and line information in each log line
 	IncludeLocation bool
 
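A sketch of how a consumer opts into the new field (the field itself comes from the diff above; `hclog.New` and the other options are standard go-hclog API). With `JSONFormat` enabled, `json.Encoder` HTML-escapes `<`, `>`, and `&` by default; setting `JSONEscapeDisabled` keeps such characters verbatim in log output:

```go
package main

import "github.com/hashicorp/go-hclog"

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:               "compute",
		JSONFormat:         true,
		JSONEscapeDisabled: true, // added in go-hclog v1.6.3 (see diff above)
	})
	// Without the option, the value would be emitted as "a \u003c b".
	logger.Info("comparison", "expr", "a < b")
}
```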
vendor/github.com/hashicorp/go-msgpack/v2/codec/rpc.go (8 lines changed; generated, vendored)

@@ -162,7 +162,13 @@ func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
 }
 
 func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
-	return c.write(r, body, true)
+	err := c.write(r, body, true)
+	if err != nil {
+		// If error occurred writing a response, close the underlying connection.
+		// See hashicorp/net-rpc-msgpackrpc#15
+		c.Close()
+	}
+	return err
 }
 
 func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
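The `WriteResponse` change above adopts a close-on-write-failure pattern: a response that fails mid-write would otherwise leave the peer blocked on a truncated msgpack frame, so the codec now tears down the connection (the referenced issue is hashicorp/net-rpc-msgpackrpc#15). A generic sketch of the same pattern outside the codec; the package and function names here are illustrative, not part of the library:

```go
package rpcutil

import "io"

// writeOrClose writes payload to w and, on failure, closes w so the peer
// observes a broken connection rather than waiting on a half-written frame.
// The original write error is returned; the Close error is deliberately
// dropped, mirroring the vendored change above.
func writeOrClose(w io.WriteCloser, payload []byte) error {
	if _, err := w.Write(payload); err != nil {
		_ = w.Close()
		return err
	}
	return nil
}
```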
vendor/github.com/klauspost/compress/README.md (140 lines changed; generated, vendored)

@@ -14,8 +14,34 @@ This package provides various compression algorithms.
 [](https://github.com/klauspost/compress/actions/workflows/go.yml)
 [](https://sourcegraph.com/github.com/klauspost/compress?badge)
 
+# package usage
+
+Use `go get github.com/klauspost/compress@latest` to add it to your project.
+
+This package will support the current Go version and 2 versions back.
+
+* Use the `nounsafe` tag to disable all use of the "unsafe" package.
+* Use the `noasm` tag to disable all assembly across packages.
+
+Use the links above for more information on each.
+
 # changelog
 
+* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
+  * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
+  * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
+  * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043
+  * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045
+  * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048
+  * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
+  * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
+
 * Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
   * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
   * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
   * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011
   * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013
 
 * Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
   * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
   * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
@@ -65,9 +91,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
   * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
 
 * Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
-  * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
+  * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871
   * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
-  * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
+  * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867
 
 * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
   * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
@@ -124,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 <summary>See changes to v1.15.x</summary>
 
 * Jan 21st, 2023 (v1.15.15)
-  * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+  * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739
   * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
   * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
   * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
@@ -167,7 +193,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 
   * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
   * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
-  * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+  * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643
 
 * July 13, 2022 (v1.15.8)
 
@@ -209,7 +235,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
   * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
   * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
   * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
-  * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+  * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590
 
 * May 11, 2022 (v1.15.4)
@@ -236,12 +262,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
   * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
 
 * Mar 3, 2022 (v1.15.0)
-  * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
-  * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+  * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)
+  * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)
   * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)
-  * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
-  * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
-  * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+  * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)
+  * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)
+  * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)
 
 Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
 
@@ -258,7 +284,7 @@ While the release has been extensively tested, it is recommended to testing when
   * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
   * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
   * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
-  * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+  * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
 
 * Feb 17, 2022 (v1.14.3)
   * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
@@ -565,12 +591,14 @@ While the release has been extensively tested, it is recommended to testing when
 
 The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
 
-| old import | new import | Documentation
-|--------------------|-----------------------------------------|--------------------|
-| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
-| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
-| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
-| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+Typical speed is about 2x of the standard library packages.
+
+| old import       | new import                            | Documentation                                                            |
+|------------------|---------------------------------------|--------------------------------------------------------------------------|
+| `compress/gzip`  | `github.com/klauspost/compress/gzip`  | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)    |
+| `compress/zlib`  | `github.com/klauspost/compress/zlib`  | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)    |
+| `archive/zip`    | `github.com/klauspost/compress/zip`   | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)      |
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)  |
 
 * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
 
@@ -625,84 +653,6 @@ This will only use up to 4KB in memory when the writer is idle.
 Compression is almost always worse than the fastest compression level
 and each write will allocate (a little) memory.
 
-# Performance Update 2018
-
-It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
-
-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
-
-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
-
-The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
-
-
-## Overall differences.
-
-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
-
-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library.
-
-This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression.
-
-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
-
-## Web Content
-
-This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
-
-Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
-
-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
-
-## Object files
-
-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
-
-The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression.
-
-The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
-
-## Highly Compressible File
-
-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
-
-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.
-
-So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
-
-## Medium-High Compressible
-
-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
-
-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
-
-## Medium Compressible
-
-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
-
-The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
-
-
-## Un-compressible Content
-
-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
-
-## Huffman only compression
-
-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character.
-
-This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
-
-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
-
-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
-
-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
-
-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-
-This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
-
 # Other packages
 
vendor/github.com/klauspost/compress/huff0/bitreader.go (25 lines changed; generated, vendored)

@@ -6,10 +6,11 @@
 package huff0
 
 import (
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 // bitReader reads a bitstream in reverse.
@@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error {
 	return nil
 }
 
-// peekBitsFast requires that at least one bit is requested every time.
+// peekByteFast requires that at least one byte is requested every time.
 // There are no checks if the buffer is filled.
 func (b *bitReaderBytes) peekByteFast() uint8 {
 	got := uint8(b.value >> 56)
@@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() {
 	}
 
 	// 2 bounds checks.
-	v := b.in[b.off-4 : b.off]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	low := le.Load32(b.in, b.off-4)
 	b.value |= uint64(low) << (b.bitsRead - 32)
 	b.bitsRead -= 32
 	b.off -= 4
@@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() {
 // fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
 func (b *bitReaderBytes) fillFastStart() {
 	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.value = le.Load64(b.in, b.off-8)
 	b.bitsRead = 0
 	b.off -= 8
 }
@@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() {
 	if b.bitsRead < 32 {
 		return
 	}
-	if b.off > 4 {
-		v := b.in[b.off-4 : b.off]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	if b.off >= 4 {
+		low := le.Load32(b.in, b.off-4)
 		b.value |= uint64(low) << (b.bitsRead - 32)
 		b.bitsRead -= 32
 		b.off -= 4
@@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() {
 		return
 	}
 
-	// 2 bounds checks.
-	v := b.in[b.off-4 : b.off]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	low := le.Load32(b.in, b.off-4)
 	b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
 	b.bitsRead -= 32
 	b.off -= 4
@@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() {
 
 // fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read.
 func (b *bitReaderShifted) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.value = le.Load64(b.in, b.off-8)
 	b.bitsRead = 0
 	b.off -= 8
 }
@@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() {
 		return
 	}
 	if b.off > 4 {
-		v := b.in[b.off-4 : b.off]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		low := le.Load32(b.in, b.off-4)
 		b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
 		b.bitsRead -= 32
 		b.off -= 4
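All of the refill paths above now funnel through the new `internal/le` helpers instead of re-slicing and assembling a `uint32` byte by byte. Per the v1.18.0 changelog entry quoted earlier ("Add unsafe little endian loaders", PR 1036), these helpers compile to a single unchecked load on 64-bit little-endian platforms and fall back to `encoding/binary` elsewhere; the two implementations are the new files that follow.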
vendor/github.com/klauspost/compress/internal/le/le.go (5 lines; new file; generated, vendored)

@@ -0,0 +1,5 @@
+package le
+
+type Indexer interface {
+	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
+}
vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go (42 lines; new file; generated, vendored)

@@ -0,0 +1,42 @@
+//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine
+
+package le
+
+import (
+	"encoding/binary"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+	return b[i]
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+	return binary.LittleEndian.Uint16(b[i:])
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+	return binary.LittleEndian.Uint32(b[i:])
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+	return binary.LittleEndian.Uint64(b[i:])
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+	binary.LittleEndian.PutUint16(b, v)
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+	binary.LittleEndian.PutUint32(b, v)
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+	binary.LittleEndian.PutUint64(b, v)
+}
55 vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go generated vendored Normal file
@ -0,0 +1,55 @@
+// We enable 64 bit LE platforms:
+
+//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine
+
+package le
+
+import (
+	"unsafe"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+	//return binary.LittleEndian.Uint16(b[i:])
+	//return *(*uint16)(unsafe.Pointer(&b[i]))
+	return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+	//return binary.LittleEndian.Uint16(b[i:])
+	//return *(*uint16)(unsafe.Pointer(&b[i]))
+	return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+	//return binary.LittleEndian.Uint32(b[i:])
+	//return *(*uint32)(unsafe.Pointer(&b[i]))
+	return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+	//return binary.LittleEndian.Uint64(b[i:])
+	//return *(*uint64)(unsafe.Pointer(&b[i]))
+	return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+	//binary.LittleEndian.PutUint16(b, v)
+	*(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+	//binary.LittleEndian.PutUint32(b, v)
+	*(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+	//binary.LittleEndian.PutUint64(b, v)
+	*(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
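The new internal/le package centralizes little-endian loads: on 64-bit LE platforms it compiles to a single unsafe load, elsewhere it falls back to encoding/binary. A minimal standalone sketch of the equivalence the callers in this diff rely on (load32 is a hypothetical stand-in for the vendored le.Load32, not the vendored API itself):

package main

import (
	"encoding/binary"
	"fmt"
)

// load32 returns the little-endian uint32 starting at offset i,
// mirroring what le.Load32(b, i) computes on every platform.
func load32(b []byte, i int) uint32 {
	return binary.LittleEndian.Uint32(b[i:])
}

func main() {
	buf := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	// Old style in this diff: re-slice, then assemble byte by byte.
	v := buf[4:8]
	old := uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
	fmt.Println(old == load32(buf, 4)) // true
}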
2 vendor/github.com/klauspost/compress/zstd/README.md generated vendored
@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee

 This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.

-This package is pure Go and without use of "unsafe".
+This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features.

 The `zstd` package is provided as open source software using a Go standard license.
37 vendor/github.com/klauspost/compress/zstd/bitreader.go generated vendored
@ -5,11 +5,12 @@
 package zstd

 import (
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
 	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
 )

 // bitReader reads a bitstream in reverse.
@ -18,6 +19,7 @@ import (
 type bitReader struct {
 	in       []byte
 	value    uint64 // Maybe use [16]byte, but shifting is awkward.
+	cursor   int    // offset where next read should end
 	bitsRead uint8
 }

@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error {
 	if v == 0 {
 		return errors.New("corrupt stream, did not find end of stream")
 	}
+	b.cursor = len(in)
 	b.bitsRead = 64
 	b.value = 0
 	if len(in) >= 8 {
@ -67,18 +70,15 @@ func (b *bitReader) fillFast() {
 	if b.bitsRead < 32 {
 		return
 	}
-	v := b.in[len(b.in)-4:]
-	b.in = b.in[:len(b.in)-4]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	b.value = (b.value << 32) | uint64(low)
+	b.cursor -= 4
+	b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
 	b.bitsRead -= 32
 }

 // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
 func (b *bitReader) fillFastStart() {
-	v := b.in[len(b.in)-8:]
-	b.in = b.in[:len(b.in)-8]
-	b.value = binary.LittleEndian.Uint64(v)
+	b.cursor -= 8
+	b.value = le.Load64(b.in, b.cursor)
 	b.bitsRead = 0
 }

@ -87,25 +87,23 @@ func (b *bitReader) fill() {
 	if b.bitsRead < 32 {
 		return
 	}
-	if len(b.in) >= 4 {
-		v := b.in[len(b.in)-4:]
-		b.in = b.in[:len(b.in)-4]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-		b.value = (b.value << 32) | uint64(low)
+	if b.cursor >= 4 {
+		b.cursor -= 4
+		b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
 		b.bitsRead -= 32
 		return
 	}

-	b.bitsRead -= uint8(8 * len(b.in))
-	for len(b.in) > 0 {
-		b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
-		b.in = b.in[:len(b.in)-1]
+	b.bitsRead -= uint8(8 * b.cursor)
+	for b.cursor > 0 {
+		b.cursor -= 1
+		b.value = (b.value << 8) | uint64(b.in[b.cursor])
 	}
 }

 // finished returns true if all bits have been read from the bit stream.
 func (b *bitReader) finished() bool {
-	return len(b.in) == 0 && b.bitsRead >= 64
+	return b.cursor == 0 && b.bitsRead >= 64
 }

 // overread returns true if more bits have been requested than is on the stream.
@ -115,13 +113,14 @@ func (b *bitReader) overread() bool {

 // remain returns the number of bits remaining.
 func (b *bitReader) remain() uint {
-	return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
+	return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
 }

 // close the bitstream and returns an error if out-of-buffer reads occurred.
 func (b *bitReader) close() error {
 	// Release reference.
 	b.in = nil
+	b.cursor = 0
 	if !b.finished() {
 		return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
 	}
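The bitReader change above swaps slice shrinking (b.in = b.in[:len(b.in)-4]) for an explicit cursor into a fixed slice, which keeps b.in immutable and lets le.Load32/Load64 index it directly. A small self-contained sketch of the pattern (an assumed simplification, not the vendored type):

package main

import (
	"encoding/binary"
	"fmt"
)

type reverseReader struct {
	in     []byte
	cursor int // offset where the next backward read ends
}

// read32 consumes four bytes from the tail, moving the cursor back.
func (r *reverseReader) read32() uint32 {
	r.cursor -= 4
	return binary.LittleEndian.Uint32(r.in[r.cursor:])
}

func main() {
	r := reverseReader{in: []byte{1, 0, 0, 0, 2, 0, 0, 0}, cursor: 8}
	fmt.Println(r.read32(), r.read32()) // 2 1
}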
19 vendor/github.com/klauspost/compress/zstd/blockdec.go generated vendored
@ -5,14 +5,10 @@
 package zstd

 import (
-	"bytes"
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"hash/crc32"
 	"io"
-	"os"
-	"path/filepath"
 	"sync"

 	"github.com/klauspost/compress/huff0"
@ -648,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 		println("initializing sequences:", err)
 		return err
 	}
-	// Extract blocks...
-	if false && hist.dict == nil {
-		fatalErr := func(err error) {
-			if err != nil {
-				panic(err)
-			}
-		}
-		fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
-		var buf bytes.Buffer
-		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
-		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
-		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
-		buf.Write(in)
-		os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
-	}

 	return nil
 }
27 vendor/github.com/klauspost/compress/zstd/blockenc.go generated vendored
@ -9,6 +9,7 @@ import (
 	"fmt"
 	"math"
 	"math/bits"
+	"slices"

 	"github.com/klauspost/compress/huff0"
 )
@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int {
 		// All 0
 		return 0
 	}
-	maxCount := func(a []uint32) int {
-		var max uint32
-		for _, v := range a {
-			if v > max {
-				max = v
-			}
-		}
-		return int(max)
-	}
-	cnt := maxCount(hist[:maxSym])
+	cnt := int(slices.Max(hist[:maxSym]))
 	if cnt == len(data) {
 		// RLE
 		return 0
@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() {
 			}
 		}
 	}
-	maxCount := func(a []uint32) int {
-		var max uint32
-		for _, v := range a {
-			if v > max {
-				max = v
-			}
-		}
-		return int(max)
-	}
 	if debugAsserts && mlMax > maxMatchLengthSymbol {
 		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
 	}
@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() {
 		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
 	}

-	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
-	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
-	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+	b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
+	b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
+	b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
 }
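Both hand-rolled maxCount closures above are replaced by the standard library's slices.Max, available since Go 1.21 (consistent with the go 1.23.0 floor in go.mod). The equivalent call, for reference:

package main

import (
	"fmt"
	"slices"
)

func main() {
	hist := []uint32{3, 9, 1}
	// Same result as the removed maxCount(hist) helper.
	fmt.Println(int(slices.Max(hist))) // 9
}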
3 vendor/github.com/klauspost/compress/zstd/decoder.go generated vendored
@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
 }

 // Read bytes from the decompressed stream into p.
-// Returns the number of bytes written and any error that occurred.
+// Returns the number of bytes read and any error that occurred.
 // When the stream is done, io.EOF will be returned.
 func (d *Decoder) Read(p []byte) (int, error) {
 	var n int
@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 		frame.bBuf = nil
 		if frame.history.decoders.br != nil {
 			frame.history.decoders.br.in = nil
+			frame.history.decoders.br.cursor = 0
 		}
 		d.decoders <- block
 	}()
2 vendor/github.com/klauspost/compress/zstd/enc_base.go generated vendored
@ -116,7 +116,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
 			panic(err)
 		}
 		if t < 0 {
-			err := fmt.Sprintf("s (%d) < 0", s)
+			err := fmt.Sprintf("t (%d) < 0", t)
 			panic(err)
 		}
 		if s-t > e.maxMatchOff {
11 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go generated vendored
@ -7,20 +7,25 @@
 package zstd

 import (
-	"encoding/binary"
 	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
 )

 // matchLen returns the maximum common prefix length of a and b.
 // a must be the shortest of the two.
 func matchLen(a, b []byte) (n int) {
-	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
-		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+	left := len(a)
+	for left >= 8 {
+		diff := le.Load64(a, n) ^ le.Load64(b, n)
 		if diff != 0 {
 			return n + bits.TrailingZeros64(diff)>>3
 		}
 		n += 8
+		left -= 8
 	}
+	a = a[n:]
+	b = b[n:]

 	for i := range a {
 		if a[i] != b[i] {
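The rewritten matchLen keeps the same trick — XOR eight bytes at a time and locate the first differing byte via the trailing zero count — but indexes with le.Load64 instead of re-slicing each iteration. A standalone sketch using encoding/binary in place of the vendored le package:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the length of the common prefix of a and b;
// a must be the shorter slice.
func matchLen(a, b []byte) (n int) {
	left := len(a)
	for left >= 8 {
		diff := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
		if diff != 0 {
			// Each differing byte contributes 8 trailing zero bits at most.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		left -= 8
	}
	a, b = a[n:], b[n:]
	for i := range a {
		if a[i] != b[i] {
			return n
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("zstandard!"), []byte("zstandarD!"))) // 8
}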
2 vendor/github.com/klauspost/compress/zstd/seqdec.go generated vendored
@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 			return io.ErrUnexpectedEOF
 		}
 		var ll, mo, ml int
-		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+		if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
2 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go generated vendored
@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
 	}
 	for i := range seqs {
 		var ll, mo, ml int
-		if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
+		if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
2 vendor/github.com/klauspost/compress/zstd/seqenc.go generated vendored
@ -69,7 +69,6 @@ var llBitsTable = [maxLLCode + 1]byte{
 func llCode(litLength uint32) uint8 {
 	const llDeltaCode = 19
 	if litLength <= 63 {
-		// Compiler insists on bounds check (Go 1.12)
 		return llCodeTable[litLength&63]
 	}
 	return uint8(highBit(litLength)) + llDeltaCode
@ -102,7 +101,6 @@ var mlBitsTable = [maxMLCode + 1]byte{
 func mlCode(mlBase uint32) uint8 {
 	const mlDeltaCode = 36
 	if mlBase <= 127 {
-		// Compiler insists on bounds check (Go 1.12)
 		return mlCodeTable[mlBase&127]
 	}
 	return uint8(highBit(mlBase)) + mlDeltaCode
4 vendor/github.com/klauspost/compress/zstd/snappy.go generated vendored
@ -197,7 +197,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {

 			n, r.err = w.Write(r.block.output)
 			if r.err != nil {
-				return written, err
+				return written, r.err
 			}
 			written += int64(n)
 			continue
@ -239,7 +239,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
 			}
 			n, r.err = w.Write(r.block.output)
 			if r.err != nil {
-				return written, err
+				return written, r.err
 			}
 			written += int64(n)
 			continue
7 vendor/github.com/klauspost/compress/zstd/zstd.go generated vendored
@ -5,10 +5,11 @@ package zstd

 import (
 	"bytes"
-	"encoding/binary"
 	"errors"
 	"log"
 	"math"
+
+	"github.com/klauspost/compress/internal/le"
 )

 // enable debug printing
@ -110,11 +111,11 @@ func printf(format string, a ...interface{}) {
 }

 func load3232(b []byte, i int32) uint32 {
-	return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
+	return le.Load32(b, i)
 }

 func load6432(b []byte, i int32) uint64 {
-	return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
+	return le.Load64(b, i)
 }

 type byter interface {
38 vendor/github.com/mattn/go-colorable/colorable_appengine.go generated vendored
@ -1,38 +0,0 @@
-//go:build appengine
-// +build appengine
-
-package colorable
-
-import (
-	"io"
-	"os"
-
-	_ "github.com/mattn/go-isatty"
-)
-
-// NewColorable returns new instance of Writer which handles escape sequence.
-func NewColorable(file *os.File) io.Writer {
-	if file == nil {
-		panic("nil passed instead of *os.File to NewColorable()")
-	}
-
-	return file
-}
-
-// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
-func NewColorableStdout() io.Writer {
-	return os.Stdout
-}
-
-// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
-func NewColorableStderr() io.Writer {
-	return os.Stderr
-}
-
-// EnableColorsStdout enable colors if possible.
-func EnableColorsStdout(enabled *bool) func() {
-	if enabled != nil {
-		*enabled = true
-	}
-	return func() {}
-}
4 vendor/github.com/mattn/go-colorable/colorable_others.go generated vendored
@ -1,5 +1,5 @@
-//go:build !windows && !appengine
-// +build !windows,!appengine
+//go:build !windows || appengine
+// +build !windows appengine

 package colorable
22 vendor/github.com/mattn/go-colorable/colorable_windows.go generated vendored
@ -11,7 +11,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"syscall"
+	syscall "golang.org/x/sys/windows"
 	"unsafe"

 	"github.com/mattn/go-isatty"
@ -73,7 +73,7 @@ type consoleCursorInfo struct {
 }

 var (
-	kernel32                       = syscall.NewLazyDLL("kernel32.dll")
+	kernel32                       = syscall.NewLazySystemDLL("kernel32.dll")
 	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
 	procSetConsoleTextAttribute    = kernel32.NewProc("SetConsoleTextAttribute")
 	procSetConsoleCursorPosition   = kernel32.NewProc("SetConsoleCursorPosition")
@ -87,8 +87,8 @@ var (
 	procCreateConsoleScreenBuffer  = kernel32.NewProc("CreateConsoleScreenBuffer")
 )

-// Writer provides colorable Writer to the console
-type Writer struct {
+// writer provides colorable Writer to the console
+type writer struct {
 	out       io.Writer
 	handle    syscall.Handle
 	althandle syscall.Handle
@ -98,7 +98,7 @@ type Writer struct {
 	mutex     sync.Mutex
 }

-// NewColorable returns new instance of Writer which handles escape sequence from File.
+// NewColorable returns new instance of writer which handles escape sequence from File.
 func NewColorable(file *os.File) io.Writer {
 	if file == nil {
 		panic("nil passed instead of *os.File to NewColorable()")
@ -112,17 +112,17 @@ func NewColorable(file *os.File) io.Writer {
 		var csbi consoleScreenBufferInfo
 		handle := syscall.Handle(file.Fd())
 		procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
-		return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
+		return &writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
 	}
 	return file
 }

-// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
+// NewColorableStdout returns new instance of writer which handles escape sequence for stdout.
 func NewColorableStdout() io.Writer {
 	return NewColorable(os.Stdout)
 }

-// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
+// NewColorableStderr returns new instance of writer which handles escape sequence for stderr.
 func NewColorableStderr() io.Writer {
 	return NewColorable(os.Stderr)
 }
@ -434,7 +434,7 @@ func atoiWithDefault(s string, def int) (int, error) {
 }

 // Write writes data on console
-func (w *Writer) Write(data []byte) (n int, err error) {
+func (w *writer) Write(data []byte) (n int, err error) {
 	w.mutex.Lock()
 	defer w.mutex.Unlock()
 	var csbi consoleScreenBufferInfo
@ -560,7 +560,7 @@ loop:
 			}
 			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
 		case 'E':
-			n, err = strconv.Atoi(buf.String())
+			n, err = atoiWithDefault(buf.String(), 1)
 			if err != nil {
 				continue
 			}
@ -569,7 +569,7 @@ loop:
 			csbi.cursorPosition.y += short(n)
 			procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
 		case 'F':
-			n, err = strconv.Atoi(buf.String())
+			n, err = atoiWithDefault(buf.String(), 1)
 			if err != nil {
 				continue
 			}
3 vendor/github.com/mattn/go-isatty/isatty_bsd.go generated vendored
@ -1,6 +1,7 @@
-//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
+//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo
 // +build darwin freebsd openbsd netbsd dragonfly hurd
 // +build !appengine
+// +build !tinygo

 package isatty
5 vendor/github.com/mattn/go-isatty/isatty_others.go generated vendored
@ -1,5 +1,6 @@
-//go:build appengine || js || nacl || wasm
-// +build appengine js nacl wasm
+//go:build (appengine || js || nacl || tinygo || wasm) && !windows
+// +build appengine js nacl tinygo wasm
+// +build !windows

 package isatty
3 vendor/github.com/mattn/go-isatty/isatty_tcgets.go generated vendored
@ -1,6 +1,7 @@
-//go:build (linux || aix || zos) && !appengine
+//go:build (linux || aix || zos) && !appengine && !tinygo
 // +build linux aix zos
 // +build !appengine
+// +build !tinygo

 package isatty
8 vendor/github.com/prometheus/client_golang/api/client.go generated vendored
@ -79,6 +79,10 @@ type Client interface {
 	Do(context.Context, *http.Request) (*http.Response, []byte, error)
 }

+type CloseIdler interface {
+	CloseIdleConnections()
+}
+
 // NewClient returns a new Client.
 //
 // It is safe to use the returned Client from multiple goroutines.
@ -118,6 +122,10 @@ func (c *httpClient) URL(ep string, args map[string]string) *url.URL {
 	return &u
 }

+func (c *httpClient) CloseIdleConnections() {
+	c.client.CloseIdleConnections()
+}
+
 func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
 	if ctx != nil {
 		req = req.WithContext(ctx)
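The new exported CloseIdler interface lets callers release idle keep-alive connections held by the underlying HTTP client. A hypothetical caller (the closeIdle helper and its package are illustrative, not part of the library):

package clientutil

import "github.com/prometheus/client_golang/api"

// closeIdle shuts down idle connections if the client supports it.
func closeIdle(c api.Client) {
	if ci, ok := c.(api.CloseIdler); ok {
		ci.CloseIdleConnections()
	}
}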
50 vendor/github.com/prometheus/client_golang/prometheus/atomic_update.go generated vendored Normal file
@ -0,0 +1,50 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"math"
+	"sync/atomic"
+	"time"
+)
+
+// atomicUpdateFloat atomically updates the float64 value pointed to by bits
+// using the provided updateFunc, with an exponential backoff on contention.
+func atomicUpdateFloat(bits *uint64, updateFunc func(float64) float64) {
+	const (
+		// both numbers are derived from empirical observations
+		// documented in this PR: https://github.com/prometheus/client_golang/pull/1661
+		maxBackoff     = 320 * time.Millisecond
+		initialBackoff = 10 * time.Millisecond
+	)
+	backoff := initialBackoff
+
+	for {
+		loadedBits := atomic.LoadUint64(bits)
+		oldFloat := math.Float64frombits(loadedBits)
+		newFloat := updateFunc(oldFloat)
+		newBits := math.Float64bits(newFloat)
+
+		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+			break
+		} else {
+			// Exponential backoff with sleep and cap to avoid infinite wait
+			time.Sleep(backoff)
+			backoff *= 2
+			if backoff > maxBackoff {
+				backoff = maxBackoff
+			}
+		}
+	}
+}
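atomicUpdateFloat generalizes the CAS loops that counter, gauge, and histogram each carried before this bump, adding backoff to reduce contention (see the PR linked in the code above). A trimmed-down sketch without the backoff, showing the bit-pattern CAS idea:

package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// updateFloat applies f atomically to the float64 stored in *bits.
func updateFloat(bits *uint64, f func(float64) float64) {
	for {
		old := atomic.LoadUint64(bits)
		nv := math.Float64bits(f(math.Float64frombits(old)))
		if atomic.CompareAndSwapUint64(bits, old, nv) {
			return
		}
	}
}

func main() {
	var bits uint64 // bit pattern of 0.0
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			updateFloat(&bits, func(v float64) float64 { return v + 0.5 })
		}()
	}
	wg.Wait()
	fmt.Println(math.Float64frombits(bits)) // 50
}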
10 vendor/github.com/prometheus/client_golang/prometheus/counter.go generated vendored
@ -134,13 +134,9 @@ func (c *counter) Add(v float64) {
 		return
 	}

-	for {
-		oldBits := atomic.LoadUint64(&c.valBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
-			return
-		}
-	}
+	atomicUpdateFloat(&c.valBits, func(oldVal float64) float64 {
+		return oldVal + v
+	})
 }

 func (c *counter) AddWithExemplar(v float64, e Labels) {
15 vendor/github.com/prometheus/client_golang/prometheus/desc.go generated vendored
@ -189,12 +189,15 @@ func (d *Desc) String() string {
 			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
 		)
 	}
-	vlStrings := make([]string, 0, len(d.variableLabels.names))
-	for _, vl := range d.variableLabels.names {
-		if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
-			vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
-		} else {
-			vlStrings = append(vlStrings, vl)
+	vlStrings := []string{}
+	if d.variableLabels != nil {
+		vlStrings = make([]string, 0, len(d.variableLabels.names))
+		for _, vl := range d.variableLabels.names {
+			if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+				vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+			} else {
+				vlStrings = append(vlStrings, vl)
+			}
 		}
 	}
 	return fmt.Sprintf(
10 vendor/github.com/prometheus/client_golang/prometheus/gauge.go generated vendored
@ -120,13 +120,9 @@ func (g *gauge) Dec() {
 }

 func (g *gauge) Add(val float64) {
-	for {
-		oldBits := atomic.LoadUint64(&g.valBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
-		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
-			return
-		}
-	}
+	atomicUpdateFloat(&g.valBits, func(oldVal float64) float64 {
+		return oldVal + val
+	})
 }

 func (g *gauge) Sub(val float64) {
2 vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go generated vendored
@ -288,7 +288,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
 }

 func attachOriginalName(desc, origName string) string {
-	return fmt.Sprintf("%s Sourced from %s", desc, origName)
+	return fmt.Sprintf("%s Sourced from %s.", desc, origName)
 }

 // Describe returns all descriptions of the collector.
259 vendor/github.com/prometheus/client_golang/prometheus/histogram.go generated vendored
@ -14,6 +14,7 @@
 package prometheus

 import (
+	"errors"
 	"fmt"
 	"math"
 	"runtime"
@ -28,6 +29,11 @@ import (
 	"google.golang.org/protobuf/types/known/timestamppb"
 )

+const (
+	nativeHistogramSchemaMaximum = 8
+	nativeHistogramSchemaMinimum = -4
+)
+
 // nativeHistogramBounds for the frac of observed values. Only relevant for
 // schema > 0. The position in the slice is the schema. (0 is never used, just
 // here for convenience of using the schema directly as the index.)
@ -330,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
 // used for the Buckets field of HistogramOpts.
 //
 // The function panics if 'count' is 0 or negative, if 'min' is 0 or negative.
-func ExponentialBucketsRange(min, max float64, count int) []float64 {
+func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 {
 	if count < 1 {
 		panic("ExponentialBucketsRange count needs a positive count")
 	}
-	if min <= 0 {
+	if minBucket <= 0 {
 		panic("ExponentialBucketsRange min needs to be greater than 0")
 	}

@ -342,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 {
 	// max = min*growthFactor^(bucketCount-1)

 	// We know max/min and highest bucket. Solve for growthFactor.
-	growthFactor := math.Pow(max/min, 1.0/float64(count-1))
+	growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1))

 	// Now that we know growthFactor, solve for each bucket.
 	buckets := make([]float64, count)
 	for i := 1; i <= count; i++ {
-		buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
+		buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
 	}
 	return buckets
 }
@ -858,15 +864,35 @@ func (h *histogram) Write(out *dto.Metric) error {
 // findBucket returns the index of the bucket for the provided value, or
 // len(h.upperBounds) for the +Inf bucket.
 func (h *histogram) findBucket(v float64) int {
-	// TODO(beorn7): For small numbers of buckets (<30), a linear search is
-	// slightly faster than the binary search. If we really care, we could
-	// switch from one search strategy to the other depending on the number
-	// of buckets.
-	//
-	// Microbenchmarks (BenchmarkHistogramNoLabels):
-	// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
-	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
-	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+	n := len(h.upperBounds)
+	if n == 0 {
+		return 0
+	}
+
+	// Early exit: if v is less than or equal to the first upper bound, return 0
+	if v <= h.upperBounds[0] {
+		return 0
+	}
+
+	// Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
+	if v > h.upperBounds[n-1] {
+		return n
+	}
+
+	// For small arrays, use simple linear search
+	// "magic number" 35 is result of tests on couple different (AWS and baremetal) servers
+	// see more details here: https://github.com/prometheus/client_golang/pull/1662
+	if n < 35 {
+		for i, bound := range h.upperBounds {
+			if v <= bound {
+				return i
+			}
+		}
+		// If v is greater than all upper bounds, return len(h.upperBounds)
+		return n
+	}
+
+	// For larger arrays, use stdlib's binary search
 	return sort.SearchFloat64s(h.upperBounds, v)
 }
@ -1440,9 +1466,9 @@ func pickSchema(bucketFactor float64) int32 {
 	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
 	switch {
 	case floor <= -8:
-		return 8
+		return nativeHistogramSchemaMaximum
 	case floor >= 4:
-		return -4
+		return nativeHistogramSchemaMinimum
 	default:
 		return -int32(floor)
 	}
@ -1621,13 +1647,9 @@ func waitForCooldown(count uint64, counts *histogramCounts) {
 // atomicAddFloat adds the provided float atomically to another float
 // represented by the bit pattern the bits pointer is pointing to.
 func atomicAddFloat(bits *uint64, v float64) {
-	for {
-		loadedBits := atomic.LoadUint64(bits)
-		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
-		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
-			break
-		}
-	}
+	atomicUpdateFloat(bits, func(oldVal float64) float64 {
+		return oldVal + v
+	})
 }

 // atomicDecUint32 atomically decrements the uint32 p points to. See
@ -1835,3 +1857,196 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
 		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
 	}
 }
+
+type constNativeHistogram struct {
+	desc *Desc
+	dto.Histogram
+	labelPairs []*dto.LabelPair
+}
+
+func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
+	var bucketPopulationSum int64
+	for _, v := range positiveBuckets {
+		bucketPopulationSum += v
+	}
+	for _, v := range negativeBuckets {
+		bucketPopulationSum += v
+	}
+	bucketPopulationSum += int64(zeroBucket)
+
+	// If the sum of observations is NaN, the number of observations must be greater or equal to the sum of all bucket counts.
+	// Otherwise, the number of observations must be equal to the sum of all bucket counts.
+
+	if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
+		!math.IsNaN(sum) && bucketPopulationSum != int64(count) {
+		return errors.New("the sum of all bucket populations exceeds the count of observations")
+	}
+	return nil
+}
+
+// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
+// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// zeroBucket counts all (positive and negative)
+// observations in the zero bucket (with an absolute value less or equal
+// the current threshold).
+// positiveBuckets and negativeBuckets are separate maps for negative and positive
+// observations. The map's value is an int64, counting observations in
+// that bucket. The map's key is the
+// index of the bucket according to the used
+// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets.
+// NewConstNativeHistogram returns an error if
+//   - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid.
+//   - the schema passed is not between 8 and -4
+//   - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN)
+//
+// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus.
+func NewConstNativeHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	positiveBuckets, negativeBuckets map[int]int64,
+	zeroBucket uint64,
+	schema int32,
+	zeroThreshold float64,
+	createdTimestamp time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum {
+		return nil, errors.New("invalid native histogram schema")
+	}
+	if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil {
+		return nil, err
+	}
+
+	NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets)
+	PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets)
+	ret := &constNativeHistogram{
+		desc: desc,
+		Histogram: dto.Histogram{
+			CreatedTimestamp: timestamppb.New(createdTimestamp),
+			Schema:           &schema,
+			ZeroThreshold:    &zeroThreshold,
+			SampleCount:      &count,
+			SampleSum:        &sum,
+
+			NegativeSpan:  NegativeSpan,
+			NegativeDelta: NegativeDelta,
+
+			PositiveSpan:  PositiveSpan,
+			PositiveDelta: PositiveDelta,
+
+			ZeroCount: proto.Uint64(zeroBucket),
+		},
+		labelPairs: MakeLabelPairs(desc, labelValues),
+	}
+	if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 {
+		ret.PositiveSpan = []*dto.BucketSpan{{
+			Offset: proto.Int32(0),
+			Length: proto.Uint32(0),
+		}}
+	}
+	return ret, nil
+}
+
+// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where
+// NewConstNativeHistogram would have returned an error.
+func MustNewConstNativeHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	positiveBuckets, negativeBuckets map[int]int64,
+	zeroBucket uint64,
+	nativeHistogramSchema int32,
+	nativeHistogramZeroThreshold float64,
+	createdTimestamp time.Time,
+	labelValues ...string,
+) Metric {
+	nativehistogram, err := NewConstNativeHistogram(desc,
+		count,
+		sum,
+		positiveBuckets,
+		negativeBuckets,
+		zeroBucket,
+		nativeHistogramSchema,
+		nativeHistogramZeroThreshold,
+		createdTimestamp,
+		labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return nativehistogram
+}
+
+func (h *constNativeHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constNativeHistogram) Write(out *dto.Metric) error {
+	out.Histogram = &h.Histogram
+	out.Label = h.labelPairs
+	return nil
+}
+
+func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) {
+	if len(buckets) == 0 {
+		return nil, nil
+	}
+	var ii []int
+	for k := range buckets {
+		ii = append(ii, k)
+	}
+	sort.Ints(ii)
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		count := buckets[i]
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
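findBucket now picks its search strategy by bucket count: early exits for the common edges, a linear scan below 35 buckets, binary search above (threshold measured in the PR cited in the code above). A standalone sketch of the same logic:

package main

import (
	"fmt"
	"sort"
)

// findBucket returns the bucket index for v, or len(bounds) for +Inf.
func findBucket(bounds []float64, v float64) int {
	n := len(bounds)
	if n == 0 || v <= bounds[0] {
		return 0
	}
	if v > bounds[n-1] {
		return n
	}
	if n < 35 { // linear search wins for small slices
		for i, b := range bounds {
			if v <= b {
				return i
			}
		}
		return n
	}
	return sort.SearchFloat64s(bounds, v)
}

func main() {
	fmt.Println(findBucket([]float64{0.1, 0.5, 1, 5}, 0.7)) // 2
}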
19 vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go generated vendored
@ -22,17 +22,18 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"strconv"
 	"strings"
 )

-func min(a, b int) int {
+func minInt(a, b int) int {
 	if a < b {
 		return a
 	}
 	return b
 }

-func max(a, b int) int {
+func maxInt(a, b int) int {
 	if a > b {
 		return a
 	}
@ -427,12 +428,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
 	if codes[0].Tag == 'e' {
 		c := codes[0]
 		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
-		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+		codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2}
 	}
 	if codes[len(codes)-1].Tag == 'e' {
 		c := codes[len(codes)-1]
 		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
-		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+		codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)}
 	}
 	nn := n + n
 	groups := [][]OpCode{}
@ -443,12 +444,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
 		// there is a large range with no changes.
 		if c.Tag == 'e' && i2-i1 > nn {
 			group = append(group, OpCode{
-				c.Tag, i1, min(i2, i1+n),
-				j1, min(j2, j1+n),
+				c.Tag, i1, minInt(i2, i1+n),
+				j1, minInt(j2, j1+n),
 			})
 			groups = append(groups, group)
 			group = []OpCode{}
-			i1, j1 = max(i1, i2-n), max(j1, j2-n)
+			i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n)
 		}
 		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
 	}
@ -515,7 +516,7 @@ func (m *SequenceMatcher) QuickRatio() float64 {
 // is faster to compute than either .Ratio() or .QuickRatio().
 func (m *SequenceMatcher) RealQuickRatio() float64 {
 	la, lb := len(m.a), len(m.b)
-	return calculateRatio(min(la, lb), la+lb)
+	return calculateRatio(minInt(la, lb), la+lb)
 }

 // Convert range to the "ed" format
@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string {
 	beginning := start + 1 // lines start numbering with one
 	length := stop - start
 	if length == 1 {
-		return fmt.Sprintf("%d", beginning)
+		return strconv.Itoa(beginning)
 	}
 	if length == 0 {
 		beginning-- // empty ranges begin at line just before the range
3 vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go generated vendored
@ -66,7 +66,8 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
 		name += "_total"
 	}

-	valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
+	// Our current conversion moves to legacy naming, so use legacy validation.
+	valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name)
 	switch d.Kind {
 	case metrics.KindUint64:
 	case metrics.KindFloat64:
24 vendor/github.com/prometheus/client_golang/prometheus/metric.go generated vendored
@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string {
 	if name == "" {
 		return ""
 	}
-	switch {
-	case namespace != "" && subsystem != "":
-		return strings.Join([]string{namespace, subsystem, name}, "_")
-	case namespace != "":
-		return strings.Join([]string{namespace, name}, "_")
-	case subsystem != "":
-		return strings.Join([]string{subsystem, name}, "_")
+
+	sb := strings.Builder{}
+	sb.Grow(len(namespace) + len(subsystem) + len(name) + 2)
+
+	if namespace != "" {
+		sb.WriteString(namespace)
+		sb.WriteString("_")
 	}
-	return name
+
+	if subsystem != "" {
+		sb.WriteString(subsystem)
+		sb.WriteString("_")
+	}
+
+	sb.WriteString(name)
+
+	return sb.String()
 }

 type invalidMetric struct {
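BuildFQName trades the switch over strings.Join for a single pre-sized strings.Builder; observable behavior is unchanged. For reference:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Empty parts are skipped rather than producing stray underscores.
	fmt.Println(prometheus.BuildFQName("app", "http", "requests_total")) // app_http_requests_total
	fmt.Println(prometheus.BuildFQName("", "http", "requests_total"))    // http_requests_total
	fmt.Println(prometheus.BuildFQName("app", "", "requests_total"))     // app_requests_total
}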
31 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go generated vendored
@ -23,6 +23,7 @@ import (

 type processCollector struct {
 	collectFn    func(chan<- Metric)
+	describeFn   func(chan<- *Desc)
 	pidFn        func() (int, error)
 	reportErrors bool
 	cpuTotal     *Desc
@ -122,26 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
 	// Set up process metric collection if supported by the runtime.
 	if canCollectProcess() {
 		c.collectFn = c.processCollect
+		c.describeFn = c.describe
 	} else {
-		c.collectFn = func(ch chan<- Metric) {
-			c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
-		}
+		c.collectFn = c.errorCollectFn
+		c.describeFn = c.errorDescribeFn
 	}

 	return c
 }

-// Describe returns all descriptions of the collector.
-func (c *processCollector) Describe(ch chan<- *Desc) {
-	ch <- c.cpuTotal
-	ch <- c.openFDs
-	ch <- c.maxFDs
-	ch <- c.vsize
-	ch <- c.maxVsize
-	ch <- c.rss
-	ch <- c.startTime
-	ch <- c.inBytes
-	ch <- c.outBytes
+func (c *processCollector) errorCollectFn(ch chan<- Metric) {
+	c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+}
+
+func (c *processCollector) errorDescribeFn(ch chan<- *Desc) {
+	if c.reportErrors {
+		ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform"))
+	}
 }

 // Collect returns the current state of all metrics of the collector.
@ -149,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) {
 	c.collectFn(ch)
 }

+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+	c.describeFn(ch)
+}
+
 func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
 	if !c.reportErrors {
 		return
51 vendor/github.com/prometheus/client_golang/prometheus/process_collector_cgo_darwin.go generated vendored Normal file
@ -0,0 +1,51 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && cgo
+
+package prometheus
+
+/*
+int get_memory_info(unsigned long long *rss, unsigned long long *vs);
+*/
+import "C"
+import "fmt"
+
+func getMemory() (*memoryInfo, error) {
+	var rss, vsize C.ulonglong
+
+	if err := C.get_memory_info(&rss, &vsize); err != 0 {
+		return nil, fmt.Errorf("task_info() failed with 0x%x", int(err))
+	}
+
+	return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.maxVsize
+	ch <- c.startTime
+	ch <- c.rss
+	ch <- c.vsize
+
+	/* the process could be collected but not implemented yet
+	ch <- c.inBytes
+	ch <- c.outBytes
+	*/
+}
128 vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go generated vendored Normal file
@ -0,0 +1,128 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// notImplementedErr is returned by stub functions that replace cgo functions, when cgo
+// isn't available.
+var notImplementedErr = errors.New("not implemented")
+
+type memoryInfo struct {
+	vsize uint64 // Virtual memory size in bytes
+	rss   uint64 // Resident memory size in bytes
+}
+
+func canCollectProcess() bool {
+	return true
+}
+
+func getSoftLimit(which int) (uint64, error) {
+	rlimit := syscall.Rlimit{}
+
+	if err := syscall.Getrlimit(which, &rlimit); err != nil {
+		return 0, err
+	}
+
+	return rlimit.Cur, nil
+}
+
+func getOpenFileCount() (float64, error) {
+	// Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to
+	// return a list of open fds, but that requires a way to call C APIs. The
+	// benefits, however, include fewer system calls and not failing when at the
+	// open file soft limit.
+
+	if dir, err := os.Open("/dev/fd"); err != nil {
+		return 0.0, err
+	} else {
+		defer dir.Close()
+
+		// Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is
+		// that info not used, but KQUEUE descriptors fail stat(2), which causes
+		// the whole method to fail.
+		if names, err := dir.Readdirnames(0); err != nil {
+			return 0.0, err
+		} else {
+			// Subtract 1 to ignore the open /dev/fd descriptor above.
+			return float64(len(names) - 1), nil
+		}
+	}
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil {
+		if len(procs) == 1 {
+			startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9)
+			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+		} else {
+			err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs))
+			c.reportError(ch, c.startTime, err)
+		}
+	} else {
+		c.reportError(ch, c.startTime, err)
+	}
+
+	// The proc structure returned by kern.proc.pid above has an Rusage member,
+	// but it is not filled in, so it needs to be fetched by getrusage(2). For
+	// that call, the UTime, STime, and Maxrss members are filled out, but not
+	// Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require
+	// access to the C API to call task_info(TASK_BASIC_INFO).
+	rusage := unix.Rusage{}
+
+	if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil {
+		cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds()
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime)
+	} else {
+		c.reportError(ch, c.cpuTotal, err)
+	}
+
+	if memInfo, err := getMemory(); err == nil {
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
+	} else if !errors.Is(err, notImplementedErr) {
+		// Don't report an error when support is not compiled in.
+		c.reportError(ch, c.rss, err)
+		c.reportError(ch, c.vsize, err)
+	}
+
+	if fds, err := getOpenFileCount(); err == nil {
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds)
+	} else {
+		c.reportError(ch, c.openFDs, err)
+	}
+
+	if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil {
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles))
+	} else {
+		c.reportError(ch, c.maxFDs, err)
+	}
+
+	if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil {
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace))
+	} else {
+		c.reportError(ch, c.maxVsize, err)
+	}
+
+	// TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might
+	// be able to get the per-process network send/receive counts.
+}
39 vendor/github.com/prometheus/client_golang/prometheus/process_collector_nocgo_darwin.go generated vendored Normal file
@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !cgo
+
+package prometheus
+
+func getMemory() (*memoryInfo, error) {
+	return nil, notImplementedErr
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.maxVsize
+	ch <- c.startTime
+
+	/* the process could be collected but not implemented yet
+	ch <- c.rss
+	ch <- c.vsize
+	ch <- c.inBytes
+	ch <- c.outBytes
+	*/
+}
20
vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
generated
vendored
@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !windows && !js && !wasip1
// +build !windows,!js,!wasip1
//go:build !windows && !js && !wasip1 && !darwin
// +build !windows,!js,!wasip1,!darwin

package prometheus

@ -78,3 +78,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
		c.reportError(ch, nil, err)
	}
}

// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin.
// Ensure that this list of descriptors is kept in sync with the metrics collected
// in the processCollect method. Any changes to the metrics in processCollect
// (such as adding or removing metrics) should be reflected in this list of descriptors.
func (c *processCollector) describe(ch chan<- *Desc) {
	ch <- c.cpuTotal
	ch <- c.openFDs
	ch <- c.maxFDs
	ch <- c.vsize
	ch <- c.maxVsize
	ch <- c.rss
	ch <- c.startTime
	ch <- c.inBytes
	ch <- c.outBytes
}
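Each platform file now ends with the same maintenance note: describe must mirror exactly what processCollect emits. One way to sanity-check that invariant from outside the package is to drain a collector's Describe channel and compare the count against an actual scrape; a hedged sketch (descCount is a hypothetical helper, not part of the library):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

// descCount drains Describe and returns how many descriptors the
// collector advertises.
func descCount(c prometheus.Collector) int {
	ch := make(chan *prometheus.Desc)
	go func() { c.Describe(ch); close(ch) }()
	n := 0
	for range ch {
		n++
	}
	return n
}

func main() {
	c := collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})
	fmt.Println(descCount(c)) // count is platform-dependent
}
```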
26
vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
generated
vendored
@ -1,26 +0,0 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build wasip1
// +build wasip1

package prometheus

func canCollectProcess() bool {
	return false
}

func (*processCollector) processCollect(chan<- Metric) {
	// noop on this platform
	return
}
@ -1,4 +1,4 @@
// Copyright 2019 The Prometheus Authors
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build js
// +build js
//go:build wasip1 || js
// +build wasip1 js

package prometheus

@ -21,6 +21,13 @@ func canCollectProcess() bool {
}

func (c *processCollector) processCollect(ch chan<- Metric) {
	// noop on this platform
	return
	c.errorCollectFn(ch)
}

// describe returns all descriptions of the collector for wasip1 and js.
// Ensure that this list of descriptors is kept in sync with the metrics collected
// in the processCollect method. Any changes to the metrics in processCollect
// (such as adding or removing metrics) should be reflected in this list of descriptors.
func (c *processCollector) describe(ch chan<- *Desc) {
	c.errorDescribeFn(ch)
}
21
vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
generated
vendored
@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) {
}

func (c *processCollector) processCollect(ch chan<- Metric) {
	h, err := windows.GetCurrentProcess()
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}
	h := windows.CurrentProcess()

	var startTime, exitTime, kernelTime, userTime windows.Filetime
	err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
	err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
	if err != nil {
		c.reportError(ch, nil, err)
		return
@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
	ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
}

// describe returns all descriptions of the collector for windows.
// Ensure that this list of descriptors is kept in sync with the metrics collected
// in the processCollect method. Any changes to the metrics in processCollect
// (such as adding or removing metrics) should be reflected in this list of descriptors.
func (c *processCollector) describe(ch chan<- *Desc) {
	ch <- c.cpuTotal
	ch <- c.openFDs
	ch <- c.maxFDs
	ch <- c.vsize
	ch <- c.rss
	ch <- c.startTime
}

func fileTimeToSeconds(ft windows.Filetime) float64 {
	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
}
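fileTimeToSeconds packs the two 32-bit halves of a Windows FILETIME into a single count of 100-nanosecond ticks and divides by 1e7 ticks per second. A worked sketch of the same arithmetic (values are illustrative):

```go
package main

import "fmt"

func main() {
	// A FILETIME carries 100 ns ticks split across two uint32 halves.
	var high, low uint32 = 2, 500000000
	ticks := uint64(high)<<32 + uint64(low)
	seconds := float64(ticks) / 1e7 // 1e7 ticks of 100 ns per second
	fmt.Printf("%d ticks = %.3f s\n", ticks, seconds) // 9089934592 ticks = 908.993 s
}
```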
23
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
generated
vendored
@ -207,7 +207,13 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
		if encodingHeader != string(Identity) {
			rsp.Header().Set(contentEncodingHeader, encodingHeader)
		}
		enc := expfmt.NewEncoder(w, contentType)

		var enc expfmt.Encoder
		if opts.EnableOpenMetricsTextCreatedSamples {
			enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines())
		} else {
			enc = expfmt.NewEncoder(w, contentType)
		}

		// handleError handles the error according to opts.ErrorHandling
		// and returns true if we have to abort after the handling.
@ -408,6 +414,21 @@ type HandlerOpts struct {
	// (which changes the identity of the resulting series on the Prometheus
	// server).
	EnableOpenMetrics bool
	// EnableOpenMetricsTextCreatedSamples specifies if this handler should add, extra, synthetic
	// Created Timestamps for counters, histograms and summaries, which for the current
	// version of OpenMetrics are defined as extra series with the same name and "_created"
	// suffix. See also the OpenMetrics specification for more details
	// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1
	//
	// Created timestamps are used to improve the accuracy of reset detection,
	// but the way it's designed in OpenMetrics 1.0 it also dramatically increases cardinality
	// if the scraper does not handle those metrics correctly (converting to created timestamp
	// instead of leaving those series as-is). New OpenMetrics versions might improve
	// this situation.
	//
	// Prometheus introduced the feature flag 'created-timestamp-zero-ingestion'
	// in version 2.50.0 to handle this situation.
	EnableOpenMetricsTextCreatedSamples bool
	// ProcessStartTime allows setting process start timevalue that will be exposed
	// with "Process-Start-Time-Unix" response header along with the metrics
	// payload. This allow callers to have efficient transformations to cumulative
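As the encoder hunk shows, the new field only switches which expfmt encoder the handler builds, so opting in is a one-field change. A minimal sketch of enabling it (address and registry choice are placeholders):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	http.Handle("/metrics", promhttp.HandlerFor(
		prometheus.DefaultGatherer,
		promhttp.HandlerOpts{
			// Negotiate OpenMetrics and emit the synthetic "_created" series.
			EnableOpenMetrics:                   true,
			EnableOpenMetricsTextCreatedSamples: true,
		},
	))
	_ = http.ListenAndServe(":8080", nil) // placeholder address
}
```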
32
vendor/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {

	s := &summary{
		desc: desc,
		now:  opts.now,

		objectives:       opts.Objectives,
		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
@ -280,6 +281,8 @@ type summary struct {

	desc *Desc

	now func() time.Time

	objectives       map[float64]float64
	sortedObjectives []float64

@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) {
	s.bufMtx.Lock()
	defer s.bufMtx.Unlock()

	now := time.Now()
	now := s.now()
	if now.After(s.hotBufExpTime) {
		s.asyncFlush(now)
	}
@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error {
	s.bufMtx.Lock()
	s.mtx.Lock()
	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
	s.swapBufs(time.Now())
	s.swapBufs(s.now())
	s.bufMtx.Unlock()

	s.flushColdBuf()
@ -468,13 +471,9 @@ func (s *noObjectivesSummary) Observe(v float64) {
	n := atomic.AddUint64(&s.countAndHotIdx, 1)
	hotCounts := s.counts[n>>63]

	for {
		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
			break
		}
	}
	atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
		return oldVal + v
	})
	// Increment count last as we take it as a signal that the observation
	// is complete.
	atomic.AddUint64(&hotCounts.count, 1)
@ -516,14 +515,13 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
	// Finally add all the cold counts to the new hot counts and reset the cold counts.
	atomic.AddUint64(&hotCounts.count, count)
	atomic.StoreUint64(&coldCounts.count, 0)
	for {
		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
			atomic.StoreUint64(&coldCounts.sumBits, 0)
			break
		}
	}

	// Use atomicUpdateFloat to update hotCounts.sumBits atomically.
	atomicUpdateFloat(&hotCounts.sumBits, func(oldVal float64) float64 {
		return oldVal + sum.GetSampleSum()
	})
	atomic.StoreUint64(&coldCounts.sumBits, 0)

	return nil
}
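Both hand-rolled CAS loops over sumBits collapse into atomicUpdateFloat, whose body is not shown in this diff. A sketch consistent with the call sites (the upstream helper may differ, for instance by adding backoff between retries):

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// atomicUpdateFloat applies update to the float64 stored in bits as raw
// IEEE-754, retrying compare-and-swap until no concurrent writer interferes.
func atomicUpdateFloat(bits *uint64, update func(float64) float64) {
	for {
		oldBits := atomic.LoadUint64(bits)
		newBits := math.Float64bits(update(math.Float64frombits(oldBits)))
		if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
			return
		}
	}
}

func main() {
	var sumBits uint64
	atomicUpdateFloat(&sumBits, func(v float64) float64 { return v + 1.5 })
	fmt.Println(math.Float64frombits(atomic.LoadUint64(&sumBits))) // 1.5
}
```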
116
vendor/go.etcd.io/bbolt/Makefile
generated
vendored
@ -1,38 +1,108 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
GOFILES = $(shell find . -name \*.go)

default: build
TESTFLAGS_RACE=-race=false
ifdef ENABLE_RACE
TESTFLAGS_RACE=-race=true
endif

race:
	@TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
	@echo "array freelist test"
	@TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)"
TESTFLAGS_CPU=
ifdef CPU
TESTFLAGS_CPU=-cpu=$(CPU)
endif
TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS)

TESTFLAGS_TIMEOUT=30m
ifdef TIMEOUT
TESTFLAGS_TIMEOUT=$(TIMEOUT)
endif

TESTFLAGS_ENABLE_STRICT_MODE=false
ifdef ENABLE_STRICT_MODE
TESTFLAGS_ENABLE_STRICT_MODE=$(ENABLE_STRICT_MODE)
endif

.EXPORT_ALL_VARIABLES:
TEST_ENABLE_STRICT_MODE=${TESTFLAGS_ENABLE_STRICT_MODE}

.PHONY: fmt
fmt:
	!(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
	@echo "Verifying gofmt, failures can be fixed with ./scripts/fix.sh"
	@!(gofmt -l -s -d ${GOFILES} | grep '[a-z]')

# go get honnef.co/go/tools/simple
gosimple:
	gosimple ./...
	@echo "Verifying goimports, failures can be fixed with ./scripts/fix.sh"
	@!(go run golang.org/x/tools/cmd/goimports@latest -l -d ${GOFILES} | grep '[a-z]')

# go get honnef.co/go/tools/unused
unused:
	unused ./...

# go get github.com/kisielk/errcheck
errcheck:
	@errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
.PHONY: lint
lint:
	golangci-lint run ./...

.PHONY: test
test:
	TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic
	# Note: gets "program not an importable package" in out of path builds
	TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt
	@echo "hashmap freelist test"
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT}
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/...
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt

	@echo "array freelist test"
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT}
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/...
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt

	@TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic
	# Note: gets "program not an importable package" in out of path builds
	@TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt
.PHONY: coverage
coverage:
	@echo "hashmap freelist test"
	TEST_FREELIST_TYPE=hashmap go test -v -timeout ${TESTFLAGS_TIMEOUT} \
		-coverprofile cover-freelist-hashmap.out -covermode atomic

.PHONY: race fmt errcheck test gosimple unused
	@echo "array freelist test"
	TEST_FREELIST_TYPE=array go test -v -timeout ${TESTFLAGS_TIMEOUT} \
		-coverprofile cover-freelist-array.out -covermode atomic

BOLT_CMD=bbolt

build:
	go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD}

.PHONY: clean
clean: # Clean binaries
	rm -f ./bin/${BOLT_CMD}

.PHONY: gofail-enable
gofail-enable: install-gofail
	gofail enable .

.PHONY: gofail-disable
gofail-disable: install-gofail
	gofail disable .

.PHONY: install-gofail
install-gofail:
	go install go.etcd.io/gofail

.PHONY: test-failpoint
test-failpoint:
	@echo "[failpoint] hashmap freelist test"
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint

	@echo "[failpoint] array freelist test"
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint

.PHONY: test-robustness # Running robustness tests requires root permission for now
# TODO: Remove sudo once we fully migrate to the prow infrastructure
test-robustness: gofail-enable build
	sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root
	sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root

.PHONY: test-benchmark-compare
# Runs benchmark tests on the current git ref and the given REF, and compares
# the two.
test-benchmark-compare: install-benchstat
	@git fetch
	./scripts/compare_benchmarks.sh $(REF)

.PHONY: install-benchstat
install-benchstat:
	go install golang.org/x/perf/cmd/benchstat@latest
117
vendor/go.etcd.io/bbolt/README.md
generated
vendored
@ -1,10 +1,8 @@
bbolt
=====

[](https://goreportcard.com/report/github.com/etcd-io/bbolt)
[](https://codecov.io/gh/etcd-io/bbolt)
[](https://travis-ci.com/etcd-io/bbolt)
[](https://godoc.org/github.com/etcd-io/bbolt)
[](https://goreportcard.com/report/go.etcd.io/bbolt)
[](https://pkg.go.dev/go.etcd.io/bbolt)
[](https://github.com/etcd-io/bbolt/releases)
[](https://github.com/etcd-io/bbolt/blob/master/LICENSE)

@ -26,7 +24,7 @@ and setting values. That's it.
[gh_ben]: https://github.com/benbjohnson
[bolt]: https://github.com/boltdb/bolt
[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/
[lmdb]: https://www.symas.com/symas-embedded-database-lmdb

## Project Status

@ -71,21 +69,31 @@ New minor versions may add additional features to the API.
- [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Known Issues](#known-issues)
- [Other Projects Using Bolt](#other-projects-using-bolt)

## Getting Started

### Installing

To start using Bolt, install Go and run `go get`:

To start using `bbolt`, install Go and run `go get`:
```sh
$ go get go.etcd.io/bbolt/...
$ go get go.etcd.io/bbolt@latest
```

This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.
This will retrieve the library and update your `go.mod` and `go.sum` files.

To run the command line utility, execute:
```sh
$ go run go.etcd.io/bbolt/cmd/bbolt@latest
```

Run `go install` to install the `bbolt` command line utility into
your `$GOBIN` path, which defaults to `$GOPATH/bin` or `$HOME/go/bin` if the
`GOPATH` environment variable is not set.
```sh
$ go install go.etcd.io/bbolt/cmd/bbolt@latest
```

### Importing bbolt

@ -94,7 +102,7 @@ To use bbolt as an embedded key-value store, import as:
```go
import bolt "go.etcd.io/bbolt"

db, err := bolt.Open(path, 0666, nil)
db, err := bolt.Open(path, 0600, nil)
if err != nil {
	return err
}
@ -289,6 +297,17 @@ db.Update(func(tx *bolt.Tx) error {
})
```

You can retrieve an existing bucket using the `Tx.Bucket()` function:
```go
db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	if b == nil {
		return errors.New("bucket does not exist")
	}
	return nil
})
```

You can also create a bucket only if it doesn't exist by using the
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
function for all your top-level buckets after you open your database so you can
@ -296,6 +315,17 @@ guarantee that they exist for future transactions.

To delete a bucket, simply call the `Tx.DeleteBucket()` function.

You can also iterate over all existing top-level buckets with `Tx.ForEach()`:

```go
db.View(func(tx *bolt.Tx) error {
	tx.ForEach(func(name []byte, b *bolt.Bucket) error {
		fmt.Println(string(name))
		return nil
	})
	return nil
})
```

### Using key/value pairs

@ -327,7 +357,17 @@ exists then it will return its byte slice value. If it doesn't exist then it
will return `nil`. It's important to note that you can have a zero-length value
set to a key which is different than the key not existing.

Use the `Bucket.Delete()` function to delete a key from the bucket.
Use the `Bucket.Delete()` function to delete a key from the bucket:

```go
db.Update(func (tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	err := b.Delete([]byte("answer"))
	return err
})
```

This will delete the key `answers` from the bucket `MyBucket`.

Please note that values returned from `Get()` are only valid while the
transaction is open. If you need to use a value outside of the transaction
@ -412,10 +452,19 @@ Prev() Move to the previous key.
```

Each of those functions has a return signature of `(key []byte, value []byte)`.
When you have iterated to the end of the cursor then `Next()` will return a
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
before calling `Next()` or `Prev()`. If you do not seek to a position then
these functions will return a `nil` key.
You must seek to a position using `First()`, `Last()`, or `Seek()` before calling
`Next()` or `Prev()`. If you do not seek to a position then these functions will
return a `nil` key.

When you have iterated to the end of the cursor, then `Next()` will return a
`nil` key and the cursor still points to the last element if present. When you
have iterated to the beginning of the cursor, then `Prev()` will return a `nil`
key and the cursor still points to the first element if present.

If you remove key/value pairs during iteration, the cursor may automatically
move to the next position if present in current node each time removing a key.
When you call `c.Next()` after removing a key, it may skip one key/value pair.
Refer to [pull/611](https://github.com/etcd-io/bbolt/pull/611) to get more detailed info.

During iteration, if the key is non-`nil` but the value is `nil`, that means
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
@ -636,7 +685,7 @@ uses a shared lock to allow multiple processes to read from the database but
it will block any processes from opening the database in read-write mode.

```go
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
db, err := bolt.Open("my.db", 0600, &bolt.Options{ReadOnly: true})
if err != nil {
	log.Fatal(err)
}
@ -841,6 +890,12 @@ Here are a few things to note when evaluating and using Bolt:
  to grow. However, it's important to note that deleting large chunks of data
  will not allow you to reclaim that space on disk.

* Removing key/values pairs in a bucket during iteration on the bucket using
  cursor may not work properly. Each time when removing a key/value pair, the
  cursor may automatically move to the next position if present. When users
  call `c.Next()` after removing a key, it may skip one key/value pair.
  Refer to https://github.com/etcd-io/bbolt/pull/611 for more detailed info.

For more information on page allocation, [see this comment][page-allocation].

[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
@ -866,7 +921,7 @@ The best places to start are the main entry points into Bolt:

- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
  arguments, a cursor is used to traverse the B+tree to the page and position
  where they key & value will be written. Once the position is found, the bucket
  where the key & value will be written. Once the position is found, the bucket
  materializes the underlying page and the page's parent pages into memory as
  "nodes". These nodes are where mutations occur during read-write transactions.
  These changes get flushed to disk during commit.
@ -895,6 +950,21 @@ The best places to start are the main entry points into Bolt:
If you have additional notes that could be helpful for others, please submit
them via pull request.

## Known Issues

- bbolt might run into data corruption issue on Linux when the feature
  [ext4: fast commit](https://lwn.net/Articles/842385/), which was introduced in
  linux kernel version v5.10, is enabled. The fixes to the issue were included in
  linux kernel version v5.17, please refer to links below,

  * [ext4: fast commit may miss tracking unwritten range during ftruncate](https://lore.kernel.org/linux-ext4/20211223032337.5198-3-yinxin.x@bytedance.com/)
  * [ext4: fast commit may not fallback for ineligible commit](https://lore.kernel.org/lkml/202201091544.W5HHEXAp-lkp@intel.com/T/#ma0768815e4b5f671e9e451d578256ef9a76fe30e)
  * [ext4 updates for 5.17](https://lore.kernel.org/lkml/YdyxjTFaLWif6BCM@mit.edu/)

  Please also refer to the discussion in https://github.com/etcd-io/bbolt/issues/562.

- Writing a value with a length of 0 will always result in reading back an empty `[]byte{}` value.
  Please refer to [issues/726#issuecomment-2061694802](https://github.com/etcd-io/bbolt/issues/726#issuecomment-2061694802).

## Other Projects Using Bolt

@ -908,13 +978,18 @@ Below is a list of public, open source projects that use Bolt:
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB Viewer Can run on Windows、Linux、Android system.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [bstore](https://github.com/mjl-/bstore) - Database library storing Go values, with referential/unique/nonzero constraints, indices, automatic schema management with struct tags, and a query API.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
  simple tx and key scans.
* [Buildkit](https://github.com/moby/buildkit) - concurrent, cache-efficient, and Dockerfile-agnostic builder toolkit
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Containerd](https://github.com/containerd/containerd) - An open and reliable container runtime
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
@ -931,16 +1006,16 @@ Below is a list of public, open source projects that use Bolt:
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [Key Value Access Langusge (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [Portainer](https://github.com/portainer/portainer) - A lightweight service delivery platform for containerized applications that can be used to manage Docker, Swarm, Kubernetes and ACI environments.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library.
* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
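The new README sections above warn twice about the same footgun: deleting the key a cursor sits on can make the following Next() skip an entry (pull/611). A common workaround, in the same fragment style as the README examples, is to collect keys during the scan and delete afterwards (bucket name and predicate are illustrative):

```go
db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	if b == nil {
		return nil
	}
	// Phase 1: scan without mutating, remembering the keys to remove.
	var doomed [][]byte
	c := b.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		if len(v) == 0 { // illustrative predicate
			doomed = append(doomed, append([]byte(nil), k...))
		}
	}
	// Phase 2: delete after iteration, so the cursor never skips entries.
	for _, k := range doomed {
		if err := b.Delete(k); err != nil {
			return err
		}
	}
	return nil
})
```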
2
vendor/go.etcd.io/bbolt/bolt_unix_aix.go → vendor/go.etcd.io/bbolt/bolt_aix.go
generated
vendored
@ -1,4 +1,4 @@
// +build aix
//go:build aix

package bbolt
90
vendor/go.etcd.io/bbolt/bolt_android.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
package bbolt

import (
	"fmt"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, exclusive bool, timeout time.Duration) error {
	var t time.Time
	if timeout != 0 {
		t = time.Now()
	}
	fd := db.file.Fd()
	var lockType int16
	if exclusive {
		lockType = syscall.F_WRLCK
	} else {
		lockType = syscall.F_RDLCK
	}
	for {
		// Attempt to obtain an exclusive lock.
		lock := syscall.Flock_t{Type: lockType}
		err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
		if err == nil {
			return nil
		} else if err != syscall.EAGAIN {
			return err
		}

		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return ErrTimeout
		}

		// Wait for a bit and try again.
		time.Sleep(flockRetryTimeout)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	var lock syscall.Flock_t
	lock.Start = 0
	lock.Len = 0
	lock.Type = syscall.F_UNLCK
	lock.Whence = 0
	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}

// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}

	// Advise the kernel that the mmap is accessed randomly.
	err = unix.Madvise(b, syscall.MADV_RANDOM)
	if err != nil && err != syscall.ENOSYS {
		// Ignore not implemented error in kernel because it still works.
		return fmt.Errorf("madvise: %s", err)
	}

	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}

	// Unmap using the original byte slice.
	err := unix.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}
2
vendor/go.etcd.io/bbolt/bolt_arm64.go
generated
vendored
@ -1,4 +1,4 @@
// +build arm64
//go:build arm64

package bbolt

9
vendor/go.etcd.io/bbolt/bolt_loong64.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
//go:build loong64

package bbolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
2
vendor/go.etcd.io/bbolt/bolt_mips64x.go
generated
vendored
@ -1,4 +1,4 @@
// +build mips64 mips64le
//go:build mips64 || mips64le

package bbolt

2
vendor/go.etcd.io/bbolt/bolt_mipsx.go
generated
vendored
@ -1,4 +1,4 @@
// +build mips mipsle
//go:build mips || mipsle

package bbolt

15
vendor/go.etcd.io/bbolt/bolt_openbsd.go
generated
vendored
@ -1,22 +1,11 @@
package bbolt

import (
	"syscall"
	"unsafe"
)

const (
	msAsync = 1 << iota // perform asynchronous writes
	msSync              // perform synchronous writes
	msInvalidate        // invalidate cached data
	"golang.org/x/sys/unix"
)

func msync(db *DB) error {
	_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
	if errno != 0 {
		return errno
	}
	return nil
	return unix.Msync(db.data[:db.datasz], unix.MS_INVALIDATE)
}

func fdatasync(db *DB) error {
2
vendor/go.etcd.io/bbolt/bolt_ppc.go
generated
vendored
@ -1,4 +1,4 @@
// +build ppc
//go:build ppc

package bbolt

2
vendor/go.etcd.io/bbolt/bolt_ppc64.go
generated
vendored
@ -1,4 +1,4 @@
// +build ppc64
//go:build ppc64

package bbolt

2
vendor/go.etcd.io/bbolt/bolt_ppc64le.go
generated
vendored
@ -1,4 +1,4 @@
// +build ppc64le
//go:build ppc64le

package bbolt

2
vendor/go.etcd.io/bbolt/bolt_riscv64.go
generated
vendored
@ -1,4 +1,4 @@
// +build riscv64
//go:build riscv64

package bbolt

2
vendor/go.etcd.io/bbolt/bolt_s390x.go
generated
vendored
@ -1,4 +1,4 @@
// +build s390x
//go:build s390x

package bbolt

23
vendor/go.etcd.io/bbolt/bolt_unix.go
generated
vendored
@ -1,4 +1,4 @@
// +build !windows,!plan9,!solaris,!aix
//go:build !windows && !plan9 && !solaris && !aix && !android

package bbolt

@ -7,6 +7,10 @@ import (
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"

	"go.etcd.io/bbolt/errors"
)

// flock acquires an advisory lock on a file descriptor.
@ -33,7 +37,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {

		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return ErrTimeout
			return errors.ErrTimeout
		}

		// Wait for a bit and try again.
@ -49,13 +53,13 @@ func funlock(db *DB) error {
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}

	// Advise the kernel that the mmap is accessed randomly.
	err = madvise(b, syscall.MADV_RANDOM)
	err = unix.Madvise(b, syscall.MADV_RANDOM)
	if err != nil && err != syscall.ENOSYS {
		// Ignore not implemented error in kernel because it still works.
		return fmt.Errorf("madvise: %s", err)
@ -76,18 +80,9 @@ func munmap(db *DB) error {
	}

	// Unmap using the original byte slice.
	err := syscall.Munmap(db.dataref)
	err := unix.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}

// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
	if e1 != 0 {
		err = e1
	}
	return
}
70
vendor/go.etcd.io/bbolt/bolt_windows.go
generated
vendored
@ -6,40 +6,12 @@ import (
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/windows"

	"go.etcd.io/bbolt/errors"
)

// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
	procLockFileEx   = modkernel32.NewProc("LockFileEx")
	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)

const (
	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
	flagLockExclusive       = 2
	flagLockFailImmediately = 1

	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
	errLockViolation syscall.Errno = 0x21
)

func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
	if r == 0 {
		return err
	}
	return nil
}

func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
	if r == 0 {
		return err
	}
	return nil
}

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return db.file.Sync()
@ -51,28 +23,28 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {
	if timeout != 0 {
		t = time.Now()
	}
	var flag uint32 = flagLockFailImmediately
	var flags uint32 = windows.LOCKFILE_FAIL_IMMEDIATELY
	if exclusive {
		flag |= flagLockExclusive
		flags |= windows.LOCKFILE_EXCLUSIVE_LOCK
	}
	for {
		// Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
		// -1..0 as the lock on the database file.
		var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
		err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
		err := windows.LockFileEx(windows.Handle(db.file.Fd()), flags, 0, 1, 0, &windows.Overlapped{
			Offset:     m1,
			OffsetHigh: m1,
		})

		if err == nil {
			return nil
		} else if err != errLockViolation {
		} else if err != windows.ERROR_LOCK_VIOLATION {
			return err
		}

		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return ErrTimeout
			return errors.ErrTimeout
		}

		// Wait for a bit and try again.
@ -83,34 +55,37 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
	err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
	return windows.UnlockFileEx(windows.Handle(db.file.Fd()), 0, 1, 0, &windows.Overlapped{
		Offset:     m1,
		OffsetHigh: m1,
	})
	return err
}

// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
	var sizelo, sizehi uint32

	if !db.readOnly {
		// Truncate the database to the size of the mmap.
		if err := db.file.Truncate(int64(sz)); err != nil {
			return fmt.Errorf("truncate: %s", err)
		}
		sizehi = uint32(sz >> 32)
		sizelo = uint32(sz)
	}

	// Open a file mapping handle.
	sizelo := uint32(sz >> 32)
	sizehi := uint32(sz) & 0xffffffff
	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
	if h == 0 {
		return os.NewSyscallError("CreateFileMapping", errno)
	}

	// Create the memory map.
	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0)
	if addr == 0 {
		// Do our best and report error returned from MapViewOfFile.
		_ = syscall.CloseHandle(h)
		return os.NewSyscallError("MapViewOfFile", errno)
	}

@ -120,7 +95,7 @@ func mmap(db *DB, sz int) error {
	}

	// Convert to a byte array.
	db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr))
	db.datasz = sz

	return nil
@ -134,8 +109,11 @@ func munmap(db *DB) error {
	}

	addr := (uintptr)(unsafe.Pointer(&db.data[0]))
	var err1 error
	if err := syscall.UnmapViewOfFile(addr); err != nil {
		return os.NewSyscallError("UnmapViewOfFile", err)
		err1 = os.NewSyscallError("UnmapViewOfFile", err)
	}
	return nil
	db.data = nil
	db.datasz = 0
	return err1
}
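The mmap hunk above is easy to misread: CreateFileMapping takes the maximum mapping size as two 32-bit halves, so the 64-bit length has to be split, and mixing the halves up silently truncates any size of 4 GiB or more. A worked sketch of the split and recombination (names and values are illustrative):

```go
package main

import "fmt"

func main() {
	sz := int64(5) << 30                 // 5 GiB
	high := uint32(sz >> 32)             // upper half: 1
	low := uint32(sz & 0xffffffff)       // lower half: 0x40000000
	back := int64(high)<<32 | int64(low) // recombines to 5 GiB
	fmt.Printf("high=%d low=%#x back=%d\n", high, low, back)
}
```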
2
vendor/go.etcd.io/bbolt/boltsync_unix.go
generated
vendored
@ -1,4 +1,4 @@
// +build !windows,!plan9,!linux,!openbsd
//go:build !windows && !plan9 && !linux && !openbsd

package bbolt

578
vendor/go.etcd.io/bbolt/bucket.go
generated
vendored
578
vendor/go.etcd.io/bbolt/bucket.go
generated
vendored
@ -4,6 +4,9 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"go.etcd.io/bbolt/errors"
|
||||
"go.etcd.io/bbolt/internal/common"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -14,8 +17,6 @@ const (
|
||||
MaxValueSize = (1 << 31) - 2
|
||||
)
|
||||
|
||||
const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
|
||||
|
||||
const (
|
||||
minFillPercent = 0.1
|
||||
maxFillPercent = 1.0
|
||||
@ -27,12 +28,12 @@ const DefaultFillPercent = 0.5
|
||||
|
||||
// Bucket represents a collection of key/value pairs inside the database.
|
||||
type Bucket struct {
|
||||
*bucket
|
||||
tx *Tx // the associated transaction
|
||||
buckets map[string]*Bucket // subbucket cache
|
||||
page *page // inline page reference
|
||||
rootNode *node // materialized node for the root page.
|
||||
nodes map[pgid]*node // node cache
|
||||
*common.InBucket
|
||||
tx *Tx // the associated transaction
|
||||
buckets map[string]*Bucket // subbucket cache
|
||||
page *common.Page // inline page reference
|
||||
rootNode *node // materialized node for the root page.
|
||||
nodes map[common.Pgid]*node // node cache
|
||||
|
||||
// Sets the threshold for filling nodes when they split. By default,
|
||||
// the bucket will fill to 50% but it can be useful to increase this
|
||||
@ -42,21 +43,12 @@ type Bucket struct {
|
||||
FillPercent float64
|
||||
}
|
||||
|
||||
// bucket represents the on-file representation of a bucket.
|
||||
// This is stored as the "value" of a bucket key. If the bucket is small enough,
|
||||
// then its root page can be stored inline in the "value", after the bucket
|
||||
// header. In the case of inline buckets, the "root" will be 0.
|
||||
type bucket struct {
|
||||
root pgid // page id of the bucket's root-level page
|
||||
sequence uint64 // monotonically incrementing, used by NextSequence()
|
||||
}
|
||||
|
||||
// newBucket returns a new bucket associated with a transaction.
|
||||
func newBucket(tx *Tx) Bucket {
|
||||
var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
|
||||
if tx.writable {
|
||||
b.buckets = make(map[string]*Bucket)
|
||||
b.nodes = make(map[pgid]*node)
|
||||
b.nodes = make(map[common.Pgid]*node)
|
||||
}
|
||||
return b
|
||||
}
|
||||
@ -67,8 +59,8 @@ func (b *Bucket) Tx() *Tx {
|
||||
}
|
||||
|
||||
// Root returns the root of the bucket.
|
||||
func (b *Bucket) Root() pgid {
|
||||
return b.root
|
||||
func (b *Bucket) Root() common.Pgid {
|
||||
return b.RootPage()
|
||||
}
|
||||
|
||||
// Writable returns whether the bucket is writable.
|
||||
@ -81,7 +73,7 @@ func (b *Bucket) Writable() bool {
|
||||
// Do not use a cursor after the transaction is closed.
|
||||
func (b *Bucket) Cursor() *Cursor {
|
||||
// Update transaction statistics.
|
||||
b.tx.stats.CursorCount++
|
||||
b.tx.stats.IncCursorCount(1)
|
||||
|
||||
// Allocate and return a cursor.
|
||||
return &Cursor{
|
||||
@ -105,7 +97,7 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
|
||||
k, v, flags := c.seek(name)
|
||||
|
||||
// Return nil if the key doesn't exist or it is not a bucket.
|
||||
if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
|
||||
if !bytes.Equal(name, k) || (flags&common.BucketLeafFlag) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -125,8 +117,8 @@ func (b *Bucket) openBucket(value []byte) *Bucket {
|
||||
|
||||
// Unaligned access requires a copy to be made.
|
||||
const unalignedMask = unsafe.Alignof(struct {
|
||||
bucket
|
||||
page
|
||||
common.InBucket
|
||||
common.Page
|
||||
}{}) - 1
|
||||
unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0
|
||||
if unaligned {
|
||||
@ -136,15 +128,15 @@ func (b *Bucket) openBucket(value []byte) *Bucket {
|
||||
// If this is a writable transaction then we need to copy the bucket entry.
|
||||
// Read-only transactions can point directly at the mmap entry.
|
||||
if b.tx.writable && !unaligned {
|
||||
child.bucket = &bucket{}
|
||||
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
|
||||
child.InBucket = &common.InBucket{}
|
||||
*child.InBucket = *(*common.InBucket)(unsafe.Pointer(&value[0]))
|
||||
} else {
|
||||
child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
|
||||
child.InBucket = (*common.InBucket)(unsafe.Pointer(&value[0]))
|
||||
}
|
||||
|
||||
// Save a reference to the inline page if the bucket is inline.
|
||||
if child.root == 0 {
|
||||
child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
|
||||
if child.RootPage() == 0 {
|
||||
child.page = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize]))
|
||||
}
|
||||
|
||||
return &child
|
||||
@ -153,87 +145,167 @@ func (b *Bucket) openBucket(value []byte) *Bucket {
|
||||
// CreateBucket creates a new bucket at the given key and returns the new bucket.
|
||||
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
|
||||
if b.tx.db == nil {
|
||||
return nil, ErrTxClosed
|
||||
} else if !b.tx.writable {
|
||||
return nil, ErrTxNotWritable
|
||||
} else if len(key) == 0 {
|
||||
return nil, ErrBucketNameRequired
|
||||
func (b *Bucket) CreateBucket(key []byte) (rb *Bucket, err error) {
|
||||
if lg := b.tx.db.Logger(); lg != discardLogger {
|
||||
lg.Debugf("Creating bucket %q", key)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
lg.Errorf("Creating bucket %q failed: %v", key, err)
|
||||
} else {
|
||||
lg.Debugf("Creating bucket %q successfully", key)
|
||||
}
|
||||
}()
|
||||
}
|
||||
if b.tx.db == nil {
|
||||
return nil, errors.ErrTxClosed
|
||||
} else if !b.tx.writable {
|
||||
return nil, errors.ErrTxNotWritable
|
||||
} else if len(key) == 0 {
|
||||
return nil, errors.ErrBucketNameRequired
|
||||
}
|
||||
|
||||
// Insert into node.
|
||||
// Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent
|
||||
// it from being marked as leaking, and accordingly cannot be allocated on stack.
|
||||
newKey := cloneBytes(key)
|
||||
|
||||
// Move cursor to correct position.
|
||||
c := b.Cursor()
|
||||
k, _, flags := c.seek(key)
|
||||
k, _, flags := c.seek(newKey)
|
||||
|
||||
// Return an error if there is an existing key.
|
||||
if bytes.Equal(key, k) {
|
||||
if (flags & bucketLeafFlag) != 0 {
|
||||
return nil, ErrBucketExists
|
||||
if bytes.Equal(newKey, k) {
|
||||
if (flags & common.BucketLeafFlag) != 0 {
|
||||
return nil, errors.ErrBucketExists
|
||||
}
|
||||
return nil, ErrIncompatibleValue
|
||||
return nil, errors.ErrIncompatibleValue
|
||||
}
|
||||
|
||||
// Create empty, inline bucket.
|
||||
var bucket = Bucket{
|
||||
bucket: &bucket{},
|
||||
InBucket: &common.InBucket{},
|
||||
rootNode: &node{isLeaf: true},
|
||||
FillPercent: DefaultFillPercent,
|
||||
}
|
||||
var value = bucket.write()
|
||||
|
||||
// Insert into node.
|
||||
key = cloneBytes(key)
|
||||
c.node().put(key, key, value, 0, bucketLeafFlag)
|
||||
c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag)
|
||||
|
||||
// Since subbuckets are not allowed on inline buckets, we need to
|
||||
// dereference the inline page, if it exists. This will cause the bucket
|
||||
// to be treated as a regular, non-inline bucket for the rest of the tx.
|
||||
b.page = nil
|
||||
|
||||
return b.Bucket(key), nil
|
||||
return b.Bucket(newKey), nil
|
||||
}
|
||||
|
||||
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
|
||||
// Returns an error if the bucket name is blank, or if the bucket name is too long.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
|
||||
child, err := b.CreateBucket(key)
|
||||
if err == ErrBucketExists {
|
||||
return b.Bucket(key), nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
func (b *Bucket) CreateBucketIfNotExists(key []byte) (rb *Bucket, err error) {
|
||||
if lg := b.tx.db.Logger(); lg != discardLogger {
|
||||
lg.Debugf("Creating bucket if not exist %q", key)
|
||||
defer func() {
|
||||
if err != nil {
|
||||
lg.Errorf("Creating bucket if not exist %q failed: %v", key, err)
|
||||
} else {
|
||||
lg.Debugf("Creating bucket if not exist %q successfully", key)
|
||||
}
|
||||
}()
|
||||
}
|
||||
return child, nil
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket at the given key.
|
||||
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
|
||||
func (b *Bucket) DeleteBucket(key []byte) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return ErrTxNotWritable
|
||||
return nil, errors.ErrTxClosed
|
||||
} else if !b.tx.writable {
|
||||
return nil, errors.ErrTxNotWritable
|
||||
} else if len(key) == 0 {
|
||||
return nil, errors.ErrBucketNameRequired
|
||||
}
|
||||
|
||||
// Insert into node.
|
||||
// Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent
|
||||
// it from being marked as leaking, and accordingly cannot be allocated on stack.
|
||||
newKey := cloneBytes(key)
|
||||
|
||||
if b.buckets != nil {
|
||||
if child := b.buckets[string(newKey)]; child != nil {
|
||||
return child, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Move cursor to correct position.
|
||||
c := b.Cursor()
|
||||
k, _, flags := c.seek(key)
|
||||
k, v, flags := c.seek(newKey)
|
||||
|
||||
// Return an error if there is an existing non-bucket key.
|
||||
if bytes.Equal(newKey, k) {
|
||||
if (flags & common.BucketLeafFlag) != 0 {
|
||||
var child = b.openBucket(v)
|
||||
if b.buckets != nil {
|
||||
b.buckets[string(newKey)] = child
|
||||
}
|
||||
|
||||
return child, nil
|
||||
}
|
||||
return nil, errors.ErrIncompatibleValue
|
||||
}
|
||||
|
||||
// Create empty, inline bucket.
|
||||
var bucket = Bucket{
|
||||
InBucket: &common.InBucket{},
|
||||
rootNode: &node{isLeaf: true},
|
||||
FillPercent: DefaultFillPercent,
|
||||
}
|
||||
var value = bucket.write()
|
||||
|
||||
c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag)
|
||||
|
||||
// Since subbuckets are not allowed on inline buckets, we need to
|
||||
// dereference the inline page, if it exists. This will cause the bucket
|
||||
// to be treated as a regular, non-inline bucket for the rest of the tx.
|
||||
b.page = nil
|
||||
|
||||
return b.Bucket(newKey), nil
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket at the given key.
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
func (b *Bucket) DeleteBucket(key []byte) (err error) {
	if lg := b.tx.db.Logger(); lg != discardLogger {
		lg.Debugf("Deleting bucket %q", key)
		defer func() {
			if err != nil {
				lg.Errorf("Deleting bucket %q failed: %v", key, err)
			} else {
				lg.Debugf("Deleting bucket %q successfully", key)
			}
		}()
	}

	if b.tx.db == nil {
		return errors.ErrTxClosed
	} else if !b.Writable() {
		return errors.ErrTxNotWritable
	}

	newKey := cloneBytes(key)

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(newKey)

	// Return an error if bucket doesn't exist or is not a bucket.
	if !bytes.Equal(key, k) {
		return ErrBucketNotFound
	} else if (flags & bucketLeafFlag) == 0 {
		return ErrIncompatibleValue
	if !bytes.Equal(newKey, k) {
		return errors.ErrBucketNotFound
	} else if (flags & common.BucketLeafFlag) == 0 {
		return errors.ErrIncompatibleValue
	}

	// Recursively delete all child buckets.
	child := b.Bucket(key)
	err := child.ForEach(func(k, v []byte) error {
		if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 {
			if err := child.DeleteBucket(k); err != nil {
				return fmt.Errorf("delete bucket: %s", err)
			}
	child := b.Bucket(newKey)
	err = child.ForEachBucket(func(k []byte) error {
		if err := child.DeleteBucket(k); err != nil {
			return fmt.Errorf("delete bucket: %s", err)
		}
		return nil
	})
@ -242,7 +314,7 @@ func (b *Bucket) DeleteBucket(key []byte) error {
	}

	// Remove cached copy.
	delete(b.buckets, string(key))
	delete(b.buckets, string(newKey))

	// Release all bucket pages to freelist.
	child.nodes = nil
@ -250,19 +322,119 @@ func (b *Bucket) DeleteBucket(key []byte) error {
	child.free()

	// Delete the node if we have a matching key.
	c.node().del(key)
	c.node().del(newKey)

	return nil
}

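DeleteBucket now routes its recursion through the new ForEachBucket helper instead of re-seeking every key with ForEach. Usage is unchanged; a sketch, assuming both buckets exist (names illustrative, db as above):

    err = db.Update(func(tx *bolt.Tx) error {
        // Removes the "cache" sub-bucket and, recursively, all of its children.
        return tx.Bucket([]byte("users")).DeleteBucket([]byte("cache"))
    })
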
// MoveBucket moves a sub-bucket from the source bucket to the destination bucket.
// Returns an error if
// 1. the sub-bucket cannot be found in the source bucket;
// 2. or the key already exists in the destination bucket;
// 3. or the key represents a non-bucket value;
// 4. the source and destination buckets are the same.
func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) {
	lg := b.tx.db.Logger()
	if lg != discardLogger {
		lg.Debugf("Moving bucket %q", key)
		defer func() {
			if err != nil {
				lg.Errorf("Moving bucket %q failed: %v", key, err)
			} else {
				lg.Debugf("Moving bucket %q successfully", key)
			}
		}()
	}

	if b.tx.db == nil || dstBucket.tx.db == nil {
		return errors.ErrTxClosed
	} else if !b.Writable() || !dstBucket.Writable() {
		return errors.ErrTxNotWritable
	}

	if b.tx.db.Path() != dstBucket.tx.db.Path() || b.tx != dstBucket.tx {
		lg.Errorf("The source and target buckets are not in the same db file, source bucket in %s and target bucket in %s", b.tx.db.Path(), dstBucket.tx.db.Path())
		return errors.ErrDifferentDB
	}

	newKey := cloneBytes(key)

	// Move cursor to correct position.
	c := b.Cursor()
	k, v, flags := c.seek(newKey)

	// Return an error if bucket doesn't exist or is not a bucket.
	if !bytes.Equal(newKey, k) {
		return errors.ErrBucketNotFound
	} else if (flags & common.BucketLeafFlag) == 0 {
		lg.Errorf("An incompatible key %s exists in the source bucket", newKey)
		return errors.ErrIncompatibleValue
	}

	// Do nothing (return true directly) if the source bucket and the
	// destination bucket are actually the same bucket.
	if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) {
		lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b, dstBucket)
		return errors.ErrSameBuckets
	}

	// check whether the key already exists in the destination bucket
	curDst := dstBucket.Cursor()
	k, _, flags = curDst.seek(newKey)

	// Return an error if there is an existing key in the destination bucket.
	if bytes.Equal(newKey, k) {
		if (flags & common.BucketLeafFlag) != 0 {
			return errors.ErrBucketExists
		}
		lg.Errorf("An incompatible key %s exists in the target bucket", newKey)
		return errors.ErrIncompatibleValue
	}

	// remove the sub-bucket from the source bucket
	delete(b.buckets, string(newKey))
	c.node().del(newKey)

	// add the sub-bucket to the destination bucket
	newValue := cloneBytes(v)
	curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag)

	return nil
}

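MoveBucket is a new public method in this bbolt version: as the hunk shows, it deletes the bucket's leaf element from the source and re-inserts a clone of the bucket header value into the destination, so the bucket's child pages are not copied. A sketch, assuming both top-level buckets already exist in the same writable transaction (names illustrative):

    err = db.Update(func(tx *bolt.Tx) error {
        src := tx.Bucket([]byte("staging"))
        dst := tx.Bucket([]byte("live"))
        // Fails with ErrBucketExists if dst already holds a "batch-1" key.
        return src.MoveBucket([]byte("batch-1"), dst)
    })
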
// Inspect returns the structure of the bucket.
func (b *Bucket) Inspect() BucketStructure {
	return b.recursivelyInspect([]byte("root"))
}

func (b *Bucket) recursivelyInspect(name []byte) BucketStructure {
	bs := BucketStructure{Name: string(name)}

	keyN := 0
	c := b.Cursor()
	for k, _, flags := c.first(); k != nil; k, _, flags = c.next() {
		if flags&common.BucketLeafFlag != 0 {
			childBucket := b.Bucket(k)
			childBS := childBucket.recursivelyInspect(k)
			bs.Children = append(bs.Children, childBS)
		} else {
			keyN++
		}
	}
	bs.KeyN = keyN

	return bs
}

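Inspect, also new here, returns a JSON-taggable BucketStructure tree (see the type added at the end of this file). A sketch that dumps a bucket's layout, assuming encoding/json and fmt are imported:

    _ = db.View(func(tx *bolt.Tx) error {
        s := tx.Bucket([]byte("users")).Inspect()
        out, _ := json.MarshalIndent(s, "", "  ")
        fmt.Println(string(out)) // {"name":"root","keyN":...,"buckets":[...]}
        return nil
    })
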
// Get retrieves the value for a key in the bucket.
// Returns a nil value if the key does not exist or if the key is a nested bucket.
// The returned value is only valid for the life of the transaction.
// The returned memory is owned by bbolt and must never be modified; writing to this memory might corrupt the database.
func (b *Bucket) Get(key []byte) []byte {
	k, v, flags := b.Cursor().seek(key)

	// Return nil if this is a bucket.
	if (flags & bucketLeafFlag) != 0 {
	if (flags & common.BucketLeafFlag) != 0 {
		return nil
	}

@ -277,31 +449,46 @@ func (b *Bucket) Get(key []byte) []byte {
// If the key exists then its previous value will be overwritten.
// Supplied value must remain valid for the life of the transaction.
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
func (b *Bucket) Put(key []byte, value []byte) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	} else if len(key) == 0 {
		return ErrKeyRequired
	} else if len(key) > MaxKeySize {
		return ErrKeyTooLarge
	} else if int64(len(value)) > MaxValueSize {
		return ErrValueTooLarge
func (b *Bucket) Put(key []byte, value []byte) (err error) {
	if lg := b.tx.db.Logger(); lg != discardLogger {
		lg.Debugf("Putting key %q", key)
		defer func() {
			if err != nil {
				lg.Errorf("Putting key %q failed: %v", key, err)
			} else {
				lg.Debugf("Putting key %q successfully", key)
			}
		}()
	}

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(key)

	// Return an error if there is an existing key with a bucket value.
	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	if b.tx.db == nil {
		return errors.ErrTxClosed
	} else if !b.Writable() {
		return errors.ErrTxNotWritable
	} else if len(key) == 0 {
		return errors.ErrKeyRequired
	} else if len(key) > MaxKeySize {
		return errors.ErrKeyTooLarge
	} else if int64(len(value)) > MaxValueSize {
		return errors.ErrValueTooLarge
	}

	// Insert into node.
	key = cloneBytes(key)
	c.node().put(key, key, value, 0, 0)
	// Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent
	// it from being marked as leaking, and accordingly cannot be allocated on stack.
	newKey := cloneBytes(key)

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(newKey)

	// Return an error if there is an existing key with a bucket value.
	if bytes.Equal(newKey, k) && (flags&common.BucketLeafFlag) != 0 {
		return errors.ErrIncompatibleValue
	}

	// gofail: var beforeBucketPut struct{}

	c.node().put(newKey, newKey, value, 0, 0)

	return nil
}

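Get and Put keep their contract (Get returns memory owned by bbolt, valid only for the transaction); Put merely gains logging, a gofail point, and the stack-friendly key clone. A round-trip sketch (assumes fmt is imported, db as above):

    err = db.Update(func(tx *bolt.Tx) error {
        b := tx.Bucket([]byte("users"))
        if err := b.Put([]byte("alice"), []byte("42")); err != nil {
            return err
        }
        v := b.Get([]byte("alice")) // copy v if it must outlive the transaction
        fmt.Printf("alice=%s\n", v)
        return nil
    })
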
@ -309,11 +496,22 @@ func (b *Bucket) Put(key []byte, value []byte) error {
// Delete removes a key from the bucket.
// If the key does not exist then nothing is done and a nil error is returned.
// Returns an error if the bucket was created from a read-only transaction.
func (b *Bucket) Delete(key []byte) error {
func (b *Bucket) Delete(key []byte) (err error) {
	if lg := b.tx.db.Logger(); lg != discardLogger {
		lg.Debugf("Deleting key %q", key)
		defer func() {
			if err != nil {
				lg.Errorf("Deleting key %q failed: %v", key, err)
			} else {
				lg.Debugf("Deleting key %q successfully", key)
			}
		}()
	}

	if b.tx.db == nil {
		return ErrTxClosed
		return errors.ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
		return errors.ErrTxNotWritable
	}

	// Move cursor to correct position.
@ -326,8 +524,8 @@ func (b *Bucket) Delete(key []byte) error {
	}

	// Return an error if there is already existing bucket value.
	if (flags & bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	if (flags & common.BucketLeafFlag) != 0 {
		return errors.ErrIncompatibleValue
	}

	// Delete the node if we have a matching key.
@ -337,53 +535,56 @@ func (b *Bucket) Delete(key []byte) error {
}

// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
func (b *Bucket) Sequence() uint64 {
	return b.InSequence()
}

// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
	if b.tx.db == nil {
		return ErrTxClosed
		return errors.ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
		return errors.ErrTxNotWritable
	}

	// Materialize the root node if it hasn't been already so that the
	// bucket will be saved during commit.
	if b.rootNode == nil {
		_ = b.node(b.root, nil)
		_ = b.node(b.RootPage(), nil)
	}

	// Increment and return the sequence.
	b.bucket.sequence = v
	// Set the sequence.
	b.SetInSequence(v)
	return nil
}

// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
	if b.tx.db == nil {
		return 0, ErrTxClosed
		return 0, errors.ErrTxClosed
	} else if !b.Writable() {
		return 0, ErrTxNotWritable
		return 0, errors.ErrTxNotWritable
	}

	// Materialize the root node if it hasn't been already so that the
	// bucket will be saved during commit.
	if b.rootNode == nil {
		_ = b.node(b.root, nil)
		_ = b.node(b.RootPage(), nil)
	}

	// Increment and return the sequence.
	b.bucket.sequence++
	return b.bucket.sequence, nil
	b.IncSequence()
	return b.Sequence(), nil
}

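The sequence accessors now delegate to the embedded common.InBucket, but the observable behavior is unchanged: a per-bucket, persisted counter that increases monotonically within the bucket. A common pattern for generating ordered keys (assumes encoding/binary is imported):

    err = db.Update(func(tx *bolt.Tx) error {
        b := tx.Bucket([]byte("events"))
        id, err := b.NextSequence()
        if err != nil {
            return err
        }
        key := make([]byte, 8)
        binary.BigEndian.PutUint64(key, id) // big-endian keeps lexical order == numeric order
        return b.Put(key, []byte("payload"))
    })
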
// ForEach executes a function for each key/value pair in a bucket.
// Because ForEach uses a Cursor, the iteration over keys is in lexicographical order.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
	if b.tx.db == nil {
		return ErrTxClosed
		return errors.ErrTxClosed
	}
	c := b.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
@ -394,74 +595,89 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
	return nil
}

// Stat returns stats on a bucket.
func (b *Bucket) ForEachBucket(fn func(k []byte) error) error {
	if b.tx.db == nil {
		return errors.ErrTxClosed
	}
	c := b.Cursor()
	for k, _, flags := c.first(); k != nil; k, _, flags = c.next() {
		if flags&common.BucketLeafFlag != 0 {
			if err := fn(k); err != nil {
				return err
			}
		}
	}
	return nil
}

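ForEachBucket is the new, cheaper way to visit only nested buckets: it checks the leaf flags directly instead of re-seeking each key the way the old DeleteBucket recursion did. A side-by-side sketch (assumes fmt is imported, db as above):

    _ = db.View(func(tx *bolt.Tx) error {
        b := tx.Bucket([]byte("users"))
        // All keys; nested buckets surface with v == nil.
        _ = b.ForEach(func(k, v []byte) error {
            fmt.Printf("%s=%s\n", k, v)
            return nil
        })
        // Nested buckets only.
        return b.ForEachBucket(func(k []byte) error {
            fmt.Printf("sub-bucket: %s\n", k)
            return nil
        })
    })
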
// Stats returns stats on a bucket.
func (b *Bucket) Stats() BucketStats {
	var s, subStats BucketStats
	pageSize := b.tx.db.pageSize
	s.BucketN += 1
	if b.root == 0 {
	if b.RootPage() == 0 {
		s.InlineBucketN += 1
	}
	b.forEachPage(func(p *page, depth int) {
		if (p.flags & leafPageFlag) != 0 {
			s.KeyN += int(p.count)
	b.forEachPage(func(p *common.Page, depth int, pgstack []common.Pgid) {
		if p.IsLeafPage() {
			s.KeyN += int(p.Count())

			// used totals the used bytes for the page
			used := pageHeaderSize
			used := common.PageHeaderSize

			if p.count != 0 {
			if p.Count() != 0 {
				// If page has any elements, add all element headers.
				used += leafPageElementSize * uintptr(p.count-1)
				used += common.LeafPageElementSize * uintptr(p.Count()-1)

				// Add all element key, value sizes.
				// The computation takes advantage of the fact that the position
				// of the last element's key/value equals to the total of the sizes
				// of all previous elements' keys and values.
				// It also includes the last element's header.
				lastElement := p.leafPageElement(p.count - 1)
				used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize)
				lastElement := p.LeafPageElement(p.Count() - 1)
				used += uintptr(lastElement.Pos() + lastElement.Ksize() + lastElement.Vsize())
			}

			if b.root == 0 {
			if b.RootPage() == 0 {
				// For inlined bucket just update the inline stats
				s.InlineBucketInuse += int(used)
			} else {
				// For non-inlined bucket update all the leaf stats
				s.LeafPageN++
				s.LeafInuse += int(used)
				s.LeafOverflowN += int(p.overflow)
				s.LeafOverflowN += int(p.Overflow())

				// Collect stats from sub-buckets.
				// Do that by iterating over all element headers
				// looking for the ones with the bucketLeafFlag.
				for i := uint16(0); i < p.count; i++ {
					e := p.leafPageElement(i)
					if (e.flags & bucketLeafFlag) != 0 {
				for i := uint16(0); i < p.Count(); i++ {
					e := p.LeafPageElement(i)
					if (e.Flags() & common.BucketLeafFlag) != 0 {
						// For any bucket element, open the element value
						// and recursively call Stats on the contained bucket.
						subStats.Add(b.openBucket(e.value()).Stats())
						subStats.Add(b.openBucket(e.Value()).Stats())
					}
				}
			}
		} else if (p.flags & branchPageFlag) != 0 {
		} else if p.IsBranchPage() {
			s.BranchPageN++
			lastElement := p.branchPageElement(p.count - 1)
			lastElement := p.BranchPageElement(p.Count() - 1)

			// used totals the used bytes for the page
			// Add header and all element headers.
			used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1))
			used := common.PageHeaderSize + (common.BranchPageElementSize * uintptr(p.Count()-1))

			// Add size of all keys and values.
			// Again, use the fact that last element's position equals to
			// the total of key, value sizes of all previous elements.
			used += uintptr(lastElement.pos + lastElement.ksize)
			used += uintptr(lastElement.Pos() + lastElement.Ksize())
			s.BranchInuse += int(used)
			s.BranchOverflowN += int(p.overflow)
			s.BranchOverflowN += int(p.Overflow())
		}

		// Keep track of maximum page depth.
		if depth+1 > s.Depth {
			s.Depth = (depth + 1)
			s.Depth = depth + 1
		}
	})

@ -477,46 +693,46 @@ func (b *Bucket) Stats() BucketStats {
}

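Stats walks every page of the bucket, so its cost grows with bucket size; treat it as a diagnostic rather than a hot-path call. A sketch that prints a few of the BucketStats fields (assumes fmt is imported, db as above):

    _ = db.View(func(tx *bolt.Tx) error {
        st := tx.Bucket([]byte("users")).Stats()
        fmt.Printf("keys=%d depth=%d leafPages=%d leafInUse=%dB inline=%d\n",
            st.KeyN, st.Depth, st.LeafPageN, st.LeafInuse, st.InlineBucketN)
        return nil
    })
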
// forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
func (b *Bucket) forEachPage(fn func(*common.Page, int, []common.Pgid)) {
	// If we have an inline page then just use that.
	if b.page != nil {
		fn(b.page, 0)
		fn(b.page, 0, []common.Pgid{b.RootPage()})
		return
	}

	// Otherwise traverse the page hierarchy.
	b.tx.forEachPage(b.root, 0, fn)
	b.tx.forEachPage(b.RootPage(), fn)
}

// forEachPageNode iterates over every page (or node) in a bucket.
// This also includes inline pages.
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
func (b *Bucket) forEachPageNode(fn func(*common.Page, *node, int)) {
	// If we have an inline page or root node then just use that.
	if b.page != nil {
		fn(b.page, nil, 0)
		return
	}
	b._forEachPageNode(b.root, 0, fn)
	b._forEachPageNode(b.RootPage(), 0, fn)
}

func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
	var p, n = b.pageNode(pgid)
func (b *Bucket) _forEachPageNode(pgId common.Pgid, depth int, fn func(*common.Page, *node, int)) {
	var p, n = b.pageNode(pgId)

	// Execute function.
	fn(p, n, depth)

	// Recursively loop over children.
	if p != nil {
		if (p.flags & branchPageFlag) != 0 {
			for i := 0; i < int(p.count); i++ {
				elem := p.branchPageElement(uint16(i))
				b._forEachPageNode(elem.pgid, depth+1, fn)
		if p.IsBranchPage() {
			for i := 0; i < int(p.Count()); i++ {
				elem := p.BranchPageElement(uint16(i))
				b._forEachPageNode(elem.Pgid(), depth+1, fn)
			}
		}
	} else {
		if !n.isLeaf {
			for _, inode := range n.inodes {
				b._forEachPageNode(inode.pgid, depth+1, fn)
				b._forEachPageNode(inode.Pgid(), depth+1, fn)
			}
		}
	}
@ -539,9 +755,9 @@ func (b *Bucket) spill() error {
	}

	// Update the child bucket header in this bucket.
	value = make([]byte, unsafe.Sizeof(bucket{}))
	var bucket = (*bucket)(unsafe.Pointer(&value[0]))
	*bucket = *child.bucket
	value = make([]byte, unsafe.Sizeof(common.InBucket{}))
	var bucket = (*common.InBucket)(unsafe.Pointer(&value[0]))
	*bucket = *child.InBucket
}

// Skip writing the bucket if there are no materialized nodes.
@ -555,10 +771,10 @@ func (b *Bucket) spill() error {
	if !bytes.Equal([]byte(name), k) {
		panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
	}
	if flags&bucketLeafFlag == 0 {
	if flags&common.BucketLeafFlag == 0 {
		panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
	}
	c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
	c.node().put([]byte(name), []byte(name), value, 0, common.BucketLeafFlag)
}

// Ignore if there's not a materialized root node.
@ -573,16 +789,16 @@ func (b *Bucket) spill() error {
	b.rootNode = b.rootNode.root()

	// Update the root node for this bucket.
	if b.rootNode.pgid >= b.tx.meta.pgid {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
	if b.rootNode.pgid >= b.tx.meta.Pgid() {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.Pgid()))
	}
	b.root = b.rootNode.pgid
	b.SetRootPage(b.rootNode.pgid)

	return nil
}

// inlineable returns true if a bucket is small enough to be written inline
// and if it contains no subbuckets. Otherwise returns false.
// and if it contains no subbuckets. Otherwise, returns false.
func (b *Bucket) inlineable() bool {
	var n = b.rootNode

@ -593,11 +809,11 @@ func (b *Bucket) inlineable() bool {

	// Bucket is not inlineable if it contains subbuckets or if it goes beyond
	// our threshold for inline bucket size.
	var size = pageHeaderSize
	var size = common.PageHeaderSize
	for _, inode := range n.inodes {
		size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value))
		size += common.LeafPageElementSize + uintptr(len(inode.Key())) + uintptr(len(inode.Value()))

		if inode.flags&bucketLeafFlag != 0 {
		if inode.Flags()&common.BucketLeafFlag != 0 {
			return false
		} else if size > b.maxInlineBucketSize() {
			return false
@ -616,14 +832,14 @@ func (b *Bucket) maxInlineBucketSize() uintptr {
func (b *Bucket) write() []byte {
	// Allocate the appropriate size.
	var n = b.rootNode
	var value = make([]byte, bucketHeaderSize+n.size())
	var value = make([]byte, common.BucketHeaderSize+n.size())

	// Write a bucket header.
	var bucket = (*bucket)(unsafe.Pointer(&value[0]))
	*bucket = *b.bucket
	var bucket = (*common.InBucket)(unsafe.Pointer(&value[0]))
	*bucket = *b.InBucket

	// Convert byte slice to a fake page and write the root node.
	var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
	var p = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize]))
	n.write(p)

	return value
@ -640,11 +856,11 @@ func (b *Bucket) rebalance() {
}

// node creates a node from a page and associates it with a given parent.
func (b *Bucket) node(pgid pgid, parent *node) *node {
	_assert(b.nodes != nil, "nodes map expected")
func (b *Bucket) node(pgId common.Pgid, parent *node) *node {
	common.Assert(b.nodes != nil, "nodes map expected")

	// Retrieve node if it's already been created.
	if n := b.nodes[pgid]; n != nil {
	if n := b.nodes[pgId]; n != nil {
		return n
	}

@ -659,34 +875,40 @@ func (b *Bucket) node(pgid pgid, parent *node) *node {
	// Use the inline page if this is an inline bucket.
	var p = b.page
	if p == nil {
		p = b.tx.page(pgid)
		p = b.tx.page(pgId)
	} else {
		// if p isn't nil, then it's an inline bucket.
		// The pgId must be 0 in this case.
		common.Verify(func() {
			common.Assert(pgId == 0, "The page ID (%d) isn't 0 for an inline bucket", pgId)
		})
	}

	// Read the page into the node and cache it.
	n.read(p)
	b.nodes[pgid] = n
	b.nodes[pgId] = n

	// Update statistics.
	b.tx.stats.NodeCount++
	b.tx.stats.IncNodeCount(1)

	return n
}

// free recursively frees all pages in the bucket.
func (b *Bucket) free() {
	if b.root == 0 {
	if b.RootPage() == 0 {
		return
	}

	var tx = b.tx
	b.forEachPageNode(func(p *page, n *node, _ int) {
	b.forEachPageNode(func(p *common.Page, n *node, _ int) {
		if p != nil {
			tx.db.freelist.free(tx.meta.txid, p)
			tx.db.freelist.Free(tx.meta.Txid(), p)
		} else {
			n.free()
		}
	})
	b.root = 0
	b.SetRootPage(0)
}

// dereference removes all references to the old mmap.
@ -701,11 +923,11 @@ func (b *Bucket) dereference() {
}

// pageNode returns the in-memory node, if it exists.
// Otherwise returns the underlying page.
func (b *Bucket) pageNode(id pgid) (*page, *node) {
// Otherwise, returns the underlying page.
func (b *Bucket) pageNode(id common.Pgid) (*common.Page, *node) {
	// Inline buckets have a fake page embedded in their value so treat them
	// differently. We'll return the rootNode (if available) or the fake page.
	if b.root == 0 {
	if b.RootPage() == 0 {
		if id != 0 {
			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
		}
@ -775,3 +997,9 @@ func cloneBytes(v []byte) []byte {
	copy(clone, v)
	return clone
}

type BucketStructure struct {
	Name     string            `json:"name"`              // name of the bucket
	KeyN     int               `json:"keyN"`              // number of key/value pairs
	Children []BucketStructure `json:"buckets,omitempty"` // child buckets
}

119
vendor/go.etcd.io/bbolt/compact.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
package bbolt

// Compact will create a copy of the source DB in the destination DB. This may
// reclaim space that the source database no longer has use for. txMaxSize can be
// used to limit the transaction size of this process and may trigger intermittent
// commits. A value of zero will ignore transaction sizes.
// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349
func Compact(dst, src *DB, txMaxSize int64) error {
	// commit regularly, or we'll run out of memory for large datasets if using one transaction.
	var size int64
	tx, err := dst.Begin(true)
	if err != nil {
		return err
	}
	defer func() {
		if tempErr := tx.Rollback(); tempErr != nil {
			err = tempErr
		}
	}()

	if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
		// On each key/value, check if we have exceeded tx size.
		sz := int64(len(k) + len(v))
		if size+sz > txMaxSize && txMaxSize != 0 {
			// Commit previous transaction.
			if err := tx.Commit(); err != nil {
				return err
			}

			// Start new transaction.
			tx, err = dst.Begin(true)
			if err != nil {
				return err
			}
			size = 0
		}
		size += sz

		// Create bucket on the root transaction if this is the first level.
		nk := len(keys)
		if nk == 0 {
			bkt, err := tx.CreateBucket(k)
			if err != nil {
				return err
			}
			if err := bkt.SetSequence(seq); err != nil {
				return err
			}
			return nil
		}

		// Create buckets on subsequent levels, if necessary.
		b := tx.Bucket(keys[0])
		if nk > 1 {
			for _, k := range keys[1:] {
				b = b.Bucket(k)
			}
		}

		// Fill the entire page for best compaction.
		b.FillPercent = 1.0

		// If there is no value then this is a bucket call.
		if v == nil {
			bkt, err := b.CreateBucket(k)
			if err != nil {
				return err
			}
			if err := bkt.SetSequence(seq); err != nil {
				return err
			}
			return nil
		}

		// Otherwise treat it as a key/value pair.
		return b.Put(k, v)
	}); err != nil {
		return err
	}
	err = tx.Commit()

	return err
}

// walkFunc is the type of the function called for keys (buckets and "normal"
// values) discovered by Walk. keys is the list of keys to descend to the bucket
// owning the discovered key/value pair k/v.
type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error

// walk walks recursively the bolt database db, calling walkFn for each key it finds.
func walk(db *DB, walkFn walkFunc) error {
	return db.View(func(tx *Tx) error {
		return tx.ForEach(func(name []byte, b *Bucket) error {
			return walkBucket(b, nil, name, nil, b.Sequence(), walkFn)
		})
	})
}

func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error {
	// Execute callback.
	if err := fn(keypath, k, v, seq); err != nil {
		return err
	}

	// If this is not a bucket then stop.
	if v != nil {
		return nil
	}

	// Iterate over each child key/value.
	keypath = append(keypath, k)
	return b.ForEach(func(k, v []byte) error {
		if v == nil {
			bkt := b.Bucket(k)
			return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn)
		}
		return walkBucket(b, keypath, k, v, b.Sequence(), fn)
	})
}

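compact.go is new in this vendored version and appears to promote logic previously shipped with the bbolt CLI into the library: Compact streams every bucket and key from src into dst in key order with FillPercent = 1.0, committing every txMaxSize bytes. A sketch of offline compaction (file paths illustrative; neither file should be open elsewhere):

    src, err := bolt.Open("app.db", 0600, &bolt.Options{ReadOnly: true})
    if err != nil {
        log.Fatal(err)
    }
    defer src.Close()

    dst, err := bolt.Open("app.compacted.db", 0600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer dst.Close()

    // 64 KiB per transaction; 0 would copy everything in one transaction.
    if err := bolt.Compact(dst, src, 64*1024); err != nil {
        log.Fatal(err)
    }
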
194
vendor/go.etcd.io/bbolt/cursor.go
generated
vendored
@ -4,9 +4,13 @@ import (
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"go.etcd.io/bbolt/errors"
|
||||
"go.etcd.io/bbolt/internal/common"
|
||||
)
|
||||
|
||||
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
|
||||
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket
|
||||
// in lexicographical order.
|
||||
// Cursors see nested buckets with value == nil.
|
||||
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
|
||||
//
|
||||
@ -29,11 +33,19 @@ func (c *Cursor) Bucket() *Bucket {
|
||||
// If the bucket is empty then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) First() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
common.Assert(c.bucket.tx.db != nil, "tx closed")
|
||||
k, v, flags := c.first()
|
||||
if (flags & uint32(common.BucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
}
|
||||
|
||||
func (c *Cursor) first() (key []byte, value []byte, flags uint32) {
|
||||
c.stack = c.stack[:0]
|
||||
p, n := c.bucket.pageNode(c.bucket.root)
|
||||
p, n := c.bucket.pageNode(c.bucket.RootPage())
|
||||
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
|
||||
c.first()
|
||||
c.goToFirstElementOnTheStack()
|
||||
|
||||
// If we land on an empty page then move to the next value.
|
||||
// https://github.com/boltdb/bolt/issues/450
|
||||
@ -42,26 +54,36 @@ func (c *Cursor) First() (key []byte, value []byte) {
|
||||
}
|
||||
|
||||
k, v, flags := c.keyValue()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
if (flags & uint32(common.BucketLeafFlag)) != 0 {
|
||||
return k, nil, flags
|
||||
}
|
||||
return k, v
|
||||
|
||||
return k, v, flags
|
||||
}
|
||||
|
||||
// Last moves the cursor to the last item in the bucket and returns its key and value.
|
||||
// If the bucket is empty then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Last() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
common.Assert(c.bucket.tx.db != nil, "tx closed")
|
||||
c.stack = c.stack[:0]
|
||||
p, n := c.bucket.pageNode(c.bucket.root)
|
||||
p, n := c.bucket.pageNode(c.bucket.RootPage())
|
||||
ref := elemRef{page: p, node: n}
|
||||
ref.index = ref.count() - 1
|
||||
c.stack = append(c.stack, ref)
|
||||
c.last()
|
||||
|
||||
// If this is an empty page (calling Delete may result in empty pages)
|
||||
// we call prev to find the last page that is not empty
|
||||
for len(c.stack) > 1 && c.stack[len(c.stack)-1].count() == 0 {
|
||||
c.prev()
|
||||
}
|
||||
|
||||
if len(c.stack) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
k, v, flags := c.keyValue()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
if (flags & uint32(common.BucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
@ -71,9 +93,9 @@ func (c *Cursor) Last() (key []byte, value []byte) {
|
||||
// If the cursor is at the end of the bucket then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Next() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
common.Assert(c.bucket.tx.db != nil, "tx closed")
|
||||
k, v, flags := c.next()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
if (flags & uint32(common.BucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
@ -83,38 +105,21 @@ func (c *Cursor) Next() (key []byte, value []byte) {
|
||||
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Prev() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
|
||||
// Attempt to move back one element until we're successful.
|
||||
// Move up the stack as we hit the beginning of each page in our stack.
|
||||
for i := len(c.stack) - 1; i >= 0; i-- {
|
||||
elem := &c.stack[i]
|
||||
if elem.index > 0 {
|
||||
elem.index--
|
||||
break
|
||||
}
|
||||
c.stack = c.stack[:i]
|
||||
}
|
||||
|
||||
// If we've hit the end then return nil.
|
||||
if len(c.stack) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Move down the stack to find the last element of the last leaf under this branch.
|
||||
c.last()
|
||||
k, v, flags := c.keyValue()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
common.Assert(c.bucket.tx.db != nil, "tx closed")
|
||||
k, v, flags := c.prev()
|
||||
if (flags & uint32(common.BucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
}
|
||||
|
||||
// Seek moves the cursor to a given key and returns it.
|
||||
// Seek moves the cursor to a given key using a b-tree search and returns it.
|
||||
// If the key does not exist then the next key is used. If no keys
|
||||
// follow, a nil key is returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
|
||||
common.Assert(c.bucket.tx.db != nil, "tx closed")
|
||||
|
||||
k, v, flags := c.seek(seek)
|
||||
|
||||
// If we ended up after the last element of a page then move to the next one.
|
||||
@ -124,7 +129,7 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
|
||||
|
||||
if k == nil {
|
||||
return nil, nil
|
||||
} else if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
} else if (flags & uint32(common.BucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
@ -134,15 +139,15 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
|
||||
// Delete fails if current key/value is a bucket or if the transaction is not writable.
|
||||
func (c *Cursor) Delete() error {
|
||||
if c.bucket.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
return errors.ErrTxClosed
|
||||
} else if !c.bucket.Writable() {
|
||||
return ErrTxNotWritable
|
||||
return errors.ErrTxNotWritable
|
||||
}
|
||||
|
||||
key, _, flags := c.keyValue()
|
||||
// Return an error if current value is a bucket.
|
||||
if (flags & bucketLeafFlag) != 0 {
|
||||
return ErrIncompatibleValue
|
||||
if (flags & common.BucketLeafFlag) != 0 {
|
||||
return errors.ErrIncompatibleValue
|
||||
}
|
||||
c.node().del(key)
|
||||
|
||||
@ -152,18 +157,16 @@ func (c *Cursor) Delete() error {
|
||||
// seek moves the cursor to a given key and returns it.
|
||||
// If the key does not exist then the next key is used.
|
||||
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
|
||||
// Start from root page/node and traverse to correct page.
|
||||
c.stack = c.stack[:0]
|
||||
c.search(seek, c.bucket.root)
|
||||
c.search(seek, c.bucket.RootPage())
|
||||
|
||||
// If this is a bucket then return a nil value.
|
||||
return c.keyValue()
|
||||
}
|
||||
|
||||
// first moves the cursor to the first leaf element under the last page in the stack.
|
||||
func (c *Cursor) first() {
|
||||
func (c *Cursor) goToFirstElementOnTheStack() {
|
||||
for {
|
||||
// Exit when we hit a leaf page.
|
||||
var ref = &c.stack[len(c.stack)-1]
|
||||
@ -172,13 +175,13 @@ func (c *Cursor) first() {
|
||||
}
|
||||
|
||||
// Keep adding pages pointing to the first element to the stack.
|
||||
var pgid pgid
|
||||
var pgId common.Pgid
|
||||
if ref.node != nil {
|
||||
pgid = ref.node.inodes[ref.index].pgid
|
||||
pgId = ref.node.inodes[ref.index].Pgid()
|
||||
} else {
|
||||
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
|
||||
pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid()
|
||||
}
|
||||
p, n := c.bucket.pageNode(pgid)
|
||||
p, n := c.bucket.pageNode(pgId)
|
||||
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
|
||||
}
|
||||
}
|
||||
@ -193,13 +196,13 @@ func (c *Cursor) last() {
|
||||
}
|
||||
|
||||
// Keep adding pages pointing to the last element in the stack.
|
||||
var pgid pgid
|
||||
var pgId common.Pgid
|
||||
if ref.node != nil {
|
||||
pgid = ref.node.inodes[ref.index].pgid
|
||||
pgId = ref.node.inodes[ref.index].Pgid()
|
||||
} else {
|
||||
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
|
||||
pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid()
|
||||
}
|
||||
p, n := c.bucket.pageNode(pgid)
|
||||
p, n := c.bucket.pageNode(pgId)
|
||||
|
||||
var nextRef = elemRef{page: p, node: n}
|
||||
nextRef.index = nextRef.count() - 1
|
||||
@ -231,7 +234,7 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
|
||||
// Otherwise start from where we left off in the stack and find the
|
||||
// first element of the first leaf page.
|
||||
c.stack = c.stack[:i+1]
|
||||
c.first()
|
||||
c.goToFirstElementOnTheStack()
|
||||
|
||||
// If this is an empty page then restart and move back up the stack.
|
||||
// https://github.com/boltdb/bolt/issues/450
|
||||
@ -243,11 +246,44 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
|
||||
}
|
||||
}
|
||||
|
||||
// prev moves the cursor to the previous item in the bucket and returns its key and value.
|
||||
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
|
||||
func (c *Cursor) prev() (key []byte, value []byte, flags uint32) {
|
||||
// Attempt to move back one element until we're successful.
|
||||
// Move up the stack as we hit the beginning of each page in our stack.
|
||||
for i := len(c.stack) - 1; i >= 0; i-- {
|
||||
elem := &c.stack[i]
|
||||
if elem.index > 0 {
|
||||
elem.index--
|
||||
break
|
||||
}
|
||||
// If we've hit the beginning, we should stop moving the cursor,
|
||||
// and stay at the first element, so that users can continue to
|
||||
// iterate over the elements in reverse direction by calling `Next`.
|
||||
// We should return nil in such case.
|
||||
// Refer to https://github.com/etcd-io/bbolt/issues/733
|
||||
if len(c.stack) == 1 {
|
||||
c.first()
|
||||
return nil, nil, 0
|
||||
}
|
||||
c.stack = c.stack[:i]
|
||||
}
|
||||
|
||||
// If we've hit the end then return nil.
|
||||
if len(c.stack) == 0 {
|
||||
return nil, nil, 0
|
||||
}
|
||||
|
||||
// Move down the stack to find the last element of the last leaf under this branch.
|
||||
c.last()
|
||||
return c.keyValue()
|
||||
}
|
||||
|
||||
// search recursively performs a binary search against a given page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) {
	p, n := c.bucket.pageNode(pgid)
	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
func (c *Cursor) search(key []byte, pgId common.Pgid) {
	p, n := c.bucket.pageNode(pgId)
	if p != nil && !p.IsBranchPage() && !p.IsLeafPage() {
		panic(fmt.Sprintf("invalid page type: %d: %x", p.Id(), p.Flags()))
	}
	e := elemRef{page: p, node: n}
	c.stack = append(c.stack, e)
@ -270,7 +306,7 @@ func (c *Cursor) searchNode(key []byte, n *node) {
	index := sort.Search(len(n.inodes), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(n.inodes[i].key, key)
		ret := bytes.Compare(n.inodes[i].Key(), key)
		if ret == 0 {
			exact = true
		}
@ -282,18 +318,18 @@ func (c *Cursor) searchNode(key []byte, n *node) {
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, n.inodes[index].pgid)
	c.search(key, n.inodes[index].Pgid())
}

func (c *Cursor) searchPage(key []byte, p *page) {
func (c *Cursor) searchPage(key []byte, p *common.Page) {
	// Binary search for the correct range.
	inodes := p.branchPageElements()
	inodes := p.BranchPageElements()

	var exact bool
	index := sort.Search(int(p.count), func(i int) bool {
	index := sort.Search(int(p.Count()), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(inodes[i].key(), key)
		ret := bytes.Compare(inodes[i].Key(), key)
		if ret == 0 {
			exact = true
		}
@ -305,7 +341,7 @@ func (c *Cursor) searchPage(key []byte, p *page) {
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, inodes[index].pgid)
	c.search(key, inodes[index].Pgid())
}

// nsearch searches the leaf node on the top of the stack for a key.
@ -316,16 +352,16 @@ func (c *Cursor) nsearch(key []byte) {
	// If we have a node then search its inodes.
	if n != nil {
		index := sort.Search(len(n.inodes), func(i int) bool {
			return bytes.Compare(n.inodes[i].key, key) != -1
			return bytes.Compare(n.inodes[i].Key(), key) != -1
		})
		e.index = index
		return
	}

	// If we have a page then search its leaf elements.
	inodes := p.leafPageElements()
	index := sort.Search(int(p.count), func(i int) bool {
		return bytes.Compare(inodes[i].key(), key) != -1
	inodes := p.LeafPageElements()
	index := sort.Search(int(p.Count()), func(i int) bool {
		return bytes.Compare(inodes[i].Key(), key) != -1
	})
	e.index = index
}
@ -342,17 +378,17 @@ func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
	// Retrieve value from node.
	if ref.node != nil {
		inode := &ref.node.inodes[ref.index]
		return inode.key, inode.value, inode.flags
		return inode.Key(), inode.Value(), inode.Flags()
	}

	// Or retrieve value from page.
	elem := ref.page.leafPageElement(uint16(ref.index))
	return elem.key(), elem.value(), elem.flags
	elem := ref.page.LeafPageElement(uint16(ref.index))
	return elem.Key(), elem.Value(), elem.Flags()
}

// node returns the node that the cursor is currently positioned on.
func (c *Cursor) node() *node {
	_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
	common.Assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")

	// If the top of the stack is a leaf node then just return it.
	if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
@ -362,19 +398,19 @@ func (c *Cursor) node() *node {
	// Start from root and traverse down the hierarchy.
	var n = c.stack[0].node
	if n == nil {
		n = c.bucket.node(c.stack[0].page.id, nil)
		n = c.bucket.node(c.stack[0].page.Id(), nil)
	}
	for _, ref := range c.stack[:len(c.stack)-1] {
		_assert(!n.isLeaf, "expected branch node")
		common.Assert(!n.isLeaf, "expected branch node")
		n = n.childAt(ref.index)
	}
	_assert(n.isLeaf, "expected leaf node")
	common.Assert(n.isLeaf, "expected leaf node")
	return n
}

// elemRef represents a reference to an element on a given page/node.
type elemRef struct {
	page  *page
	page  *common.Page
	node  *node
	index int
}
@ -384,7 +420,7 @@ func (r *elemRef) isLeaf() bool {
	if r.node != nil {
		return r.node.isLeaf
	}
	return (r.page.flags & leafPageFlag) != 0
	return r.page.IsLeafPage()
}

// count returns the number of inodes or page elements.
@ -392,5 +428,5 @@ func (r *elemRef) count() int {
	if r.node != nil {
		return len(r.node.inodes)
	}
	return int(r.page.count)
	return int(r.page.Count())
}

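The cursor's public contract is unchanged; the notable behavioral fix is the new prev helper (issue #733): hitting the beginning now parks the cursor on the first element instead of leaving the stack unusable, so a reverse scan can be resumed with Next. A typical prefix scan sketch (assumes bytes and fmt are imported, db as above):

    _ = db.View(func(tx *bolt.Tx) error {
        c := tx.Bucket([]byte("users")).Cursor()
        prefix := []byte("user:")
        for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
            fmt.Printf("%s=%s\n", k, v)
        }
        return nil
    })
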
706
vendor/go.etcd.io/bbolt/db.go
generated
vendored
File diff suppressed because it is too large
8
vendor/go.etcd.io/bbolt/doc.go
generated
vendored
@ -14,8 +14,7 @@ The design of Bolt is based on Howard Chu's LMDB database project.
|
||||
Bolt currently works on Windows, Mac OS X, and Linux.
|
||||
|
||||
|
||||
Basics
|
||||
# Basics
|
||||
|
||||
There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
|
||||
a collection of buckets and is represented by a single file on disk. A bucket is
|
||||
@ -27,8 +26,7 @@ iterate over the dataset sequentially. Read-write transactions can create and
|
||||
delete buckets and can insert and remove keys. Only one read-write transaction
|
||||
is allowed at a time.
|
||||
|
||||
|
||||
Caveats
|
||||
# Caveats
|
||||
|
||||
The database uses a read-only, memory-mapped data file to ensure that
|
||||
applications cannot corrupt the database, however, this means that keys and
|
||||
@ -38,7 +36,5 @@ will cause Go to panic.
|
||||
Keys and values retrieved from the database are only valid for the life of
|
||||
the transaction. When used outside the transaction, these byte slices can
|
||||
point to different data or can point to invalid memory which will cause a panic.
|
||||
|
||||
|
||||
*/
|
||||
package bbolt
|
||||
|
79
vendor/go.etcd.io/bbolt/errors.go
generated
vendored
@ -1,71 +1,108 @@
package bbolt

import "errors"
import "go.etcd.io/bbolt/errors"

// These errors can be returned when opening or calling methods on a DB.
var (
	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
	// is opened or after it is closed.
	ErrDatabaseNotOpen = errors.New("database not open")

	// ErrDatabaseOpen is returned when opening a database that is
	// already open.
	ErrDatabaseOpen = errors.New("database already open")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrDatabaseNotOpen = errors.ErrDatabaseNotOpen

	// ErrInvalid is returned when both meta pages on a database are invalid.
	// This typically occurs when a file is not a bolt database.
	ErrInvalid = errors.New("invalid database")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrInvalid = errors.ErrInvalid

	// ErrInvalidMapping is returned when the database file fails to get mapped.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrInvalidMapping = errors.ErrInvalidMapping

	// ErrVersionMismatch is returned when the data file was created with a
	// different version of Bolt.
	ErrVersionMismatch = errors.New("version mismatch")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrVersionMismatch = errors.ErrVersionMismatch

	// ErrChecksum is returned when either meta page checksum does not match.
	ErrChecksum = errors.New("checksum error")
	// ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrChecksum = errors.ErrChecksum

	// ErrTimeout is returned when a database cannot obtain an exclusive lock
	// on the data file after the timeout passed to Open().
	ErrTimeout = errors.New("timeout")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrTimeout = errors.ErrTimeout
)

// These errors can occur when beginning or committing a Tx.
var (
	// ErrTxNotWritable is returned when performing a write operation on a
	// read-only transaction.
	ErrTxNotWritable = errors.New("tx not writable")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrTxNotWritable = errors.ErrTxNotWritable

	// ErrTxClosed is returned when committing or rolling back a transaction
	// that has already been committed or rolled back.
	ErrTxClosed = errors.New("tx closed")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrTxClosed = errors.ErrTxClosed

	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
	// read-only database.
	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrDatabaseReadOnly = errors.ErrDatabaseReadOnly

	// ErrFreePagesNotLoaded is returned when a readonly transaction without
	// preloading the free pages is trying to access the free pages.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrFreePagesNotLoaded = errors.ErrFreePagesNotLoaded
)

// These errors can occur when putting or deleting a value or a bucket.
var (
	// ErrBucketNotFound is returned when trying to access a bucket that has
	// not been created yet.
	ErrBucketNotFound = errors.New("bucket not found")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrBucketNotFound = errors.ErrBucketNotFound

	// ErrBucketExists is returned when creating a bucket that already exists.
	ErrBucketExists = errors.New("bucket already exists")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrBucketExists = errors.ErrBucketExists

	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
	ErrBucketNameRequired = errors.New("bucket name required")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrBucketNameRequired = errors.ErrBucketNameRequired

	// ErrKeyRequired is returned when inserting a zero-length key.
	ErrKeyRequired = errors.New("key required")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrKeyRequired = errors.ErrKeyRequired

	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
	ErrKeyTooLarge = errors.New("key too large")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrKeyTooLarge = errors.ErrKeyTooLarge

	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
	ErrValueTooLarge = errors.New("value too large")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrValueTooLarge = errors.ErrValueTooLarge

	// ErrIncompatibleValue is returned when trying create or delete a bucket
	// on an existing non-bucket key or when trying to create or delete a
	// non-bucket key on an existing bucket key.
	ErrIncompatibleValue = errors.New("incompatible value")
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrIncompatibleValue = errors.ErrIncompatibleValue
)

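The top-level error variables are now deprecated aliases for the new go.etcd.io/bbolt/errors package, so existing comparisons keep working: old and new names refer to the same error value. A sketch (berrors is an illustrative import alias; assumes the standard library errors package is also imported, db as above):

    import berrors "go.etcd.io/bbolt/errors"

    err = db.Update(func(tx *bolt.Tx) error {
        return tx.DeleteBucket([]byte("missing"))
    })
    if errors.Is(err, berrors.ErrBucketNotFound) {
        // bolt.ErrBucketNotFound would match here too: it is the same variable.
    }
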
84
vendor/go.etcd.io/bbolt/errors/errors.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
// Package errors defines the error variables that may be returned
// during bbolt operations.
package errors

import "errors"

// These errors can be returned when opening or calling methods on a DB.
var (
	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
	// is opened or after it is closed.
	ErrDatabaseNotOpen = errors.New("database not open")

	// ErrInvalid is returned when both meta pages on a database are invalid.
	// This typically occurs when a file is not a bolt database.
	ErrInvalid = errors.New("invalid database")

	// ErrInvalidMapping is returned when the database file fails to get mapped.
	ErrInvalidMapping = errors.New("database isn't correctly mapped")

	// ErrVersionMismatch is returned when the data file was created with a
	// different version of Bolt.
	ErrVersionMismatch = errors.New("version mismatch")

	// ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages.
	ErrChecksum = errors.New("checksum error")

	// ErrTimeout is returned when a database cannot obtain an exclusive lock
	// on the data file after the timeout passed to Open().
	ErrTimeout = errors.New("timeout")
)

// These errors can occur when beginning or committing a Tx.
var (
	// ErrTxNotWritable is returned when performing a write operation on a
	// read-only transaction.
	ErrTxNotWritable = errors.New("tx not writable")

	// ErrTxClosed is returned when committing or rolling back a transaction
	// that has already been committed or rolled back.
	ErrTxClosed = errors.New("tx closed")

	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
	// read-only database.
	ErrDatabaseReadOnly = errors.New("database is in read-only mode")

	// ErrFreePagesNotLoaded is returned when a readonly transaction without
	// preloading the free pages is trying to access the free pages.
	ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded")
)

// These errors can occur when putting or deleting a value or a bucket.
var (
	// ErrBucketNotFound is returned when trying to access a bucket that has
	// not been created yet.
	ErrBucketNotFound = errors.New("bucket not found")

	// ErrBucketExists is returned when creating a bucket that already exists.
	ErrBucketExists = errors.New("bucket already exists")

	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
	ErrBucketNameRequired = errors.New("bucket name required")

	// ErrKeyRequired is returned when inserting a zero-length key.
	ErrKeyRequired = errors.New("key required")

	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
	ErrKeyTooLarge = errors.New("key too large")

	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
	ErrValueTooLarge = errors.New("value too large")

	// ErrIncompatibleValue is returned when trying to create or delete a bucket
	// on an existing non-bucket key or when trying to create or delete a
	// non-bucket key on an existing bucket key.
	ErrIncompatibleValue = errors.New("incompatible value")

	// ErrSameBuckets is returned when trying to move a sub-bucket between
	// source and target buckets, while source and target buckets are the same.
	ErrSameBuckets = errors.New("the source and target are the same bucket")

	// ErrDifferentDB is returned when trying to move a sub-bucket between
	// source and target buckets, while source and target buckets are in different database files.
	ErrDifferentDB = errors.New("the source and target buckets are in different database files")
)

404
vendor/go.etcd.io/bbolt/freelist.go
generated
vendored
@ -1,404 +0,0 @@
package bbolt

import (
	"fmt"
	"sort"
	"unsafe"
)

// txPending holds a list of pgids and corresponding allocation txns
// that are pending to be freed.
type txPending struct {
	ids              []pgid
	alloctx          []txid // txids allocating the ids
	lastReleaseBegin txid   // beginning txid of last matching releaseRange
}

// pidSet holds the set of starting pgids which have the same span size
type pidSet map[pgid]struct{}

// freelist represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
type freelist struct {
	freelistType   FreelistType                // freelist type
	ids            []pgid                      // all free and available free page ids.
	allocs         map[pgid]txid               // mapping of txid that allocated a pgid.
	pending        map[txid]*txPending         // mapping of soon-to-be free page ids by tx.
	cache          map[pgid]bool               // fast lookup of all free and pending page ids.
	freemaps       map[uint64]pidSet           // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size
	forwardMap     map[pgid]uint64             // key is start pgid, value is its span size
	backwardMap    map[pgid]uint64             // key is end pgid, value is its span size
	allocate       func(txid txid, n int) pgid // the freelist allocate func
	free_count     func() int                  // the function which gives you free page number
	mergeSpans     func(ids pgids)             // the mergeSpan func
	getFreePageIDs func() []pgid               // get free pgids func
	readIDs        func(pgids []pgid)          // readIDs func reads list of pages and init the freelist
}

// newFreelist returns an empty, initialized freelist.
func newFreelist(freelistType FreelistType) *freelist {
	f := &freelist{
		freelistType: freelistType,
		allocs:       make(map[pgid]txid),
		pending:      make(map[txid]*txPending),
		cache:        make(map[pgid]bool),
		freemaps:     make(map[uint64]pidSet),
		forwardMap:   make(map[pgid]uint64),
		backwardMap:  make(map[pgid]uint64),
	}

	if freelistType == FreelistMapType {
		f.allocate = f.hashmapAllocate
		f.free_count = f.hashmapFreeCount
		f.mergeSpans = f.hashmapMergeSpans
		f.getFreePageIDs = f.hashmapGetFreePageIDs
		f.readIDs = f.hashmapReadIDs
	} else {
		f.allocate = f.arrayAllocate
		f.free_count = f.arrayFreeCount
		f.mergeSpans = f.arrayMergeSpans
		f.getFreePageIDs = f.arrayGetFreePageIDs
		f.readIDs = f.arrayReadIDs
	}

	return f
}

// size returns the size of the page after serialization.
func (f *freelist) size() int {
	n := f.count()
	if n >= 0xFFFF {
		// The first element will be used to store the count. See freelist.write.
		n++
	}
	return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n)
}

// count returns count of pages on the freelist
func (f *freelist) count() int {
	return f.free_count() + f.pending_count()
}

// arrayFreeCount returns count of free pages(array version)
func (f *freelist) arrayFreeCount() int {
	return len(f.ids)
}

// pending_count returns count of pending pages
func (f *freelist) pending_count() int {
	var count int
	for _, txp := range f.pending {
		count += len(txp.ids)
	}
	return count
}

// copyall copies a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
	m := make(pgids, 0, f.pending_count())
	for _, txp := range f.pending {
|
||||
m = append(m, txp.ids...)
|
||||
}
|
||||
sort.Sort(m)
|
||||
mergepgids(dst, f.getFreePageIDs(), m)
|
||||
}
|
||||
|
||||
// arrayAllocate returns the starting page id of a contiguous list of pages of a given size.
|
||||
// If a contiguous block cannot be found then 0 is returned.
|
||||
func (f *freelist) arrayAllocate(txid txid, n int) pgid {
|
||||
if len(f.ids) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var initial, previd pgid
|
||||
for i, id := range f.ids {
|
||||
if id <= 1 {
|
||||
panic(fmt.Sprintf("invalid page allocation: %d", id))
|
||||
}
|
||||
|
||||
// Reset initial page if this is not contiguous.
|
||||
if previd == 0 || id-previd != 1 {
|
||||
initial = id
|
||||
}
|
||||
|
||||
// If we found a contiguous block then remove it and return it.
|
||||
if (id-initial)+1 == pgid(n) {
|
||||
// If we're allocating off the beginning then take the fast path
|
||||
// and just adjust the existing slice. This will use extra memory
|
||||
// temporarily but the append() in free() will realloc the slice
|
||||
// as is necessary.
|
||||
if (i + 1) == n {
|
||||
f.ids = f.ids[i+1:]
|
||||
} else {
|
||||
copy(f.ids[i-n+1:], f.ids[i+1:])
|
||||
f.ids = f.ids[:len(f.ids)-n]
|
||||
}
|
||||
|
||||
// Remove from the free cache.
|
||||
for i := pgid(0); i < pgid(n); i++ {
|
||||
delete(f.cache, initial+i)
|
||||
}
|
||||
f.allocs[initial] = txid
|
||||
return initial
|
||||
}
|
||||
|
||||
previd = id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// free releases a page and its overflow for a given transaction id.
|
||||
// If the page is already free then a panic will occur.
|
||||
func (f *freelist) free(txid txid, p *page) {
|
||||
if p.id <= 1 {
|
||||
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
|
||||
}
|
||||
|
||||
// Free page and all its overflow pages.
|
||||
txp := f.pending[txid]
|
||||
if txp == nil {
|
||||
txp = &txPending{}
|
||||
f.pending[txid] = txp
|
||||
}
|
||||
allocTxid, ok := f.allocs[p.id]
|
||||
if ok {
|
||||
delete(f.allocs, p.id)
|
||||
} else if (p.flags & freelistPageFlag) != 0 {
|
||||
// Freelist is always allocated by prior tx.
|
||||
allocTxid = txid - 1
|
||||
}
|
||||
|
||||
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
|
||||
// Verify that page is not already free.
|
||||
if f.cache[id] {
|
||||
panic(fmt.Sprintf("page %d already freed", id))
|
||||
}
|
||||
// Add to the freelist and cache.
|
||||
txp.ids = append(txp.ids, id)
|
||||
txp.alloctx = append(txp.alloctx, allocTxid)
|
||||
f.cache[id] = true
|
||||
}
|
||||
}
|
||||
|
||||
// release moves all page ids for a transaction id (or older) to the freelist.
|
||||
func (f *freelist) release(txid txid) {
|
||||
m := make(pgids, 0)
|
||||
for tid, txp := range f.pending {
|
||||
if tid <= txid {
|
||||
// Move transaction's pending pages to the available freelist.
|
||||
// Don't remove from the cache since the page is still free.
|
||||
m = append(m, txp.ids...)
|
||||
delete(f.pending, tid)
|
||||
}
|
||||
}
|
||||
f.mergeSpans(m)
|
||||
}
|
||||
|
||||
// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
|
||||
func (f *freelist) releaseRange(begin, end txid) {
|
||||
if begin > end {
|
||||
return
|
||||
}
|
||||
var m pgids
|
||||
for tid, txp := range f.pending {
|
||||
if tid < begin || tid > end {
|
||||
continue
|
||||
}
|
||||
// Don't recompute freed pages if ranges haven't updated.
|
||||
if txp.lastReleaseBegin == begin {
|
||||
continue
|
||||
}
|
||||
for i := 0; i < len(txp.ids); i++ {
|
||||
if atx := txp.alloctx[i]; atx < begin || atx > end {
|
||||
continue
|
||||
}
|
||||
m = append(m, txp.ids[i])
|
||||
txp.ids[i] = txp.ids[len(txp.ids)-1]
|
||||
txp.ids = txp.ids[:len(txp.ids)-1]
|
||||
txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
|
||||
txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
|
||||
i--
|
||||
}
|
||||
txp.lastReleaseBegin = begin
|
||||
if len(txp.ids) == 0 {
|
||||
delete(f.pending, tid)
|
||||
}
|
||||
}
|
||||
f.mergeSpans(m)
|
||||
}
|
||||
|
||||
// rollback removes the pages from a given pending tx.
|
||||
func (f *freelist) rollback(txid txid) {
|
||||
// Remove page ids from cache.
|
||||
txp := f.pending[txid]
|
||||
if txp == nil {
|
||||
return
|
||||
}
|
||||
var m pgids
|
||||
for i, pgid := range txp.ids {
|
||||
delete(f.cache, pgid)
|
||||
tx := txp.alloctx[i]
|
||||
if tx == 0 {
|
||||
continue
|
||||
}
|
||||
if tx != txid {
|
||||
// Pending free aborted; restore page back to alloc list.
|
||||
f.allocs[pgid] = tx
|
||||
} else {
|
||||
// Freed page was allocated by this txn; OK to throw away.
|
||||
m = append(m, pgid)
|
||||
}
|
||||
}
|
||||
// Remove pages from pending list and mark as free if allocated by txid.
|
||||
delete(f.pending, txid)
|
||||
f.mergeSpans(m)
|
||||
}
|
||||
|
||||
// freed returns whether a given page is in the free list.
|
||||
func (f *freelist) freed(pgid pgid) bool {
|
||||
return f.cache[pgid]
|
||||
}
|
||||
|
||||
// read initializes the freelist from a freelist page.
|
||||
func (f *freelist) read(p *page) {
|
||||
if (p.flags & freelistPageFlag) == 0 {
|
||||
panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
|
||||
}
|
||||
// If the page.count is at the max uint16 value (64k) then it's considered
|
||||
// an overflow and the size of the freelist is stored as the first element.
|
||||
var idx, count = 0, int(p.count)
|
||||
if count == 0xFFFF {
|
||||
idx = 1
|
||||
c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
|
||||
count = int(c)
|
||||
if count < 0 {
|
||||
panic(fmt.Sprintf("leading element count %d overflows int", c))
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the list of page ids from the freelist.
|
||||
if count == 0 {
|
||||
f.ids = nil
|
||||
} else {
|
||||
var ids []pgid
|
||||
data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx)
|
||||
unsafeSlice(unsafe.Pointer(&ids), data, count)
|
||||
|
||||
// copy the ids, so we don't modify on the freelist page directly
|
||||
idsCopy := make([]pgid, count)
|
||||
copy(idsCopy, ids)
|
||||
// Make sure they're sorted.
|
||||
sort.Sort(pgids(idsCopy))
|
||||
|
||||
f.readIDs(idsCopy)
|
||||
}
|
||||
}
|
||||
|
||||
// arrayReadIDs initializes the freelist from a given list of ids.
|
||||
func (f *freelist) arrayReadIDs(ids []pgid) {
|
||||
f.ids = ids
|
||||
f.reindex()
|
||||
}
|
||||
|
||||
func (f *freelist) arrayGetFreePageIDs() []pgid {
|
||||
return f.ids
|
||||
}
|
||||
|
||||
// write writes the page ids onto a freelist page. All free and pending ids are
|
||||
// saved to disk since in the event of a program crash, all pending ids will
|
||||
// become free.
|
||||
func (f *freelist) write(p *page) error {
|
||||
// Combine the old free pgids and pgids waiting on an open transaction.
|
||||
|
||||
// Update the header flag.
|
||||
p.flags |= freelistPageFlag
|
||||
|
||||
// The page.count can only hold up to 64k elements so if we overflow that
|
||||
// number then we handle it by putting the size in the first element.
|
||||
l := f.count()
|
||||
if l == 0 {
|
||||
p.count = uint16(l)
|
||||
} else if l < 0xFFFF {
|
||||
p.count = uint16(l)
|
||||
var ids []pgid
|
||||
data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
|
||||
unsafeSlice(unsafe.Pointer(&ids), data, l)
|
||||
f.copyall(ids)
|
||||
} else {
|
||||
p.count = 0xFFFF
|
||||
var ids []pgid
|
||||
data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
|
||||
unsafeSlice(unsafe.Pointer(&ids), data, l+1)
|
||||
ids[0] = pgid(l)
|
||||
f.copyall(ids[1:])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// reload reads the freelist from a page and filters out pending items.
|
||||
func (f *freelist) reload(p *page) {
|
||||
f.read(p)
|
||||
|
||||
// Build a cache of only pending pages.
|
||||
pcache := make(map[pgid]bool)
|
||||
for _, txp := range f.pending {
|
||||
for _, pendingID := range txp.ids {
|
||||
pcache[pendingID] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Check each page in the freelist and build a new available freelist
|
||||
// with any pages not in the pending lists.
|
||||
var a []pgid
|
||||
for _, id := range f.getFreePageIDs() {
|
||||
if !pcache[id] {
|
||||
a = append(a, id)
|
||||
}
|
||||
}
|
||||
|
||||
f.readIDs(a)
|
||||
}
|
||||
|
||||
// noSyncReload reads the freelist from pgids and filters out pending items.
|
||||
func (f *freelist) noSyncReload(pgids []pgid) {
|
||||
// Build a cache of only pending pages.
|
||||
pcache := make(map[pgid]bool)
|
||||
for _, txp := range f.pending {
|
||||
for _, pendingID := range txp.ids {
|
||||
pcache[pendingID] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Check each page in the freelist and build a new available freelist
|
||||
// with any pages not in the pending lists.
|
||||
var a []pgid
|
||||
for _, id := range pgids {
|
||||
if !pcache[id] {
|
||||
a = append(a, id)
|
||||
}
|
||||
}
|
||||
|
||||
f.readIDs(a)
|
||||
}
|
||||
|
||||
// reindex rebuilds the free cache based on available and pending free lists.
|
||||
func (f *freelist) reindex() {
|
||||
ids := f.getFreePageIDs()
|
||||
f.cache = make(map[pgid]bool, len(ids))
|
||||
for _, id := range ids {
|
||||
f.cache[id] = true
|
||||
}
|
||||
for _, txp := range f.pending {
|
||||
for _, pendingID := range txp.ids {
|
||||
f.cache[pendingID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array
|
||||
func (f *freelist) arrayMergeSpans(ids pgids) {
|
||||
sort.Sort(ids)
|
||||
f.ids = pgids(f.ids).merge(ids)
|
||||
}
|
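Note (not part of the commit): arrayAllocate above scans the sorted id list for a run of n consecutive pages. A standalone sketch of the same scan on plain uint64 ids:

package main

import "fmt"

// allocate returns the first id of a run of n consecutive ids, or 0 if none exists.
func allocate(ids []uint64, n int) uint64 {
	var initial, previd uint64
	for _, id := range ids {
		// A gap breaks the run; restart the candidate run here.
		if previd == 0 || id-previd != 1 {
			initial = id
		}
		// The run reached length n: return its first id.
		if id-initial+1 == uint64(n) {
			return initial
		}
		previd = id
	}
	return 0
}

func main() {
	ids := []uint64{3, 4, 6, 7, 8, 12}
	fmt.Println(allocate(ids, 3)) // 6: the first run of three consecutive ids
	fmt.Println(allocate(ids, 4)) // 0: no run of four exists
}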
178
vendor/go.etcd.io/bbolt/freelist_hmap.go
generated
vendored
@ -1,178 +0,0 @@
package bbolt

import "sort"

// hashmapFreeCount returns the count of free pages (hashmap version)
func (f *freelist) hashmapFreeCount() int {
	// use the forwardMap to get the total count
	count := 0
	for _, size := range f.forwardMap {
		count += int(size)
	}
	return count
}

// hashmapAllocate serves the same purpose as arrayAllocate, but uses a hashmap as the backend
func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
	if n == 0 {
		return 0
	}

	// if we have an exact size match, take the short path and return it
	if bm, ok := f.freemaps[uint64(n)]; ok {
		for pid := range bm {
			// remove the span
			f.delSpan(pid, uint64(n))

			f.allocs[pid] = txid

			for i := pgid(0); i < pgid(n); i++ {
				delete(f.cache, pid+i)
			}
			return pid
		}
	}

	// look up the map to find a larger span
	for size, bm := range f.freemaps {
		if size < uint64(n) {
			continue
		}

		for pid := range bm {
			// remove the initial span
			f.delSpan(pid, uint64(size))

			f.allocs[pid] = txid

			remain := size - uint64(n)

			// add the remaining span
			f.addSpan(pid+pgid(n), remain)

			for i := pgid(0); i < pgid(n); i++ {
				delete(f.cache, pid+pgid(i))
			}
			return pid
		}
	}

	return 0
}

// hashmapReadIDs takes pgids as input and initializes the freelist (hashmap version)
func (f *freelist) hashmapReadIDs(pgids []pgid) {
	f.init(pgids)

	// Rebuild the page cache.
	f.reindex()
}

// hashmapGetFreePageIDs returns the sorted free page ids
func (f *freelist) hashmapGetFreePageIDs() []pgid {
	count := f.free_count()
	if count == 0 {
		return nil
	}

	m := make([]pgid, 0, count)
	for start, size := range f.forwardMap {
		for i := 0; i < int(size); i++ {
			m = append(m, start+pgid(i))
		}
	}
	sort.Sort(pgids(m))

	return m
}

// hashmapMergeSpans tries to merge the given pages (represented by pgids) with existing spans
func (f *freelist) hashmapMergeSpans(ids pgids) {
	for _, id := range ids {
		// try to see if we can merge and update
		f.mergeWithExistingSpan(id)
	}
}

// mergeWithExistingSpan merges pid into the existing free spans, trying to merge it backward and forward
func (f *freelist) mergeWithExistingSpan(pid pgid) {
	prev := pid - 1
	next := pid + 1

	preSize, mergeWithPrev := f.backwardMap[prev]
	nextSize, mergeWithNext := f.forwardMap[next]
	newStart := pid
	newSize := uint64(1)

	if mergeWithPrev {
		// merge with previous span
		start := prev + 1 - pgid(preSize)
		f.delSpan(start, preSize)

		newStart -= pgid(preSize)
		newSize += preSize
	}

	if mergeWithNext {
		// merge with next span
		f.delSpan(next, nextSize)
		newSize += nextSize
	}

	f.addSpan(newStart, newSize)
}

func (f *freelist) addSpan(start pgid, size uint64) {
	f.backwardMap[start-1+pgid(size)] = size
	f.forwardMap[start] = size
	if _, ok := f.freemaps[size]; !ok {
		f.freemaps[size] = make(map[pgid]struct{})
	}

	f.freemaps[size][start] = struct{}{}
}

func (f *freelist) delSpan(start pgid, size uint64) {
	delete(f.forwardMap, start)
	delete(f.backwardMap, start+pgid(size-1))
	delete(f.freemaps[size], start)
	if len(f.freemaps[size]) == 0 {
		delete(f.freemaps, size)
	}
}

// init initializes the freelist from pgids; used by the hashmap version.
// pgids must be sorted
func (f *freelist) init(pgids []pgid) {
	if len(pgids) == 0 {
		return
	}

	size := uint64(1)
	start := pgids[0]

	if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
		panic("pgids not sorted")
	}

	f.freemaps = make(map[uint64]pidSet)
	f.forwardMap = make(map[pgid]uint64)
	f.backwardMap = make(map[pgid]uint64)

	for i := 1; i < len(pgids); i++ {
		// continuous page
		if pgids[i] == pgids[i-1]+1 {
			size++
		} else {
			f.addSpan(start, size)

			size = 1
			start = pgids[i]
		}
	}

	// init the tail
	if size != 0 && start != 0 {
		f.addSpan(start, size)
	}
}
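Note (not part of the commit): the hashmap freelist keeps forward and backward maps so a freed page can be fused with its neighbouring spans in O(1). A minimal sketch of that bookkeeping with plain maps:

package main

import "fmt"

// spans indexes free runs twice: forward maps a span's start id to its
// length, backward maps its end id to the same length, so a freed page
// can find adjacent spans in O(1).
type spans struct {
	forward  map[uint64]uint64
	backward map[uint64]uint64
}

func (s *spans) add(start, size uint64) {
	s.forward[start] = size
	s.backward[start+size-1] = size
}

func (s *spans) del(start, size uint64) {
	delete(s.forward, start)
	delete(s.backward, start+size-1)
}

// free merges page id pid with any span ending at pid-1 or starting at pid+1.
func (s *spans) free(pid uint64) {
	start, size := pid, uint64(1)
	if n, ok := s.backward[pid-1]; ok { // extend the span on the left
		s.del(pid-n, n)
		start, size = pid-n, size+n
	}
	if n, ok := s.forward[pid+1]; ok { // absorb the span on the right
		s.del(pid+1, n)
		size += n
	}
	s.add(start, size)
}

func main() {
	s := &spans{forward: map[uint64]uint64{}, backward: map[uint64]uint64{}}
	s.add(3, 2) // pages 3-4 free
	s.add(6, 1) // page 6 free
	s.free(5)   // freeing page 5 fuses everything into one span 3-6
	fmt.Println(s.forward) // map[3:4]
}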
54
vendor/go.etcd.io/bbolt/internal/common/bucket.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
package common

import (
	"fmt"
	"unsafe"
)

const BucketHeaderSize = int(unsafe.Sizeof(InBucket{}))

// InBucket represents the on-file representation of a bucket.
// This is stored as the "value" of a bucket key. If the bucket is small enough,
// then its root page can be stored inline in the "value", after the bucket
// header. In the case of inline buckets, the "root" will be 0.
type InBucket struct {
	root     Pgid   // page id of the bucket's root-level page
	sequence uint64 // monotonically incrementing, used by NextSequence()
}

func NewInBucket(root Pgid, seq uint64) InBucket {
	return InBucket{
		root:     root,
		sequence: seq,
	}
}

func (b *InBucket) RootPage() Pgid {
	return b.root
}

func (b *InBucket) SetRootPage(id Pgid) {
	b.root = id
}

// InSequence returns the sequence. It isn't named `Sequence` in order
// to avoid clashing with `(*Bucket) Sequence()`.
func (b *InBucket) InSequence() uint64 {
	return b.sequence
}

func (b *InBucket) SetInSequence(v uint64) {
	b.sequence = v
}

func (b *InBucket) IncSequence() {
	b.sequence++
}

func (b *InBucket) InlinePage(v []byte) *Page {
	return (*Page)(unsafe.Pointer(&v[BucketHeaderSize]))
}

func (b *InBucket) String() string {
	return fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
}
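Note (not part of the commit): the InBucket comment above describes the inline-bucket layout: a 16-byte header (root, sequence), with the root page serialized directly after the header when root is 0. A sketch of that check, assuming a little-endian on-disk layout (bbolt writes structs in native byte order, which is little-endian on the usual platforms):

package main

import (
	"encoding/binary"
	"fmt"
)

const bucketHeaderSize = 16 // root Pgid (8 bytes) + sequence (8 bytes)

// isInline reports whether a bucket value carries its root page inline:
// the header's root field is 0 and page bytes follow the header.
func isInline(value []byte) bool {
	if len(value) < bucketHeaderSize {
		return false
	}
	root := binary.LittleEndian.Uint64(value[0:8])
	return root == 0 && len(value) > bucketHeaderSize
}

func main() {
	inline := make([]byte, bucketHeaderSize+32)      // zero root + fake page bytes
	regular := make([]byte, bucketHeaderSize)        // header only
	binary.LittleEndian.PutUint64(regular[0:8], 42)  // root page 42

	fmt.Println(isInline(inline), isInline(regular)) // true false
}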
115
vendor/go.etcd.io/bbolt/internal/common/inode.go
generated
vendored
Normal file
@ -0,0 +1,115 @@
package common

import "unsafe"

// Inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type Inode struct {
	flags uint32
	pgid  Pgid
	key   []byte
	value []byte
}

type Inodes []Inode

func (in *Inode) Flags() uint32 {
	return in.flags
}

func (in *Inode) SetFlags(flags uint32) {
	in.flags = flags
}

func (in *Inode) Pgid() Pgid {
	return in.pgid
}

func (in *Inode) SetPgid(id Pgid) {
	in.pgid = id
}

func (in *Inode) Key() []byte {
	return in.key
}

func (in *Inode) SetKey(key []byte) {
	in.key = key
}

func (in *Inode) Value() []byte {
	return in.value
}

func (in *Inode) SetValue(value []byte) {
	in.value = value
}

func ReadInodeFromPage(p *Page) Inodes {
	inodes := make(Inodes, int(p.Count()))
	isLeaf := p.IsLeafPage()
	for i := 0; i < int(p.Count()); i++ {
		inode := &inodes[i]
		if isLeaf {
			elem := p.LeafPageElement(uint16(i))
			inode.SetFlags(elem.Flags())
			inode.SetKey(elem.Key())
			inode.SetValue(elem.Value())
		} else {
			elem := p.BranchPageElement(uint16(i))
			inode.SetPgid(elem.Pgid())
			inode.SetKey(elem.Key())
		}
		Assert(len(inode.Key()) > 0, "read: zero-length inode key")
	}

	return inodes
}

func WriteInodeToPage(inodes Inodes, p *Page) uint32 {
	// Loop over each item and write it to the page.
	// off tracks the offset into p of the start of the next data.
	off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
	isLeaf := p.IsLeafPage()
	for i, item := range inodes {
		Assert(len(item.Key()) > 0, "write: zero-length inode key")

		// Create a slice to write into of needed size and advance
		// byte pointer for next iteration.
		sz := len(item.Key()) + len(item.Value())
		b := UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
		off += uintptr(sz)

		// Write the page element.
		if isLeaf {
			elem := p.LeafPageElement(uint16(i))
			elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))))
			elem.SetFlags(item.Flags())
			elem.SetKsize(uint32(len(item.Key())))
			elem.SetVsize(uint32(len(item.Value())))
		} else {
			elem := p.BranchPageElement(uint16(i))
			elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))))
			elem.SetKsize(uint32(len(item.Key())))
			elem.SetPgid(item.Pgid())
			Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred")
		}

		// Write data for the element to the end of the page.
		l := copy(b, item.Key())
		copy(b[l:], item.Value())
	}

	return uint32(off)
}

func UsedSpaceInPage(inodes Inodes, p *Page) uint32 {
	off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
	for _, item := range inodes {
		sz := len(item.Key()) + len(item.Value())
		off += uintptr(sz)
	}

	return uint32(off)
}
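Note (not part of the commit): WriteInodeToPage lays a page out as the fixed-size element headers first, followed by all key/value bytes at the tail; UsedSpaceInPage just sums those two regions. A standalone sketch of the size computation, with the 16-byte page header and leaf element sizes taken from the struct definitions in this diff:

package main

import "fmt"

const (
	pageHeaderSize      = 16 // id (8) + flags (2) + count (2) + overflow (4)
	leafPageElementSize = 16 // flags, pos, ksize, vsize: four uint32 fields
)

// usedSpace mirrors UsedSpaceInPage: fixed element headers come first,
// then all key/value bytes are appended at the tail of the page.
func usedSpace(pairs map[string]string) int {
	n := pageHeaderSize + leafPageElementSize*len(pairs)
	for k, v := range pairs {
		n += len(k) + len(v)
	}
	return n
}

func main() {
	pairs := map[string]string{"alpha": "1", "beta": "22"}
	fmt.Println(usedSpace(pairs)) // 16 + 2*16 + (5+1) + (4+2) = 60
}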
161
vendor/go.etcd.io/bbolt/internal/common/meta.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
package common

import (
	"fmt"
	"hash/fnv"
	"io"
	"unsafe"

	"go.etcd.io/bbolt/errors"
)

type Meta struct {
	magic    uint32
	version  uint32
	pageSize uint32
	flags    uint32
	root     InBucket
	freelist Pgid
	pgid     Pgid
	txid     Txid
	checksum uint64
}

// Validate checks the marker bytes and version of the meta page to ensure it matches this binary.
func (m *Meta) Validate() error {
	if m.magic != Magic {
		return errors.ErrInvalid
	} else if m.version != Version {
		return errors.ErrVersionMismatch
	} else if m.checksum != m.Sum64() {
		return errors.ErrChecksum
	}
	return nil
}

// Copy copies one meta object to another.
func (m *Meta) Copy(dest *Meta) {
	*dest = *m
}

// Write writes the meta onto a page.
func (m *Meta) Write(p *Page) {
	if m.root.root >= m.pgid {
		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
	} else if m.freelist >= m.pgid && m.freelist != PgidNoFreelist {
		// TODO: reject pgidNoFreeList if !NoFreelistSync
		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
	}

	// Page id is either going to be 0 or 1 which we can determine by the transaction ID.
	p.id = Pgid(m.txid % 2)
	p.SetFlags(MetaPageFlag)

	// Calculate the checksum.
	m.checksum = m.Sum64()

	m.Copy(p.Meta())
}

// Sum64 generates the checksum for the meta.
func (m *Meta) Sum64() uint64 {
	var h = fnv.New64a()
	_, _ = h.Write((*[unsafe.Offsetof(Meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
	return h.Sum64()
}

func (m *Meta) Magic() uint32 {
	return m.magic
}

func (m *Meta) SetMagic(v uint32) {
	m.magic = v
}

func (m *Meta) Version() uint32 {
	return m.version
}

func (m *Meta) SetVersion(v uint32) {
	m.version = v
}

func (m *Meta) PageSize() uint32 {
	return m.pageSize
}

func (m *Meta) SetPageSize(v uint32) {
	m.pageSize = v
}

func (m *Meta) Flags() uint32 {
	return m.flags
}

func (m *Meta) SetFlags(v uint32) {
	m.flags = v
}

func (m *Meta) SetRootBucket(b InBucket) {
	m.root = b
}

func (m *Meta) RootBucket() *InBucket {
	return &m.root
}

func (m *Meta) Freelist() Pgid {
	return m.freelist
}

func (m *Meta) SetFreelist(v Pgid) {
	m.freelist = v
}

func (m *Meta) IsFreelistPersisted() bool {
	return m.freelist != PgidNoFreelist
}

func (m *Meta) Pgid() Pgid {
	return m.pgid
}

func (m *Meta) SetPgid(id Pgid) {
	m.pgid = id
}

func (m *Meta) Txid() Txid {
	return m.txid
}

func (m *Meta) SetTxid(id Txid) {
	m.txid = id
}

func (m *Meta) IncTxid() {
	m.txid += 1
}

func (m *Meta) DecTxid() {
	m.txid -= 1
}

func (m *Meta) Checksum() uint64 {
	return m.checksum
}

func (m *Meta) SetChecksum(v uint64) {
	m.checksum = v
}

func (m *Meta) Print(w io.Writer) {
	fmt.Fprintf(w, "Version: %d\n", m.version)
	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
	fmt.Fprintf(w, "\n")
}
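Note (not part of the commit): Meta.Sum64 hashes every field up to, but excluding, the trailing checksum field by using unsafe.Offsetof as a constant array length. A self-contained sketch of the same checksum-over-prefix pattern:

package main

import (
	"fmt"
	"hash/fnv"
	"unsafe"
)

// header mirrors the layout trick in Meta.Sum64: checksum is the last
// field, so the hash covers the byte prefix [0, Offsetof(checksum)).
type header struct {
	magic    uint32
	version  uint32
	txid     uint64
	checksum uint64
}

func (h *header) sum64() uint64 {
	f := fnv.New64a()
	// unsafe.Offsetof is a compile-time constant, so it is a valid array length.
	_, _ = f.Write((*[unsafe.Offsetof(header{}.checksum)]byte)(unsafe.Pointer(h))[:])
	return f.Sum64()
}

func main() {
	h := &header{magic: 0xED0CDAED, version: 2, txid: 7}
	h.checksum = h.sum64()
	fmt.Println(h.sum64() == h.checksum) // true: the checksum field is excluded

	h.txid++ // any mutation before the checksum field invalidates it
	fmt.Println(h.sum64() == h.checksum) // false
}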
391
vendor/go.etcd.io/bbolt/internal/common/page.go
generated
vendored
Normal file
@ -0,0 +1,391 @@
package common

import (
	"fmt"
	"os"
	"sort"
	"unsafe"
)

const PageHeaderSize = unsafe.Sizeof(Page{})

const MinKeysPerPage = 2

const BranchPageElementSize = unsafe.Sizeof(branchPageElement{})
const LeafPageElementSize = unsafe.Sizeof(leafPageElement{})
const pgidSize = unsafe.Sizeof(Pgid(0))

const (
	BranchPageFlag   = 0x01
	LeafPageFlag     = 0x02
	MetaPageFlag     = 0x04
	FreelistPageFlag = 0x10
)

const (
	BucketLeafFlag = 0x01
)

type Pgid uint64

type Page struct {
	id       Pgid
	flags    uint16
	count    uint16
	overflow uint32
}

func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page {
	return &Page{
		id:       id,
		flags:    flags,
		count:    count,
		overflow: overflow,
	}
}

// Typ returns a human-readable page type string used for debugging.
func (p *Page) Typ() string {
	if p.IsBranchPage() {
		return "branch"
	} else if p.IsLeafPage() {
		return "leaf"
	} else if p.IsMetaPage() {
		return "meta"
	} else if p.IsFreelistPage() {
		return "freelist"
	}
	return fmt.Sprintf("unknown<%02x>", p.flags)
}

func (p *Page) IsBranchPage() bool {
	return p.flags == BranchPageFlag
}

func (p *Page) IsLeafPage() bool {
	return p.flags == LeafPageFlag
}

func (p *Page) IsMetaPage() bool {
	return p.flags == MetaPageFlag
}

func (p *Page) IsFreelistPage() bool {
	return p.flags == FreelistPageFlag
}

// Meta returns a pointer to the metadata section of the page.
func (p *Page) Meta() *Meta {
	return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
}

func (p *Page) FastCheck(id Pgid) {
	Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id)
	// Only one flag of page-type can be set.
	Assert(p.IsBranchPage() ||
		p.IsLeafPage() ||
		p.IsMetaPage() ||
		p.IsFreelistPage(),
		"page %v: has unexpected type/flags: %x", p.id, p.flags)
}

// LeafPageElement retrieves the leaf node by index
func (p *Page) LeafPageElement(index uint16) *leafPageElement {
	return (*leafPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		LeafPageElementSize, int(index)))
}

// LeafPageElements retrieves a list of leaf nodes.
func (p *Page) LeafPageElements() []leafPageElement {
	if p.count == 0 {
		return nil
	}
	data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
	elems := unsafe.Slice((*leafPageElement)(data), int(p.count))
	return elems
}

// BranchPageElement retrieves the branch node by index
func (p *Page) BranchPageElement(index uint16) *branchPageElement {
	return (*branchPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		unsafe.Sizeof(branchPageElement{}), int(index)))
}

// BranchPageElements retrieves a list of branch nodes.
func (p *Page) BranchPageElements() []branchPageElement {
	if p.count == 0 {
		return nil
	}
	data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
	elems := unsafe.Slice((*branchPageElement)(data), int(p.count))
	return elems
}

func (p *Page) FreelistPageCount() (int, int) {
	Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags))

	// If the page.count is at the max uint16 value (64k) then it's considered
	// an overflow and the size of the freelist is stored as the first element.
	var idx, count = 0, int(p.count)
	if count == 0xFFFF {
		idx = 1
		c := *(*Pgid)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
		count = int(c)
		if count < 0 {
			panic(fmt.Sprintf("leading element count %d overflows int", c))
		}
	}

	return idx, count
}

func (p *Page) FreelistPageIds() []Pgid {
	Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags))

	idx, count := p.FreelistPageCount()

	if count == 0 {
		return nil
	}

	data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), pgidSize, idx)
	ids := unsafe.Slice((*Pgid)(data), count)

	return ids
}

// hexdump writes n bytes of the page to STDERR as hex output.
func (p *Page) hexdump(n int) {
	buf := UnsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
	fmt.Fprintf(os.Stderr, "%x\n", buf)
}

func (p *Page) PageElementSize() uintptr {
	if p.IsLeafPage() {
		return LeafPageElementSize
	}
	return BranchPageElementSize
}

func (p *Page) Id() Pgid {
	return p.id
}

func (p *Page) SetId(target Pgid) {
	p.id = target
}

func (p *Page) Flags() uint16 {
	return p.flags
}

func (p *Page) SetFlags(v uint16) {
	p.flags = v
}

func (p *Page) Count() uint16 {
	return p.count
}

func (p *Page) SetCount(target uint16) {
	p.count = target
}

func (p *Page) Overflow() uint32 {
	return p.overflow
}

func (p *Page) SetOverflow(target uint32) {
	p.overflow = target
}

func (p *Page) String() string {
	return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Typ(), p.count, p.overflow)
}

type Pages []*Page

func (s Pages) Len() int           { return len(s) }
func (s Pages) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s Pages) Less(i, j int) bool { return s[i].id < s[j].id }

// branchPageElement represents a node on a branch page.
type branchPageElement struct {
	pos   uint32
	ksize uint32
	pgid  Pgid
}

func (n *branchPageElement) Pos() uint32 {
	return n.pos
}

func (n *branchPageElement) SetPos(v uint32) {
	n.pos = v
}

func (n *branchPageElement) Ksize() uint32 {
	return n.ksize
}

func (n *branchPageElement) SetKsize(v uint32) {
	n.ksize = v
}

func (n *branchPageElement) Pgid() Pgid {
	return n.pgid
}

func (n *branchPageElement) SetPgid(v Pgid) {
	n.pgid = v
}

// Key returns a byte slice of the node key.
func (n *branchPageElement) Key() []byte {
	return UnsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
}

// leafPageElement represents a node on a leaf page.
type leafPageElement struct {
	flags uint32
	pos   uint32
	ksize uint32
	vsize uint32
}

func NewLeafPageElement(flags, pos, ksize, vsize uint32) *leafPageElement {
	return &leafPageElement{
		flags: flags,
		pos:   pos,
		ksize: ksize,
		vsize: vsize,
	}
}

func (n *leafPageElement) Flags() uint32 {
	return n.flags
}

func (n *leafPageElement) SetFlags(v uint32) {
	n.flags = v
}

func (n *leafPageElement) Pos() uint32 {
	return n.pos
}

func (n *leafPageElement) SetPos(v uint32) {
	n.pos = v
}

func (n *leafPageElement) Ksize() uint32 {
	return n.ksize
}

func (n *leafPageElement) SetKsize(v uint32) {
	n.ksize = v
}

func (n *leafPageElement) Vsize() uint32 {
	return n.vsize
}

func (n *leafPageElement) SetVsize(v uint32) {
	n.vsize = v
}

// Key returns a byte slice of the node key.
func (n *leafPageElement) Key() []byte {
	i := int(n.pos)
	j := i + int(n.ksize)
	return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}

// Value returns a byte slice of the node value.
func (n *leafPageElement) Value() []byte {
	i := int(n.pos) + int(n.ksize)
	j := i + int(n.vsize)
	return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}

func (n *leafPageElement) IsBucketEntry() bool {
	return n.flags&uint32(BucketLeafFlag) != 0
}

func (n *leafPageElement) Bucket() *InBucket {
	if n.IsBucketEntry() {
		return LoadBucket(n.Value())
	} else {
		return nil
	}
}

// PageInfo represents human readable information about a page.
type PageInfo struct {
	ID            int
	Type          string
	Count         int
	OverflowCount int
}

type Pgids []Pgid

func (s Pgids) Len() int           { return len(s) }
func (s Pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s Pgids) Less(i, j int) bool { return s[i] < s[j] }

// Merge returns the sorted union of a and b.
func (a Pgids) Merge(b Pgids) Pgids {
	// Return the opposite slice if one is nil.
	if len(a) == 0 {
		return b
	}
	if len(b) == 0 {
		return a
	}
	merged := make(Pgids, len(a)+len(b))
	Mergepgids(merged, a, b)
	return merged
}

// Mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func Mergepgids(dst, a, b Pgids) {
	if len(dst) < len(a)+len(b) {
		panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
	}
	// Copy in the opposite slice if one is nil.
	if len(a) == 0 {
		copy(dst, b)
		return
	}
	if len(b) == 0 {
		copy(dst, a)
		return
	}

	// Merged will hold all elements from both lists.
	merged := dst[:0]

	// Assign lead to the slice with a lower starting value, follow to the higher value.
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}

	// Continue while there are elements in the lead.
	for len(lead) > 0 {
		// Merge largest prefix of lead that is ahead of follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}

		// Swap lead and follow.
		lead, follow = follow, lead[n:]
	}

	// Append what's left in follow.
	_ = append(merged, follow...)
}
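Note (not part of the commit): Mergepgids merges two sorted lists by repeatedly appending, via sort.Search, the longest prefix of the leading list that stays behind the head of the other. The same strategy on plain uint64 slices:

package main

import (
	"fmt"
	"sort"
)

// merge copies the sorted union of a and b into dst, mirroring the
// galloping strategy of Mergepgids.
func merge(dst, a, b []uint64) {
	if len(a) == 0 {
		copy(dst, b)
		return
	}
	if len(b) == 0 {
		copy(dst, a)
		return
	}
	merged := dst[:0] // append writes into dst's backing array
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}
	for len(lead) > 0 {
		// Binary-search for the longest prefix of lead that stays behind follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}
		lead, follow = follow, lead[n:]
	}
	_ = append(merged, follow...)
}

func main() {
	a := []uint64{1, 4, 5, 9}
	b := []uint64{2, 3, 7}
	dst := make([]uint64, len(a)+len(b))
	merge(dst, a, b)
	fmt.Println(dst) // [1 2 3 4 5 7 9]
}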
40
vendor/go.etcd.io/bbolt/internal/common/types.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
package common

import (
	"os"
	"runtime"
	"time"
)

// MaxMmapStep is the largest step that can be taken when remapping the mmap.
const MaxMmapStep = 1 << 30 // 1GB

// Version represents the data file format version.
const Version uint32 = 2

// Magic represents a marker value to indicate that a file is a Bolt DB.
const Magic uint32 = 0xED0CDAED

const PgidNoFreelist Pgid = 0xffffffffffffffff

// DO NOT EDIT. Copied from the "bolt" package.
const pageMaxAllocSize = 0xFFFFFFF

// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
// syncing changes to a file. This is required as some operating systems,
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
// must be synchronized using the msync(2) syscall.
const IgnoreNoSync = runtime.GOOS == "openbsd"

// Default values if not set in a DB instance.
const (
	DefaultMaxBatchSize  int = 1000
	DefaultMaxBatchDelay     = 10 * time.Millisecond
	DefaultAllocSize         = 16 * 1024 * 1024
)

// DefaultPageSize is the default page size for db which is set to the OS page size.
var DefaultPageSize = os.Getpagesize()

// Txid represents the internal transaction identifier.
type Txid uint64
22
vendor/go.etcd.io/bbolt/unsafe.go → vendor/go.etcd.io/bbolt/internal/common/unsafe.go
generated
vendored
@ -1,19 +1,18 @@
package bbolt
package common

import (
	"reflect"
	"unsafe"
)

func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
func UnsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset)
}

func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
func UnsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
}

func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
	// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
	//
	// This memory is not allocated from C, but it is unmanaged by Go's
@ -24,16 +23,5 @@ func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
	// index 0. However, the wiki never says that the address must be to
	// the beginning of a C allocation (or even that malloc was used at
	// all), so this is believed to be correct.
	return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
}

// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by
// the slice parameter. This helper should be used over other direct
// manipulation of reflect.SliceHeader to prevent misuse, namely, converting
// from reflect.SliceHeader to a Go slice type.
func unsafeSlice(slice, data unsafe.Pointer, len int) {
	s := (*reflect.SliceHeader)(slice)
	s.Data = uintptr(data)
	s.Cap = len
	s.Len = len
	return (*[pageMaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j]
}
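Note (not part of the commit): the deleted unsafeSlice helper built slices by writing a reflect.SliceHeader by hand; the relocated package relies on unsafe.Slice (Go 1.17+) instead, as seen in Page.LeafPageElements above. A minimal illustration of the replacement:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	backing := [4]uint64{10, 20, 30, 40}

	// Old approach (removed in this diff): populate a reflect.SliceHeader
	// field by field. New approach: unsafe.Slice builds a slice from a
	// pointer and a length, and is understood by the garbage collector
	// and by vet.
	s := unsafe.Slice(&backing[0], len(backing))
	fmt.Println(s) // [10 20 30 40]
}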
64
vendor/go.etcd.io/bbolt/internal/common/utils.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
package common

import (
	"fmt"
	"io"
	"os"
	"unsafe"
)

func LoadBucket(buf []byte) *InBucket {
	return (*InBucket)(unsafe.Pointer(&buf[0]))
}

func LoadPage(buf []byte) *Page {
	return (*Page)(unsafe.Pointer(&buf[0]))
}

func LoadPageMeta(buf []byte) *Meta {
	return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize]))
}

func CopyFile(srcPath, dstPath string) error {
	// Ensure the source file exists.
	_, err := os.Stat(srcPath)
	if os.IsNotExist(err) {
		return fmt.Errorf("source file %q not found", srcPath)
	} else if err != nil {
		return err
	}

	// Ensure the output file does not exist.
	_, err = os.Stat(dstPath)
	if err == nil {
		return fmt.Errorf("output file %q already exists", dstPath)
	} else if !os.IsNotExist(err) {
		return err
	}

	srcDB, err := os.Open(srcPath)
	if err != nil {
		return fmt.Errorf("failed to open source file %q: %w", srcPath, err)
	}
	defer srcDB.Close()
	dstDB, err := os.Create(dstPath)
	if err != nil {
		return fmt.Errorf("failed to create output file %q: %w", dstPath, err)
	}
	defer dstDB.Close()
	written, err := io.Copy(dstDB, srcDB)
	if err != nil {
		return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err)
	}

	srcFi, err := srcDB.Stat()
	if err != nil {
		return fmt.Errorf("failed to get source file info %q: %w", srcPath, err)
	}
	initialSize := srcFi.Size()
	if initialSize != written {
		return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize)
	}

	return nil
}
67
vendor/go.etcd.io/bbolt/internal/common/verify.go
generated
vendored
Normal file
@ -0,0 +1,67 @@
// Copied from https://github.com/etcd-io/etcd/blob/main/client/pkg/verify/verify.go
package common

import (
	"fmt"
	"os"
	"strings"
)

const ENV_VERIFY = "BBOLT_VERIFY"

type VerificationType string

const (
	ENV_VERIFY_VALUE_ALL    VerificationType = "all"
	ENV_VERIFY_VALUE_ASSERT VerificationType = "assert"
)

func getEnvVerify() string {
	return strings.ToLower(os.Getenv(ENV_VERIFY))
}

func IsVerificationEnabled(verification VerificationType) bool {
	env := getEnvVerify()
	return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification))
}

// EnableVerifications sets `ENV_VERIFY` and returns a function that
// can be used to restore the original settings.
func EnableVerifications(verification VerificationType) func() {
	previousEnv := getEnvVerify()
	os.Setenv(ENV_VERIFY, string(verification))
	return func() {
		os.Setenv(ENV_VERIFY, previousEnv)
	}
}

// EnableAllVerifications enables verification and returns a function
// that can be used to restore the original settings.
func EnableAllVerifications() func() {
	return EnableVerifications(ENV_VERIFY_VALUE_ALL)
}

// DisableVerifications unsets `ENV_VERIFY` and returns a function that
// can be used to restore the original settings.
func DisableVerifications() func() {
	previousEnv := getEnvVerify()
	os.Unsetenv(ENV_VERIFY)
	return func() {
		os.Setenv(ENV_VERIFY, previousEnv)
	}
}

// Verify performs verification if the assertions are enabled.
// In the default setup this runs in tests and is skipped in production code.
func Verify(f func()) {
	if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) {
		f()
	}
}

// Assert will panic with a given formatted message if the given condition is false.
func Assert(condition bool, msg string, v ...any) {
	if !condition {
		panic(fmt.Sprintf("assertion failed: "+msg, v...))
	}
}
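Note (not part of the commit): verify.go gates expensive assertions behind the BBOLT_VERIFY environment variable so production paths stay cheap. A standalone sketch of the same gate:

package main

import (
	"fmt"
	"os"
	"strings"
)

// verifyEnabled mirrors the BBOLT_VERIFY gate: expensive checks run only
// when the environment opts in.
func verifyEnabled() bool {
	v := strings.ToLower(os.Getenv("BBOLT_VERIFY"))
	return v == "all" || v == "assert"
}

func verify(f func()) {
	if verifyEnabled() {
		f()
	}
}

func main() {
	os.Setenv("BBOLT_VERIFY", "all")
	defer os.Unsetenv("BBOLT_VERIFY")

	verify(func() { fmt.Println("running expensive consistency check") })
}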
108
vendor/go.etcd.io/bbolt/internal/freelist/array.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
package freelist

import (
	"fmt"
	"sort"

	"go.etcd.io/bbolt/internal/common"
)

type array struct {
	*shared

	ids []common.Pgid // all free and available free page ids.
}

func (f *array) Init(ids common.Pgids) {
	f.ids = ids
	f.reindex()
}

func (f *array) Allocate(txid common.Txid, n int) common.Pgid {
	if len(f.ids) == 0 {
		return 0
	}

	var initial, previd common.Pgid
	for i, id := range f.ids {
		if id <= 1 {
			panic(fmt.Sprintf("invalid page allocation: %d", id))
		}

		// Reset initial page if this is not contiguous.
		if previd == 0 || id-previd != 1 {
			initial = id
		}

		// If we found a contiguous block then remove it and return it.
		if (id-initial)+1 == common.Pgid(n) {
			// If we're allocating off the beginning then take the fast path
			// and just adjust the existing slice. This will use extra memory
			// temporarily but the append() in free() will realloc the slice
			// as is necessary.
			if (i + 1) == n {
				f.ids = f.ids[i+1:]
			} else {
				copy(f.ids[i-n+1:], f.ids[i+1:])
				f.ids = f.ids[:len(f.ids)-n]
			}

			// Remove from the free cache.
			for i := common.Pgid(0); i < common.Pgid(n); i++ {
				delete(f.cache, initial+i)
			}
			f.allocs[initial] = txid
			return initial
		}

		previd = id
	}
	return 0
}

func (f *array) FreeCount() int {
	return len(f.ids)
}

func (f *array) freePageIds() common.Pgids {
	return f.ids
}

func (f *array) mergeSpans(ids common.Pgids) {
	sort.Sort(ids)
	common.Verify(func() {
		idsIdx := make(map[common.Pgid]struct{})
		for _, id := range f.ids {
			// The existing f.ids shouldn't have duplicated free ID.
			if _, ok := idsIdx[id]; ok {
				panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids))
			}
			idsIdx[id] = struct{}{}
		}

		prev := common.Pgid(0)
		for _, id := range ids {
			// The ids shouldn't have duplicated free ID. Note page 0 and 1
			// are reserved for meta pages, so they can never be free page IDs.
			if prev == id {
				panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
			}
			prev = id

			// The ids shouldn't have any overlap with the existing f.ids.
			if _, ok := idsIdx[id]; ok {
				panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids))
			}
		}
	})
	f.ids = common.Pgids(f.ids).Merge(ids)
}

func NewArrayFreelist() Interface {
	a := &array{
		shared: newShared(),
		ids:    []common.Pgid{},
	}
	a.Interface = a
	return a
}
82
vendor/go.etcd.io/bbolt/internal/freelist/freelist.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
package freelist

import (
	"go.etcd.io/bbolt/internal/common"
)

type ReadWriter interface {
	// Read calls Init with the page ids stored in the given page.
	Read(page *common.Page)

	// Write writes the freelist into the given page.
	Write(page *common.Page)

	// EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write.
	// This should never underestimate the size.
	EstimatedWritePageSize() int
}

type Interface interface {
	ReadWriter

	// Init initializes this freelist with the given list of pages.
	Init(ids common.Pgids)

	// Allocate tries to allocate the given number of contiguous pages
	// from the free list pages. It returns the starting page ID if
	// available; otherwise, it returns 0.
	Allocate(txid common.Txid, numPages int) common.Pgid

	// Count returns the number of free and pending pages.
	Count() int

	// FreeCount returns the number of free pages.
	FreeCount() int

	// PendingCount returns the number of pending pages.
	PendingCount() int

	// AddReadonlyTXID adds a given read-only transaction id for pending page tracking.
	AddReadonlyTXID(txid common.Txid)

	// RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking.
	RemoveReadonlyTXID(txid common.Txid)

	// ReleasePendingPages releases any pages associated with closed read-only transactions.
	ReleasePendingPages()

	// Free releases a page and its overflow for a given transaction id.
	// If the page is already free or is one of the meta pages, then a panic will occur.
	Free(txId common.Txid, p *common.Page)

	// Freed returns whether a given page is in the free list.
	Freed(pgId common.Pgid) bool

	// Rollback removes the pages from a given pending tx.
	Rollback(txId common.Txid)

	// Copyall copies a list of all free ids and all pending ids in one sorted list.
	// f.count returns the minimum length required for dst.
	Copyall(dst []common.Pgid)

	// Reload reads the freelist from a page and filters out pending items.
	Reload(p *common.Page)

	// NoSyncReload reads the freelist from Pgids and filters out pending items.
	NoSyncReload(pgIds common.Pgids)

	// freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available.
	freePageIds() common.Pgids

	// pendingPageIds returns all pending pages by transaction id.
	pendingPageIds() map[common.Txid]*txPending

	// release moves all page ids for a transaction id (or older) to the freelist.
	release(txId common.Txid)

	// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
	releaseRange(begin, end common.Txid)

	// mergeSpans merges the given pages into the freelist
	mergeSpans(ids common.Pgids)
}
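Note (not part of the commit): the array and hashmap implementations behind this interface are selected through bbolt's public Options.FreelistType (bolt.FreelistArrayType has historically been the default; bolt.FreelistMapType enables the hashmap version). A minimal sketch, with app.db as a placeholder path:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// The hashmap freelist trades memory for O(1) span allocation; the
	// array implementation scans linearly but keeps a compact sorted slice.
	db, err := bolt.Open("app.db", 0o600, &bolt.Options{
		FreelistType: bolt.FreelistMapType,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}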
292
vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go
generated
vendored
Normal file
@ -0,0 +1,292 @@
|
||||
package freelist

import (
	"fmt"
	"reflect"
	"sort"

	"go.etcd.io/bbolt/internal/common"
)

// pidSet holds the set of starting pgids which have the same span size
type pidSet map[common.Pgid]struct{}

type hashMap struct {
	*shared

	freePagesCount uint64                 // count of free pages (hashmap version)
	freemaps       map[uint64]pidSet      // key is the size of continuous pages (span), value is a set which contains the starting pgids of same size
	forwardMap     map[common.Pgid]uint64 // key is start pgid, value is its span size
	backwardMap    map[common.Pgid]uint64 // key is end pgid, value is its span size
}

func (f *hashMap) Init(pgids common.Pgids) {
	// reset the counter when freelist init
	f.freePagesCount = 0
	f.freemaps = make(map[uint64]pidSet)
	f.forwardMap = make(map[common.Pgid]uint64)
	f.backwardMap = make(map[common.Pgid]uint64)

	if len(pgids) == 0 {
		return
	}

	if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
		panic("pgids not sorted")
	}

	size := uint64(1)
	start := pgids[0]

	for i := 1; i < len(pgids); i++ {
		// continuous page
		if pgids[i] == pgids[i-1]+1 {
			size++
		} else {
			f.addSpan(start, size)

			size = 1
			start = pgids[i]
		}
	}

	// init the tail
	if size != 0 && start != 0 {
		f.addSpan(start, size)
	}

	f.reindex()
}

func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid {
	if n == 0 {
		return 0
	}

	// If we have an exact size match, take the short path.
	if bm, ok := f.freemaps[uint64(n)]; ok {
		for pid := range bm {
			// remove the span
			f.delSpan(pid, uint64(n))

			f.allocs[pid] = txid

			for i := common.Pgid(0); i < common.Pgid(n); i++ {
				delete(f.cache, pid+i)
			}
			return pid
		}
	}

	// look up the map to find a larger span
	for size, bm := range f.freemaps {
		if size < uint64(n) {
			continue
		}

		for pid := range bm {
			// remove the initial span
			f.delSpan(pid, size)

			f.allocs[pid] = txid

			remain := size - uint64(n)

			// add the remaining span
			f.addSpan(pid+common.Pgid(n), remain)

			for i := common.Pgid(0); i < common.Pgid(n); i++ {
				delete(f.cache, pid+i)
			}
			return pid
		}
	}

	return 0
}

func (f *hashMap) FreeCount() int {
	common.Verify(func() {
		expectedFreePageCount := f.hashmapFreeCountSlow()
		common.Assert(int(f.freePagesCount) == expectedFreePageCount,
			"freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount)
	})
	return int(f.freePagesCount)
}

func (f *hashMap) freePageIds() common.Pgids {
	count := f.FreeCount()
	if count == 0 {
		return common.Pgids{}
	}

	m := make([]common.Pgid, 0, count)

	startPageIds := make([]common.Pgid, 0, len(f.forwardMap))
	for k := range f.forwardMap {
		startPageIds = append(startPageIds, k)
	}
	sort.Sort(common.Pgids(startPageIds))

	for _, start := range startPageIds {
		if size, ok := f.forwardMap[start]; ok {
			for i := 0; i < int(size); i++ {
				m = append(m, start+common.Pgid(i))
			}
		}
	}

	return m
}

func (f *hashMap) hashmapFreeCountSlow() int {
	count := 0
	for _, size := range f.forwardMap {
		count += int(size)
	}
	return count
}

func (f *hashMap) addSpan(start common.Pgid, size uint64) {
	f.backwardMap[start-1+common.Pgid(size)] = size
	f.forwardMap[start] = size
	if _, ok := f.freemaps[size]; !ok {
		f.freemaps[size] = make(map[common.Pgid]struct{})
	}

	f.freemaps[size][start] = struct{}{}
	f.freePagesCount += size
}

func (f *hashMap) delSpan(start common.Pgid, size uint64) {
	delete(f.forwardMap, start)
	delete(f.backwardMap, start+common.Pgid(size-1))
	delete(f.freemaps[size], start)
	if len(f.freemaps[size]) == 0 {
		delete(f.freemaps, size)
	}
	f.freePagesCount -= size
}

func (f *hashMap) mergeSpans(ids common.Pgids) {
	common.Verify(func() {
		ids1Freemap := f.idsFromFreemaps()
		ids2Forward := f.idsFromForwardMap()
		ids3Backward := f.idsFromBackwardMap()

		if !reflect.DeepEqual(ids1Freemap, ids2Forward) {
			panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap))
		}
		if !reflect.DeepEqual(ids1Freemap, ids3Backward) {
			panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap))
		}

		sort.Sort(ids)
		prev := common.Pgid(0)
		for _, id := range ids {
			// The ids shouldn't have duplicated free ID.
			if prev == id {
				panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
			}
			prev = id

			// The ids shouldn't have any overlap with the existing f.freemaps.
			if _, ok := ids1Freemap[id]; ok {
				panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps))
			}
		}
	})
	for _, id := range ids {
		// try to see if we can merge and update
		f.mergeWithExistingSpan(id)
	}
}

// mergeWithExistingSpan merges pid into the existing free spans, trying to merge it backward and forward
func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) {
	prev := pid - 1
	next := pid + 1

	preSize, mergeWithPrev := f.backwardMap[prev]
	nextSize, mergeWithNext := f.forwardMap[next]
	newStart := pid
	newSize := uint64(1)

	if mergeWithPrev {
		// merge with previous span
		start := prev + 1 - common.Pgid(preSize)
		f.delSpan(start, preSize)

		newStart -= common.Pgid(preSize)
		newSize += preSize
	}

	if mergeWithNext {
		// merge with next span
		f.delSpan(next, nextSize)
		newSize += nextSize
	}

	f.addSpan(newStart, newSize)
}

// idsFromFreemaps gets all free page IDs from f.freemaps.
// Used by tests only.
func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} {
	ids := make(map[common.Pgid]struct{})
	for size, idSet := range f.freemaps {
		for start := range idSet {
			for i := 0; i < int(size); i++ {
				id := start + common.Pgid(i)
				if _, ok := ids[id]; ok {
					panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps))
				}
				ids[id] = struct{}{}
			}
		}
	}
	return ids
}

// idsFromForwardMap gets all free page IDs from f.forwardMap.
// Used by tests only.
func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} {
	ids := make(map[common.Pgid]struct{})
	for start, size := range f.forwardMap {
		for i := 0; i < int(size); i++ {
			id := start + common.Pgid(i)
			if _, ok := ids[id]; ok {
				panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap))
			}
			ids[id] = struct{}{}
		}
	}
	return ids
}

// idsFromBackwardMap gets all free page IDs from f.backwardMap.
// Used by tests only.
func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} {
	ids := make(map[common.Pgid]struct{})
	for end, size := range f.backwardMap {
		for i := 0; i < int(size); i++ {
			id := end - common.Pgid(i)
			if _, ok := ids[id]; ok {
				panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap))
			}
			ids[id] = struct{}{}
		}
	}
	return ids
}

func NewHashMapFreelist() Interface {
	hm := &hashMap{
		shared:      newShared(),
		freemaps:    make(map[uint64]pidSet),
		forwardMap:  make(map[common.Pgid]uint64),
		backwardMap: make(map[common.Pgid]uint64),
	}
	hm.Interface = hm
	return hm
}
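
The three maps above cooperate so that freeing one page can coalesce with a span ending at pid-1 (via backwardMap) and a span starting at pid+1 (via forwardMap). A minimal standalone model of that merge, with plain uint64 keys instead of common.Pgid (a simplification for illustration, not bbolt's actual API):

package main

import "fmt"

var (
	forward  = map[uint64]uint64{} // start page -> span size
	backward = map[uint64]uint64{} // end page   -> span size
)

func addSpan(start, size uint64) {
	forward[start] = size
	backward[start+size-1] = size
}

func delSpan(start, size uint64) {
	delete(forward, start)
	delete(backward, start+size-1)
}

// freePage frees a single page, merging it with adjacent spans.
func freePage(p uint64) {
	start, size := p, uint64(1)
	if s, ok := backward[p-1]; ok { // a span ends right before p
		delSpan(p-s, s)
		start, size = p-s, size+s
	}
	if s, ok := forward[p+1]; ok { // a span starts right after p
		delSpan(p+1, s)
		size += s
	}
	addSpan(start, size)
}

func main() {
	freePage(4)
	freePage(6)
	freePage(5)          // bridges the two spans into one
	fmt.Println(forward) // map[4:3]
}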
310 vendor/go.etcd.io/bbolt/internal/freelist/shared.go generated vendored Normal file
@ -0,0 +1,310 @@
package freelist

import (
	"fmt"
	"math"
	"sort"
	"unsafe"

	"go.etcd.io/bbolt/internal/common"
)

type txPending struct {
	ids              []common.Pgid
	alloctx          []common.Txid // txids allocating the ids
	lastReleaseBegin common.Txid   // beginning txid of last matching releaseRange
}

type shared struct {
	Interface

	readonlyTXIDs []common.Txid               // all readonly transaction IDs.
	allocs        map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid.
	cache         map[common.Pgid]struct{}    // fast lookup of all free and pending page ids.
	pending       map[common.Txid]*txPending  // mapping of soon-to-be free page ids by tx.
}

func newShared() *shared {
	return &shared{
		pending: make(map[common.Txid]*txPending),
		allocs:  make(map[common.Pgid]common.Txid),
		cache:   make(map[common.Pgid]struct{}),
	}
}

func (t *shared) pendingPageIds() map[common.Txid]*txPending {
	return t.pending
}

func (t *shared) PendingCount() int {
	var count int
	for _, txp := range t.pending {
		count += len(txp.ids)
	}
	return count
}

func (t *shared) Count() int {
	return t.FreeCount() + t.PendingCount()
}

func (t *shared) Freed(pgId common.Pgid) bool {
	_, ok := t.cache[pgId]
	return ok
}

func (t *shared) Free(txid common.Txid, p *common.Page) {
	if p.Id() <= 1 {
		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id()))
	}

	// Free page and all its overflow pages.
	txp := t.pending[txid]
	if txp == nil {
		txp = &txPending{}
		t.pending[txid] = txp
	}
	allocTxid, ok := t.allocs[p.Id()]
	common.Verify(func() {
		if allocTxid == txid {
			panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid))
		}
	})
	if ok {
		delete(t.allocs, p.Id())
	}

	for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ {
		// Verify that page is not already free.
		if _, ok := t.cache[id]; ok {
			panic(fmt.Sprintf("page %d already freed", id))
		}
		// Add to the freelist and cache.
		txp.ids = append(txp.ids, id)
		txp.alloctx = append(txp.alloctx, allocTxid)
		t.cache[id] = struct{}{}
	}
}

func (t *shared) Rollback(txid common.Txid) {
	// Remove page ids from cache.
	txp := t.pending[txid]
	if txp == nil {
		return
	}
	for i, pgid := range txp.ids {
		delete(t.cache, pgid)
		tx := txp.alloctx[i]
		if tx == 0 {
			continue
		}
		if tx != txid {
			// Pending free aborted; restore page back to alloc list.
			t.allocs[pgid] = tx
		} else {
			// A writing TXN should never free a page which was allocated by itself.
			panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid))
		}
	}
	// Remove pages from pending list and mark as free if allocated by txid.
	delete(t.pending, txid)

	// Remove pgids which are allocated by this txid
	for pgid, tid := range t.allocs {
		if tid == txid {
			delete(t.allocs, pgid)
		}
	}
}

func (t *shared) AddReadonlyTXID(tid common.Txid) {
	t.readonlyTXIDs = append(t.readonlyTXIDs, tid)
}

func (t *shared) RemoveReadonlyTXID(tid common.Txid) {
	for i := range t.readonlyTXIDs {
		if t.readonlyTXIDs[i] == tid {
			last := len(t.readonlyTXIDs) - 1
			t.readonlyTXIDs[i] = t.readonlyTXIDs[last]
			t.readonlyTXIDs = t.readonlyTXIDs[:last]
			break
		}
	}
}

type txIDx []common.Txid

func (t txIDx) Len() int           { return len(t) }
func (t txIDx) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
func (t txIDx) Less(i, j int) bool { return t[i] < t[j] }

func (t *shared) ReleasePendingPages() {
	// Free all pending pages prior to the earliest open transaction.
	sort.Sort(txIDx(t.readonlyTXIDs))
	minid := common.Txid(math.MaxUint64)
	if len(t.readonlyTXIDs) > 0 {
		minid = t.readonlyTXIDs[0]
	}
	if minid > 0 {
		t.release(minid - 1)
	}
	// Release unused txid extents.
	for _, tid := range t.readonlyTXIDs {
		t.releaseRange(minid, tid-1)
		minid = tid + 1
	}
	t.releaseRange(minid, common.Txid(math.MaxUint64))
	// Any page both allocated and freed in an extent is safe to release.
}

func (t *shared) release(txid common.Txid) {
	m := make(common.Pgids, 0)
	for tid, txp := range t.pending {
		if tid <= txid {
			// Move transaction's pending pages to the available freelist.
			// Don't remove from the cache since the page is still free.
			m = append(m, txp.ids...)
			delete(t.pending, tid)
		}
	}
	t.mergeSpans(m)
}

func (t *shared) releaseRange(begin, end common.Txid) {
	if begin > end {
		return
	}
	m := common.Pgids{}
	for tid, txp := range t.pending {
		if tid < begin || tid > end {
			continue
		}
		// Don't recompute freed pages if ranges haven't updated.
		if txp.lastReleaseBegin == begin {
			continue
		}
		for i := 0; i < len(txp.ids); i++ {
			if atx := txp.alloctx[i]; atx < begin || atx > end {
				continue
			}
			m = append(m, txp.ids[i])
			txp.ids[i] = txp.ids[len(txp.ids)-1]
			txp.ids = txp.ids[:len(txp.ids)-1]
			txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
			txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
			i--
		}
		txp.lastReleaseBegin = begin
		if len(txp.ids) == 0 {
			delete(t.pending, tid)
		}
	}
	t.mergeSpans(m)
}

// Copyall copies a list of all free ids and all pending ids in one sorted list.
// Count() returns the minimum length required for dst.
func (t *shared) Copyall(dst []common.Pgid) {
	m := make(common.Pgids, 0, t.PendingCount())
	for _, txp := range t.pendingPageIds() {
		m = append(m, txp.ids...)
	}
	sort.Sort(m)
	common.Mergepgids(dst, t.freePageIds(), m)
}

func (t *shared) Reload(p *common.Page) {
	t.Read(p)
	t.NoSyncReload(t.freePageIds())
}

func (t *shared) NoSyncReload(pgIds common.Pgids) {
	// Build a cache of only pending pages.
	pcache := make(map[common.Pgid]bool)
	for _, txp := range t.pending {
		for _, pendingID := range txp.ids {
			pcache[pendingID] = true
		}
	}

	// Check each page in the freelist and build a new available freelist
	// with any pages not in the pending lists.
	a := []common.Pgid{}
	for _, id := range pgIds {
		if !pcache[id] {
			a = append(a, id)
		}
	}

	t.Init(a)
}

// reindex rebuilds the free cache based on available and pending free lists.
func (t *shared) reindex() {
	free := t.freePageIds()
	pending := t.pendingPageIds()
	t.cache = make(map[common.Pgid]struct{}, len(free))
	for _, id := range free {
		t.cache[id] = struct{}{}
	}
	for _, txp := range pending {
		for _, pendingID := range txp.ids {
			t.cache[pendingID] = struct{}{}
		}
	}
}

func (t *shared) Read(p *common.Page) {
	if !p.IsFreelistPage() {
		panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ()))
	}

	ids := p.FreelistPageIds()

	// Copy the list of page ids from the freelist.
	if len(ids) == 0 {
		t.Init([]common.Pgid{})
	} else {
		// copy the ids, so we don't modify the freelist page directly
		idsCopy := make([]common.Pgid, len(ids))
		copy(idsCopy, ids)
		// Make sure they're sorted.
		sort.Sort(common.Pgids(idsCopy))

		t.Init(idsCopy)
	}
}

func (t *shared) EstimatedWritePageSize() int {
	n := t.Count()
	if n >= 0xFFFF {
		// The first element will be used to store the count. See freelist.write.
		n++
	}
	return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n)
}

func (t *shared) Write(p *common.Page) {
	// Combine the old free pgids and pgids waiting on an open transaction.

	// Update the header flag.
	p.SetFlags(common.FreelistPageFlag)

	// The page.count can only hold up to 64k elements so if we overflow that
	// number then we handle it by putting the size in the first element.
	l := t.Count()
	if l == 0 {
		p.SetCount(uint16(l))
	} else if l < 0xFFFF {
		p.SetCount(uint16(l))
		data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
		ids := unsafe.Slice((*common.Pgid)(data), l)
		t.Copyall(ids)
	} else {
		p.SetCount(0xFFFF)
		data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
		ids := unsafe.Slice((*common.Pgid)(data), l+1)
		ids[0] = common.Pgid(l)
		t.Copyall(ids[1:])
	}
}
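
RemoveReadonlyTXID and releaseRange both rely on the same O(1) unordered-delete idiom: overwrite the element with the last one and shrink the slice, which is safe because neither slice needs to stay sorted at that point. The idiom in isolation:

package main

import "fmt"

// removeUnordered deletes s[i] in O(1) without preserving element order.
func removeUnordered(s []uint64, i int) []uint64 {
	last := len(s) - 1
	s[i] = s[last]
	return s[:last]
}

func main() {
	ids := []uint64{3, 7, 9, 12}
	ids = removeUnordered(ids, 1) // drop 7
	fmt.Println(ids)              // [3 12 9]
}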
113 vendor/go.etcd.io/bbolt/logger.go generated vendored Normal file
@ -0,0 +1,113 @@
package bbolt

// See https://github.com/etcd-io/raft/blob/main/logger.go
import (
	"fmt"
	"io"
	"log"
	"os"
)

type Logger interface {
	Debug(v ...interface{})
	Debugf(format string, v ...interface{})

	Error(v ...interface{})
	Errorf(format string, v ...interface{})

	Info(v ...interface{})
	Infof(format string, v ...interface{})

	Warning(v ...interface{})
	Warningf(format string, v ...interface{})

	Fatal(v ...interface{})
	Fatalf(format string, v ...interface{})

	Panic(v ...interface{})
	Panicf(format string, v ...interface{})
}

func getDiscardLogger() Logger {
	return discardLogger
}

var (
	discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)}
)

const (
	calldepth = 2
)

// DefaultLogger is a default implementation of the Logger interface.
type DefaultLogger struct {
	*log.Logger
	debug bool
}

func (l *DefaultLogger) EnableTimestamps() {
	l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
}

func (l *DefaultLogger) EnableDebug() {
	l.debug = true
}

func (l *DefaultLogger) Debug(v ...interface{}) {
	if l.debug {
		_ = l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
	}
}

func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
	if l.debug {
		_ = l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
	}
}

func (l *DefaultLogger) Info(v ...interface{}) {
	_ = l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
}

func (l *DefaultLogger) Infof(format string, v ...interface{}) {
	_ = l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
}

func (l *DefaultLogger) Error(v ...interface{}) {
	_ = l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
}

func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
	_ = l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
}

func (l *DefaultLogger) Warning(v ...interface{}) {
	_ = l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
}

func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
	_ = l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
}

func (l *DefaultLogger) Fatal(v ...interface{}) {
	_ = l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
	os.Exit(1)
}

func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
	_ = l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
	os.Exit(1)
}

func (l *DefaultLogger) Panic(v ...interface{}) {
	l.Logger.Panic(v...)
}

func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
	l.Logger.Panicf(format, v...)
}

func header(lvl, msg string) string {
	return fmt.Sprintf("%s: %s", lvl, msg)
}
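
For library users the logger is wired in through the client-facing options. A minimal usage sketch; it assumes this bbolt version exposes an Options.Logger field (introduced alongside this logger file, but verify against the vendored version), and "app.db" is a placeholder file name:

package main

import (
	"log"
	"os"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// DefaultLogger wraps a standard *log.Logger; debug output is opt-in.
	logger := &bolt.DefaultLogger{Logger: log.New(os.Stderr, "bbolt ", 0)}
	logger.EnableDebug()      // also emit DEBUG lines
	logger.EnableTimestamps() // prefix entries with date/time

	db, err := bolt.Open("app.db", 0600, &bolt.Options{Logger: logger})
	if err != nil {
		logger.Fatalf("open: %v", err)
	}
	defer db.Close()
}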
36 vendor/go.etcd.io/bbolt/mlock_unix.go generated vendored Normal file
@ -0,0 +1,36 @@
//go:build !windows

package bbolt

import "golang.org/x/sys/unix"

// mlock locks memory of db file
func mlock(db *DB, fileSize int) error {
	sizeToLock := fileSize
	if sizeToLock > db.datasz {
		// Can't lock more than the mmapped slice
		sizeToLock = db.datasz
	}
	if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil {
		return err
	}
	return nil
}

// munlock unlocks memory of db file
func munlock(db *DB, fileSize int) error {
	if db.dataref == nil {
		return nil
	}

	sizeToUnlock := fileSize
	if sizeToUnlock > db.datasz {
		// Can't unlock more than the mmapped slice
		sizeToUnlock = db.datasz
	}

	if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil {
		return err
	}
	return nil
}
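
On the application side this code path is reached through the Mlock open option. A hedged sketch (Options.Mlock exists in recent bbolt releases; the path is a placeholder):

package main

import (
	bolt "go.etcd.io/bbolt"
)

func main() {
	// Mlock pins the mmapped data file in RAM; the process needs a
	// sufficient RLIMIT_MEMLOCK (or CAP_IPC_LOCK on Linux) to succeed.
	db, err := bolt.Open("app.db", 0600, &bolt.Options{Mlock: true})
	if err != nil {
		panic(err)
	}
	defer db.Close()
}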
11 vendor/go.etcd.io/bbolt/mlock_windows.go generated vendored Normal file
@ -0,0 +1,11 @@
package bbolt

// mlock locks memory of db file
func mlock(_ *DB, _ int) error {
	panic("mlock is supported only on UNIX systems")
}

// munlock unlocks memory of db file
func munlock(_ *DB, _ int) error {
	panic("munlock is supported only on UNIX systems")
}
266 vendor/go.etcd.io/bbolt/node.go generated vendored
@ -4,7 +4,8 @@ import (
	"bytes"
	"fmt"
	"sort"
	"unsafe"

	"go.etcd.io/bbolt/internal/common"
)

// node represents an in-memory, deserialized page.
@ -14,10 +15,10 @@ type node struct {
	unbalanced bool
	spilled    bool
	key        []byte
	pgid       pgid
	pgid       common.Pgid
	parent     *node
	children   nodes
	inodes     inodes
	inodes     common.Inodes
}

// root returns the top-level node this node is attached to.
@ -38,10 +39,10 @@ func (n *node) minKeys() int {

// size returns the size of the node after serialization.
func (n *node) size() int {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	sz, elsz := common.PageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
		sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value()))
	}
	return int(sz)
}
@ -50,10 +51,10 @@ func (n *node) size() int {
// This is an optimization to avoid calculating a large node when we only need
// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v uintptr) bool {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	sz, elsz := common.PageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
		sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value()))
		if sz >= v {
			return false
		}
@ -64,9 +65,9 @@ func (n *node) sizeLessThan(v uintptr) bool {
// pageElementSize returns the size of each page element based on the type of node.
func (n *node) pageElementSize() uintptr {
	if n.isLeaf {
		return leafPageElementSize
		return common.LeafPageElementSize
	}
	return branchPageElementSize
	return common.BranchPageElementSize
}

// childAt returns the child node at a given index.
@ -74,12 +75,12 @@ func (n *node) childAt(index int) *node {
	if n.isLeaf {
		panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
	}
	return n.bucket.node(n.inodes[index].pgid, n)
	return n.bucket.node(n.inodes[index].Pgid(), n)
}

// childIndex returns the index of a given child node.
func (n *node) childIndex(child *node) int {
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), child.key) != -1 })
	return index
}

@ -113,9 +114,9 @@ func (n *node) prevSibling() *node {
}

// put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
	if pgid >= n.bucket.tx.meta.pgid {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
func (n *node) put(oldKey, newKey, value []byte, pgId common.Pgid, flags uint32) {
	if pgId >= n.bucket.tx.meta.Pgid() {
		panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.Pgid()))
	} else if len(oldKey) <= 0 {
		panic("put: zero-length old key")
	} else if len(newKey) <= 0 {
@ -123,30 +124,30 @@ func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
	}

	// Find insertion index.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), oldKey) != -1 })

	// Add capacity and shift nodes if we don't have an exact match and need to insert.
	exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
	exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].Key(), oldKey)
	if !exact {
		n.inodes = append(n.inodes, inode{})
		n.inodes = append(n.inodes, common.Inode{})
		copy(n.inodes[index+1:], n.inodes[index:])
	}

	inode := &n.inodes[index]
	inode.flags = flags
	inode.key = newKey
	inode.value = value
	inode.pgid = pgid
	_assert(len(inode.key) > 0, "put: zero-length inode key")
	inode.SetFlags(flags)
	inode.SetKey(newKey)
	inode.SetValue(value)
	inode.SetPgid(pgId)
	common.Assert(len(inode.Key()) > 0, "put: zero-length inode key")
}
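
Both put and del locate keys with the same sort.Search idiom: bytes.Compare(k, target) != -1 means "k >= target", so the search returns the insertion point in a sorted list of byte-slice keys. The idiom in isolation:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

func main() {
	keys := [][]byte{[]byte("a"), []byte("c"), []byte("e")}
	target := []byte("d")

	// Index of the first key >= target: the insertion point for a sorted slice.
	i := sort.Search(len(keys), func(i int) bool {
		return bytes.Compare(keys[i], target) != -1
	})
	exact := i < len(keys) && bytes.Equal(keys[i], target)
	fmt.Println(i, exact) // 2 false
}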

// del removes a key from the node.
func (n *node) del(key []byte) {
	// Find index of key.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), key) != -1 })

	// Exit if the key isn't found.
	if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
	if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].Key(), key) {
		return
	}

@ -158,85 +159,44 @@ func (n *node) del(key []byte) {
}

// read initializes the node from a page.
func (n *node) read(p *page) {
	n.pgid = p.id
	n.isLeaf = ((p.flags & leafPageFlag) != 0)
	n.inodes = make(inodes, int(p.count))
func (n *node) read(p *common.Page) {
	n.pgid = p.Id()
	n.isLeaf = p.IsLeafPage()
	n.inodes = common.ReadInodeFromPage(p)

	for i := 0; i < int(p.count); i++ {
		inode := &n.inodes[i]
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			inode.flags = elem.flags
			inode.key = elem.key()
			inode.value = elem.value()
		} else {
			elem := p.branchPageElement(uint16(i))
			inode.pgid = elem.pgid
			inode.key = elem.key()
		}
		_assert(len(inode.key) > 0, "read: zero-length inode key")
	}

	// Save first key so we can find the node in the parent when we spill.
	// Save first key, so we can find the node in the parent when we spill.
	if len(n.inodes) > 0 {
		n.key = n.inodes[0].key
		_assert(len(n.key) > 0, "read: zero-length node key")
		n.key = n.inodes[0].Key()
		common.Assert(len(n.key) > 0, "read: zero-length node key")
	} else {
		n.key = nil
	}
}

// write writes the items onto one or more pages.
func (n *node) write(p *page) {
// The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set
// and the rest should be zeroed.
func (n *node) write(p *common.Page) {
	common.Assert(p.Count() == 0 && p.Flags() == 0, "node cannot be written into a not empty page")

	// Initialize page.
	if n.isLeaf {
		p.flags |= leafPageFlag
		p.SetFlags(common.LeafPageFlag)
	} else {
		p.flags |= branchPageFlag
		p.SetFlags(common.BranchPageFlag)
	}

	if len(n.inodes) >= 0xFFFF {
		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.Id()))
	}
	p.count = uint16(len(n.inodes))
	p.SetCount(uint16(len(n.inodes)))

	// Stop here if there are no items to write.
	if p.count == 0 {
	if p.Count() == 0 {
		return
	}

	// Loop over each item and write it to the page.
	// off tracks the offset into p of the start of the next data.
	off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
	for i, item := range n.inodes {
		_assert(len(item.key) > 0, "write: zero-length inode key")

		// Create a slice to write into of needed size and advance
		// byte pointer for next iteration.
		sz := len(item.key) + len(item.value)
		b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
		off += uintptr(sz)

		// Write the page element.
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.flags = item.flags
			elem.ksize = uint32(len(item.key))
			elem.vsize = uint32(len(item.value))
		} else {
			elem := p.branchPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.ksize = uint32(len(item.key))
			elem.pgid = item.pgid
			_assert(elem.pgid != p.id, "write: circular dependency occurred")
		}

		// Write data for the element to the end of the page.
		l := copy(b, item.key)
		copy(b[l:], item.value)
	}
	common.WriteInodeToPage(n.inodes, p)

	// DEBUG ONLY: n.dump()
}
@ -269,7 +229,7 @@ func (n *node) split(pageSize uintptr) []*node {
func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
	// Ignore the split if the page doesn't have at least enough nodes for
	// two pages or if the nodes can fit in a single page.
	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
	if len(n.inodes) <= (common.MinKeysPerPage*2) || n.sizeLessThan(pageSize) {
		return n, nil
	}

@ -300,7 +260,7 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
	n.inodes = n.inodes[:splitIndex]

	// Update the statistics.
	n.bucket.tx.stats.Split++
	n.bucket.tx.stats.IncSplit(1)

	return n, next
}
@ -309,17 +269,17 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
// It returns the index as well as the size of the first page.
// This is only called from split().
func (n *node) splitIndex(threshold int) (index, sz uintptr) {
	sz = pageHeaderSize
	sz = common.PageHeaderSize

	// Loop until we only have the minimum number of keys required for the second page.
	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
	for i := 0; i < len(n.inodes)-common.MinKeysPerPage; i++ {
		index = uintptr(i)
		inode := n.inodes[i]
		elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value))
		elsize := n.pageElementSize() + uintptr(len(inode.Key())) + uintptr(len(inode.Value()))

		// If we have at least the minimum number of keys and adding another
		// node would put us over the threshold then exit and return.
		if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
		if index >= common.MinKeysPerPage && sz+elsize > uintptr(threshold) {
			break
		}

@ -356,7 +316,7 @@ func (n *node) spill() error {
	for _, node := range nodes {
		// Add node's page to the freelist if it's not new.
		if node.pgid > 0 {
			tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
			tx.db.freelist.Free(tx.meta.Txid(), tx.page(node.pgid))
			node.pgid = 0
		}

@ -367,10 +327,10 @@ func (n *node) spill() error {
		}

		// Write the node.
		if p.id >= tx.meta.pgid {
			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
		if p.Id() >= tx.meta.Pgid() {
			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.Id(), tx.meta.Pgid()))
		}
		node.pgid = p.id
		node.pgid = p.Id()
		node.write(p)
		node.spilled = true

@ -378,16 +338,16 @@ func (n *node) spill() error {
		if node.parent != nil {
			var key = node.key
			if key == nil {
				key = node.inodes[0].key
				key = node.inodes[0].Key()
			}

			node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
			node.key = node.inodes[0].key
			_assert(len(node.key) > 0, "spill: zero-length node key")
			node.parent.put(key, node.inodes[0].Key(), nil, node.pgid, 0)
			node.key = node.inodes[0].Key()
			common.Assert(len(node.key) > 0, "spill: zero-length node key")
		}

		// Update the statistics.
		tx.stats.Spill++
		tx.stats.IncSpill(1)
	}

	// If the root node split and created a new root then we need to spill that
@ -409,10 +369,10 @@ func (n *node) rebalance() {
	n.unbalanced = false

	// Update statistics.
	n.bucket.tx.stats.Rebalance++
	n.bucket.tx.stats.IncRebalance(1)

	// Ignore if node is above threshold (25%) and has enough keys.
	var threshold = n.bucket.tx.db.pageSize / 4
	// Ignore if node is above threshold (25% when FillPercent is set to DefaultFillPercent) and has enough keys.
	var threshold = int(float64(n.bucket.tx.db.pageSize)*n.bucket.FillPercent) / 2
	if n.size() > threshold && len(n.inodes) > n.minKeys() {
		return
	}
@ -422,14 +382,14 @@ func (n *node) rebalance() {
	// If root node is a branch and only has one node then collapse it.
	if !n.isLeaf && len(n.inodes) == 1 {
		// Move root's child up.
		child := n.bucket.node(n.inodes[0].pgid, n)
		child := n.bucket.node(n.inodes[0].Pgid(), n)
		n.isLeaf = child.isLeaf
		n.inodes = child.inodes[:]
		n.children = child.children

		// Reparent all child nodes being moved.
		for _, inode := range n.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
			if child, ok := n.bucket.nodes[inode.Pgid()]; ok {
				child.parent = n
			}
		}
@ -453,53 +413,37 @@ func (n *node) rebalance() {
		return
	}

	_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
	common.Assert(n.parent.numChildren() > 1, "parent must have at least 2 children")

	// Destination node is right sibling if idx == 0, otherwise left sibling.
	var target *node
	var useNextSibling = (n.parent.childIndex(n) == 0)
	// Merge with right sibling if idx == 0, otherwise left sibling.
	var leftNode, rightNode *node
	var useNextSibling = n.parent.childIndex(n) == 0
	if useNextSibling {
		target = n.nextSibling()
		leftNode = n
		rightNode = n.nextSibling()
	} else {
		target = n.prevSibling()
		leftNode = n.prevSibling()
		rightNode = n
	}

	// If both this node and the target node are too small then merge them.
	if useNextSibling {
		// Reparent all child nodes being moved.
		for _, inode := range target.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent.removeChild(child)
				child.parent = n
				child.parent.children = append(child.parent.children, child)
			}
	// If both nodes are too small then merge them.
	// Reparent all child nodes being moved.
	for _, inode := range rightNode.inodes {
		if child, ok := n.bucket.nodes[inode.Pgid()]; ok {
			child.parent.removeChild(child)
			child.parent = leftNode
			child.parent.children = append(child.parent.children, child)
		}

		// Copy over inodes from target and remove target.
		n.inodes = append(n.inodes, target.inodes...)
		n.parent.del(target.key)
		n.parent.removeChild(target)
		delete(n.bucket.nodes, target.pgid)
		target.free()
	} else {
		// Reparent all child nodes being moved.
		for _, inode := range n.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent.removeChild(child)
				child.parent = target
				child.parent.children = append(child.parent.children, child)
			}
		}

		// Copy over inodes to target and remove node.
		target.inodes = append(target.inodes, n.inodes...)
		n.parent.del(n.key)
		n.parent.removeChild(n)
		delete(n.bucket.nodes, n.pgid)
		n.free()
	}

	// Either this node or the target node was deleted from the parent so rebalance it.
	// Copy over inodes from right node to left node and remove right node.
	leftNode.inodes = append(leftNode.inodes, rightNode.inodes...)
	n.parent.del(rightNode.key)
	n.parent.removeChild(rightNode)
	delete(n.bucket.nodes, rightNode.pgid)
	rightNode.free()

	// Either this node or the sibling node was deleted from the parent so rebalance it.
	n.parent.rebalance()
}

@ -521,20 +465,20 @@ func (n *node) dereference() {
		key := make([]byte, len(n.key))
		copy(key, n.key)
		n.key = key
		_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
		common.Assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
	}

	for i := range n.inodes {
		inode := &n.inodes[i]

		key := make([]byte, len(inode.key))
		copy(key, inode.key)
		inode.key = key
		_assert(len(inode.key) > 0, "dereference: zero-length inode key")
		key := make([]byte, len(inode.Key()))
		copy(key, inode.Key())
		inode.SetKey(key)
		common.Assert(len(inode.Key()) > 0, "dereference: zero-length inode key")

		value := make([]byte, len(inode.value))
		copy(value, inode.value)
		inode.value = value
		value := make([]byte, len(inode.Value()))
		copy(value, inode.Value())
		inode.SetValue(value)
	}

	// Recursively dereference children.
@ -543,13 +487,13 @@ func (n *node) dereference() {
	}

	// Update statistics.
	n.bucket.tx.stats.NodeDeref++
	n.bucket.tx.stats.IncNodeDeref(1)
}

// free adds the node's underlying page to the freelist.
func (n *node) free() {
	if n.pgid != 0 {
		n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
		n.bucket.tx.db.freelist.Free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid))
		n.pgid = 0
	}
}
@ -581,22 +525,14 @@ func (n *node) dump() {
}
*/

func compareKeys(left, right []byte) int {
	return bytes.Compare(left, right)
}

type nodes []*node

func (s nodes) Len() int      { return len(s) }
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool {
	return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
	return bytes.Compare(s[i].inodes[0].Key(), s[j].inodes[0].Key()) == -1
}

// inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type inode struct {
	flags uint32
	pgid  pgid
	key   []byte
	value []byte
}

type inodes []inode
204 vendor/go.etcd.io/bbolt/page.go generated vendored
@ -1,204 +0,0 @@
package bbolt

import (
	"fmt"
	"os"
	"sort"
	"unsafe"
)

const pageHeaderSize = unsafe.Sizeof(page{})

const minKeysPerPage = 2

const branchPageElementSize = unsafe.Sizeof(branchPageElement{})
const leafPageElementSize = unsafe.Sizeof(leafPageElement{})

const (
	branchPageFlag   = 0x01
	leafPageFlag     = 0x02
	metaPageFlag     = 0x04
	freelistPageFlag = 0x10
)

const (
	bucketLeafFlag = 0x01
)

type pgid uint64

type page struct {
	id       pgid
	flags    uint16
	count    uint16
	overflow uint32
}

// typ returns a human readable page type string used for debugging.
func (p *page) typ() string {
	if (p.flags & branchPageFlag) != 0 {
		return "branch"
	} else if (p.flags & leafPageFlag) != 0 {
		return "leaf"
	} else if (p.flags & metaPageFlag) != 0 {
		return "meta"
	} else if (p.flags & freelistPageFlag) != 0 {
		return "freelist"
	}
	return fmt.Sprintf("unknown<%02x>", p.flags)
}

// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
	return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
}

// leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
	return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		leafPageElementSize, int(index)))
}

// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
	if p.count == 0 {
		return nil
	}
	var elems []leafPageElement
	data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
	unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
	return elems
}

// branchPageElement retrieves the branch node by index
func (p *page) branchPageElement(index uint16) *branchPageElement {
	return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		unsafe.Sizeof(branchPageElement{}), int(index)))
}

// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
	if p.count == 0 {
		return nil
	}
	var elems []branchPageElement
	data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
	unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
	return elems
}

// hexdump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
	buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
	fmt.Fprintf(os.Stderr, "%x\n", buf)
}

type pages []*page

func (s pages) Len() int           { return len(s) }
func (s pages) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }

// branchPageElement represents a node on a branch page.
type branchPageElement struct {
	pos   uint32
	ksize uint32
	pgid  pgid
}

// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
	return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
}

// leafPageElement represents a node on a leaf page.
type leafPageElement struct {
	flags uint32
	pos   uint32
	ksize uint32
	vsize uint32
}

// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
	i := int(n.pos)
	j := i + int(n.ksize)
	return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}

// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
	i := int(n.pos) + int(n.ksize)
	j := i + int(n.vsize)
	return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}

// PageInfo represents human readable information about a page.
type PageInfo struct {
	ID            int
	Type          string
	Count         int
	OverflowCount int
}

type pgids []pgid

func (s pgids) Len() int           { return len(s) }
func (s pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s pgids) Less(i, j int) bool { return s[i] < s[j] }

// merge returns the sorted union of a and b.
func (a pgids) merge(b pgids) pgids {
	// Return the opposite slice if one is nil.
	if len(a) == 0 {
		return b
	}
	if len(b) == 0 {
		return a
	}
	merged := make(pgids, len(a)+len(b))
	mergepgids(merged, a, b)
	return merged
}

// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
	if len(dst) < len(a)+len(b) {
		panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
	}
	// Copy in the opposite slice if one is nil.
	if len(a) == 0 {
		copy(dst, b)
		return
	}
	if len(b) == 0 {
		copy(dst, a)
		return
	}

	// Merged will hold all elements from both lists.
	merged := dst[:0]

	// Assign lead to the slice with a lower starting value, follow to the higher value.
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}

	// Continue while there are elements in the lead.
	for len(lead) > 0 {
		// Merge largest prefix of lead that is ahead of follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}

		// Swap lead and follow.
		lead, follow = follow, lead[n:]
	}

	// Append what's left in follow.
	_ = append(merged, follow...)
}
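
mergepgids is a leapfrogging merge of two sorted lists: binary-search for the longest prefix of the leading list that does not pass the head of the other, append it in one copy, then swap roles. The same algorithm on plain uint64 slices (a simplified re-implementation for illustration, not the vendored function itself):

package main

import (
	"fmt"
	"sort"
)

// mergeSorted returns the sorted union of two sorted slices.
func mergeSorted(a, b []uint64) []uint64 {
	if len(a) == 0 {
		return b
	}
	if len(b) == 0 {
		return a
	}
	merged := make([]uint64, 0, len(a)+len(b))
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}
	for len(lead) > 0 {
		// Longest prefix of lead whose values do not exceed follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}
		lead, follow = follow, lead[n:] // swap roles
	}
	return append(merged, follow...)
}

func main() {
	fmt.Println(mergeSorted([]uint64{1, 4, 9}, []uint64{2, 3, 10}))
	// [1 2 3 4 9 10]
}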
600 vendor/go.etcd.io/bbolt/tx.go generated vendored
@ -1,17 +1,20 @@
package bbolt

import (
	"errors"
	"fmt"
	"io"
	"os"
	"runtime"
	"sort"
	"strings"
	"sync/atomic"
	"time"
	"unsafe"
)

// txid represents the internal transaction identifier.
type txid uint64
	berrors "go.etcd.io/bbolt/errors"
	"go.etcd.io/bbolt/internal/common"
)

// Tx represents a read-only or read/write transaction on the database.
// Read-only transactions can be used for retrieving values for keys and creating cursors.
@ -25,9 +28,9 @@ type Tx struct {
	writable bool
	managed  bool
	db       *DB
	meta     *meta
	meta     *common.Meta
	root     Bucket
	pages    map[pgid]*page
	pages    map[common.Pgid]*common.Page
	stats    TxStats
	commitHandlers []func()

@ -46,24 +49,27 @@ func (tx *Tx) init(db *DB) {
	tx.pages = nil

	// Copy the meta page since it can be changed by the writer.
	tx.meta = &meta{}
	db.meta().copy(tx.meta)
	tx.meta = &common.Meta{}
	db.meta().Copy(tx.meta)

	// Copy over the root bucket.
	tx.root = newBucket(tx)
	tx.root.bucket = &bucket{}
	*tx.root.bucket = tx.meta.root
	tx.root.InBucket = &common.InBucket{}
	*tx.root.InBucket = *(tx.meta.RootBucket())

	// Increment the transaction id and add a page cache for writable transactions.
	if tx.writable {
		tx.pages = make(map[pgid]*page)
		tx.meta.txid += txid(1)
		tx.pages = make(map[common.Pgid]*common.Page)
		tx.meta.IncTxid()
	}
}

// ID returns the transaction id.
func (tx *Tx) ID() int {
	return int(tx.meta.txid)
	if tx == nil || tx.meta == nil {
		return -1
	}
	return int(tx.meta.Txid())
}

// DB returns a reference to the database that created the transaction.
@ -73,7 +79,7 @@ func (tx *Tx) DB() *DB {

// Size returns current database size in bytes as seen by this transaction.
func (tx *Tx) Size() int64 {
	return int64(tx.meta.pgid) * int64(tx.db.pageSize)
	return int64(tx.meta.Pgid()) * int64(tx.db.pageSize)
}

// Writable returns whether the transaction can perform write operations.
@ -94,6 +100,11 @@ func (tx *Tx) Stats() TxStats {
	return tx.stats
}

// Inspect returns the structure of the database.
func (tx *Tx) Inspect() BucketStructure {
	return tx.root.Inspect()
}

// Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
@ -121,6 +132,24 @@ func (tx *Tx) DeleteBucket(name []byte) error {
	return tx.root.DeleteBucket(name)
}

// MoveBucket moves a sub-bucket from the source bucket to the destination bucket.
// Returns an error if
//  1. the sub-bucket cannot be found in the source bucket;
//  2. the key already exists in the destination bucket;
//  3. the key represents a non-bucket value.
//
// If src is nil, it means moving a top level bucket into the target bucket.
// If dst is nil, it means converting the child bucket into a top level bucket.
func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error {
	if src == nil {
		src = &tx.root
	}
	if dst == nil {
		dst = &tx.root
	}
	return src.MoveBucket(child, dst)
}
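
A usage sketch for MoveBucket; the bucket names are placeholders, and the call shape matches the signature above (this API was added in the bbolt v1.4 line):

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("app.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Move the sub-bucket "2024" out of "live" into "archive".
	// Passing nil for src or dst would mean the root bucket instead.
	err = db.Update(func(tx *bolt.Tx) error {
		src := tx.Bucket([]byte("live"))
		dst := tx.Bucket([]byte("archive"))
		return tx.MoveBucket([]byte("2024"), src, dst)
	})
	if err != nil {
		log.Fatal(err)
	}
}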

// ForEach executes a function for each bucket in the root.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller.
@ -135,15 +164,28 @@ func (tx *Tx) OnCommit(fn func()) {
	tx.commitHandlers = append(tx.commitHandlers, fn)
}

// Commit writes all changes to disk and updates the meta page.
// Commit writes all changes to disk, updates the meta page and closes the transaction.
// Returns an error if a disk write error occurs, or if Commit is
// called on a read-only transaction.
func (tx *Tx) Commit() error {
	_assert(!tx.managed, "managed tx commit not allowed")
func (tx *Tx) Commit() (err error) {
	txId := tx.ID()
	lg := tx.db.Logger()
	if lg != discardLogger {
		lg.Debugf("Committing transaction %d", txId)
		defer func() {
			if err != nil {
				lg.Errorf("Committing transaction failed: %v", err)
			} else {
				lg.Debugf("Committing transaction %d successfully", txId)
			}
		}()
	}

	common.Assert(!tx.managed, "managed tx commit not allowed")
	if tx.db == nil {
		return ErrTxClosed
		return berrors.ErrTxClosed
	} else if !tx.writable {
		return ErrTxNotWritable
		return berrors.ErrTxNotWritable
	}

	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
@ -151,53 +193,70 @@ func (tx *Tx) Commit() error {
	// Rebalance nodes which have had deletions.
	var startTime = time.Now()
	tx.root.rebalance()
	if tx.stats.Rebalance > 0 {
		tx.stats.RebalanceTime += time.Since(startTime)
	if tx.stats.GetRebalance() > 0 {
		tx.stats.IncRebalanceTime(time.Since(startTime))
	}

	opgid := tx.meta.Pgid()

	// spill data onto dirty pages.
	startTime = time.Now()
	if err := tx.root.spill(); err != nil {
	if err = tx.root.spill(); err != nil {
		lg.Errorf("spilling data onto dirty pages failed: %v", err)
		tx.rollback()
		return err
	}
	tx.stats.SpillTime += time.Since(startTime)
	tx.stats.IncSpillTime(time.Since(startTime))

	// Free the old root bucket.
	tx.meta.root.root = tx.root.root
	tx.meta.RootBucket().SetRootPage(tx.root.RootPage())

	// Free the old freelist because commit writes out a fresh freelist.
	if tx.meta.freelist != pgidNoFreelist {
		tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	if tx.meta.Freelist() != common.PgidNoFreelist {
		tx.db.freelist.Free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist()))
	}

	if !tx.db.NoFreelistSync {
		err := tx.commitFreelist()
		err = tx.commitFreelist()
		if err != nil {
			lg.Errorf("committing freelist failed: %v", err)
			return err
		}
	} else {
		tx.meta.freelist = pgidNoFreelist
		tx.meta.SetFreelist(common.PgidNoFreelist)
	}

	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.Pgid() > opgid {
		_ = errors.New("")
		// gofail: var lackOfDiskSpace string
		// tx.rollback()
		// return errors.New(lackOfDiskSpace)
		if err = tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil {
			lg.Errorf("growing db size failed, pgid: %d, pagesize: %d, error: %v", tx.meta.Pgid(), tx.db.pageSize, err)
			tx.rollback()
			return err
		}
	}

	// Write dirty pages to disk.
	startTime = time.Now()
	if err := tx.write(); err != nil {
	if err = tx.write(); err != nil {
		lg.Errorf("writing data failed: %v", err)
		tx.rollback()
		return err
	}

	// If strict mode is enabled then perform a consistency check.
	// Only the first consistency error is reported in the panic.
	if tx.db.StrictMode {
		ch := tx.Check()
		var errs []string
		for {
			err, ok := <-ch
			chkErr, ok := <-ch
			if !ok {
				break
			}
			errs = append(errs, err.Error())
			errs = append(errs, chkErr.Error())
		}
		if len(errs) > 0 {
			panic("check fail: " + strings.Join(errs, "\n"))
@ -205,11 +264,12 @@ func (tx *Tx) Commit() error {
	}

	// Write meta to disk.
	if err := tx.writeMeta(); err != nil {
	if err = tx.writeMeta(); err != nil {
		lg.Errorf("writeMeta failed: %v", err)
		tx.rollback()
		return err
	}
	tx.stats.WriteTime += time.Since(startTime)
	tx.stats.IncWriteTime(time.Since(startTime))

	// Finalize the transaction.
	tx.close()
@ -225,24 +285,14 @@ func (tx *Tx) Commit() error {
func (tx *Tx) commitFreelist() error {
	// Allocate new pages for the new free list. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	opgid := tx.meta.pgid
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	p, err := tx.allocate((tx.db.freelist.EstimatedWritePageSize() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id
	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
			return err
		}
	}

	tx.db.freelist.Write(p)
	tx.meta.SetFreelist(p.Id())

	return nil
}
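
Applications normally never call Commit directly: db.Update wraps the rebalance/spill/write/writeMeta sequence above in a managed transaction that commits on a nil return and rolls back on error or panic. A standard usage example (the file and bucket names are placeholders):

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("app.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update drives the whole commit pipeline shown above on success.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("config"))
		if err != nil {
			return err
		}
		return b.Put([]byte("version"), []byte("1"))
	})
	if err != nil {
		log.Fatal(err)
	}
}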
|
||||
@ -250,9 +300,9 @@ func (tx *Tx) commitFreelist() error {
|
||||
// Rollback closes the transaction and ignores all previous updates. Read-only
|
||||
// transactions must be rolled back and not committed.
|
||||
func (tx *Tx) Rollback() error {
|
||||
_assert(!tx.managed, "managed tx rollback not allowed")
|
||||
common.Assert(!tx.managed, "managed tx rollback not allowed")
|
||||
if tx.db == nil {
|
||||
return ErrTxClosed
|
||||
return berrors.ErrTxClosed
|
||||
}
|
||||
tx.nonPhysicalRollback()
|
||||
return nil
|
||||
@ -264,7 +314,7 @@ func (tx *Tx) nonPhysicalRollback() {
|
||||
return
|
||||
}
|
||||
if tx.writable {
|
||||
tx.db.freelist.rollback(tx.meta.txid)
|
||||
tx.db.freelist.Rollback(tx.meta.Txid())
|
||||
}
|
||||
tx.close()
|
||||
}
|
||||
@ -275,14 +325,18 @@ func (tx *Tx) rollback() {
|
||||
return
|
||||
}
|
||||
if tx.writable {
|
||||
tx.db.freelist.rollback(tx.meta.txid)
|
||||
if !tx.db.hasSyncedFreelist() {
|
||||
// Reconstruct free page list by scanning the DB to get the whole free page list.
|
||||
// Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode.
|
||||
tx.db.freelist.noSyncReload(tx.db.freepages())
|
||||
} else {
|
||||
// Read free page list from freelist page.
|
||||
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
|
||||
tx.db.freelist.Rollback(tx.meta.Txid())
|
||||
// When mmap fails, the `data`, `dataref` and `datasz` may be reset to
|
||||
// zero values, and there is no way to reload free page IDs in this case.
|
||||
if tx.db.data != nil {
|
||||
if !tx.db.hasSyncedFreelist() {
|
||||
// Reconstruct free page list by scanning the DB to get the whole free page list.
|
||||
// Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode.
|
||||
tx.db.freelist.NoSyncReload(tx.db.freepages())
|
||||
} else {
|
||||
// Read free page list from freelist page.
|
||||
tx.db.freelist.Reload(tx.db.page(tx.db.meta().Freelist()))
|
||||
}
|
||||
}
|
||||
}
|
||||
tx.close()
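For context, the hasSyncedFreelist branch above is driven by an option at open time; a minimal sketch, assuming bbolt's Options.NoFreelistSync field:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Skipping the freelist sync makes commits cheaper, but a failed
	// commit (and every reopen) must rebuild the free page list by
	// scanning the whole file, which is what NoSyncReload does above.
	db, err := bolt.Open("my.db", 0600, &bolt.Options{NoFreelistSync: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}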
|
||||
@ -294,9 +348,9 @@ func (tx *Tx) close() {
|
||||
}
|
||||
if tx.writable {
|
||||
// Grab freelist stats.
|
||||
var freelistFreeN = tx.db.freelist.free_count()
|
||||
var freelistPendingN = tx.db.freelist.pending_count()
|
||||
var freelistAlloc = tx.db.freelist.size()
|
||||
var freelistFreeN = tx.db.freelist.FreeCount()
|
||||
var freelistPendingN = tx.db.freelist.PendingCount()
|
||||
var freelistAlloc = tx.db.freelist.EstimatedWritePageSize()
|
||||
|
||||
// Remove transaction ref & writer lock.
|
||||
tx.db.rwtx = nil
|
||||
@ -324,7 +378,7 @@ func (tx *Tx) close() {
|
||||
// Copy writes the entire database to a writer.
|
||||
// This function exists for backwards compatibility.
|
||||
//
|
||||
// Deprecated; Use WriteTo() instead.
|
||||
// Deprecated: Use WriteTo() instead.
|
||||
func (tx *Tx) Copy(w io.Writer) error {
|
||||
_, err := tx.WriteTo(w)
|
||||
return err
|
||||
@ -346,13 +400,13 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
|
||||
|
||||
// Generate a meta page. We use the same page data for both meta pages.
|
||||
buf := make([]byte, tx.db.pageSize)
|
||||
page := (*page)(unsafe.Pointer(&buf[0]))
|
||||
page.flags = metaPageFlag
|
||||
*page.meta() = *tx.meta
|
||||
page := (*common.Page)(unsafe.Pointer(&buf[0]))
|
||||
page.SetFlags(common.MetaPageFlag)
|
||||
*page.Meta() = *tx.meta
|
||||
|
||||
// Write meta 0.
|
||||
page.id = 0
|
||||
page.meta().checksum = page.meta().sum64()
|
||||
page.SetId(0)
|
||||
page.Meta().SetChecksum(page.Meta().Sum64())
|
||||
nn, err := w.Write(buf)
|
||||
n += int64(nn)
|
||||
if err != nil {
|
||||
@ -360,9 +414,9 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
|
||||
}
|
||||
|
||||
// Write meta 1 with a lower transaction id.
|
||||
page.id = 1
|
||||
page.meta().txid -= 1
|
||||
page.meta().checksum = page.meta().sum64()
|
||||
page.SetId(1)
|
||||
page.Meta().DecTxid()
|
||||
page.Meta().SetChecksum(page.Meta().Sum64())
|
||||
nn, err = w.Write(buf)
|
||||
n += int64(nn)
|
||||
if err != nil {
|
||||
@ -393,7 +447,7 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = tx.Copy(f)
|
||||
_, err = tx.WriteTo(f)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
return err
|
||||
@ -401,111 +455,21 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
|
||||
return f.Close()
|
||||
}
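A minimal caller-side sketch of the two supported backup paths (WriteTo inside a read transaction, or the CopyFile wrapper that now delegates to it), assuming the public bbolt API:

package main

import (
	"log"
	"os"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		f, err := os.Create("backup.db")
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = tx.WriteTo(f) // tx.CopyFile("backup.db", 0600) wraps exactly this
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
}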
|
||||
|
||||
// Check performs several consistency checks on the database for this transaction.
|
||||
// An error is returned if any inconsistency is found.
|
||||
//
|
||||
// It can be safely run concurrently on a writable transaction. However, this
|
||||
// incurs a high cost for large databases and databases with a lot of subbuckets
|
||||
// because of caching. This overhead can be removed if running on a read-only
|
||||
// transaction, however, it is not safe to execute other writer transactions at
|
||||
// the same time.
|
||||
func (tx *Tx) Check() <-chan error {
|
||||
ch := make(chan error)
|
||||
go tx.check(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (tx *Tx) check(ch chan error) {
|
||||
// Force loading free list if opened in ReadOnly mode.
|
||||
tx.db.loadFreelist()
|
||||
|
||||
// Check if any pages are double freed.
|
||||
freed := make(map[pgid]bool)
|
||||
all := make([]pgid, tx.db.freelist.count())
|
||||
tx.db.freelist.copyall(all)
|
||||
for _, id := range all {
|
||||
if freed[id] {
|
||||
ch <- fmt.Errorf("page %d: already freed", id)
|
||||
}
|
||||
freed[id] = true
|
||||
}
|
||||
|
||||
// Track every reachable page.
|
||||
reachable := make(map[pgid]*page)
|
||||
reachable[0] = tx.page(0) // meta0
|
||||
reachable[1] = tx.page(1) // meta1
|
||||
if tx.meta.freelist != pgidNoFreelist {
|
||||
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
|
||||
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively check buckets.
|
||||
tx.checkBucket(&tx.root, reachable, freed, ch)
|
||||
|
||||
// Ensure all pages below high water mark are either reachable or freed.
|
||||
for i := pgid(0); i < tx.meta.pgid; i++ {
|
||||
_, isReachable := reachable[i]
|
||||
if !isReachable && !freed[i] {
|
||||
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
|
||||
}
|
||||
}
|
||||
|
||||
// Close the channel to signal completion.
|
||||
close(ch)
|
||||
}
|
||||
|
||||
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
|
||||
// Ignore inline buckets.
|
||||
if b.root == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Check every page used by this bucket.
|
||||
b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
|
||||
if p.id > tx.meta.pgid {
|
||||
ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
|
||||
}
|
||||
|
||||
// Ensure each page is only referenced once.
|
||||
for i := pgid(0); i <= pgid(p.overflow); i++ {
|
||||
var id = p.id + i
|
||||
if _, ok := reachable[id]; ok {
|
||||
ch <- fmt.Errorf("page %d: multiple references", int(id))
|
||||
}
|
||||
reachable[id] = p
|
||||
}
|
||||
|
||||
// We should only encounter un-freed leaf and branch pages.
|
||||
if freed[p.id] {
|
||||
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
|
||||
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
|
||||
ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
|
||||
}
|
||||
})
|
||||
|
||||
// Check each bucket within this bucket.
|
||||
_ = b.ForEach(func(k, v []byte) error {
|
||||
if child := b.Bucket(k); child != nil {
|
||||
tx.checkBucket(child, reachable, freed, ch)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// allocate returns a contiguous block of memory starting at a given page.
|
||||
func (tx *Tx) allocate(count int) (*page, error) {
|
||||
p, err := tx.db.allocate(tx.meta.txid, count)
|
||||
func (tx *Tx) allocate(count int) (*common.Page, error) {
|
||||
lg := tx.db.Logger()
|
||||
p, err := tx.db.allocate(tx.meta.Txid(), count)
|
||||
if err != nil {
|
||||
lg.Errorf("allocating failed, txid: %d, count: %d, error: %v", tx.meta.Txid(), count, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Save to our page cache.
|
||||
tx.pages[p.id] = p
|
||||
tx.pages[p.Id()] = p
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.PageCount += count
|
||||
tx.stats.PageAlloc += count * tx.db.pageSize
|
||||
tx.stats.IncPageCount(int64(count))
|
||||
tx.stats.IncPageAlloc(int64(count * tx.db.pageSize))
|
||||
|
||||
return p, nil
|
||||
}
|
||||
@ -513,18 +477,19 @@ func (tx *Tx) allocate(count int) (*page, error) {
|
||||
// write writes any dirty pages to disk.
|
||||
func (tx *Tx) write() error {
|
||||
// Sort pages by id.
|
||||
pages := make(pages, 0, len(tx.pages))
|
||||
lg := tx.db.Logger()
|
||||
pages := make(common.Pages, 0, len(tx.pages))
|
||||
for _, p := range tx.pages {
|
||||
pages = append(pages, p)
|
||||
}
|
||||
// Clear out page cache early.
|
||||
tx.pages = make(map[pgid]*page)
|
||||
tx.pages = make(map[common.Pgid]*common.Page)
|
||||
sort.Sort(pages)
|
||||
|
||||
// Write pages to disk in order.
|
||||
for _, p := range pages {
|
||||
rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize)
|
||||
offset := int64(p.id) * int64(tx.db.pageSize)
|
||||
rem := (uint64(p.Overflow()) + 1) * uint64(tx.db.pageSize)
|
||||
offset := int64(p.Id()) * int64(tx.db.pageSize)
|
||||
var written uintptr
|
||||
|
||||
// Write out page in "max allocation" sized chunks.
|
||||
@ -533,14 +498,15 @@ func (tx *Tx) write() error {
|
||||
if sz > maxAllocSize-1 {
|
||||
sz = maxAllocSize - 1
|
||||
}
|
||||
buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))
|
||||
buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))
|
||||
|
||||
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
|
||||
lg.Errorf("writeAt failed, offset: %d: %w", offset, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.Write++
|
||||
tx.stats.IncWrite(1)
|
||||
|
||||
// Exit inner for loop if we've written all the chunks.
|
||||
rem -= sz
|
||||
@ -555,8 +521,10 @@ func (tx *Tx) write() error {
|
||||
}
|
||||
|
||||
// Ignore file sync if flag is set on DB.
|
||||
if !tx.db.NoSync || IgnoreNoSync {
|
||||
if !tx.db.NoSync || common.IgnoreNoSync {
|
||||
// gofail: var beforeSyncDataPages struct{}
|
||||
if err := fdatasync(tx.db); err != nil {
|
||||
lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -565,17 +533,17 @@ func (tx *Tx) write() error {
|
||||
for _, p := range pages {
|
||||
// Ignore page sizes over 1 page.
|
||||
// These are allocated using make() instead of the page pool.
|
||||
if int(p.overflow) != 0 {
|
||||
if int(p.Overflow()) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
|
||||
buf := common.UnsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
|
||||
|
||||
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
|
||||
for i := range buf {
|
||||
buf[i] = 0
|
||||
}
|
||||
tx.db.pagePool.Put(buf)
|
||||
tx.db.pagePool.Put(buf) //nolint:staticcheck
|
||||
}
|
||||
|
||||
return nil
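The zero-then-Put sequence above is standard sync.Pool hygiene for single-page buffers; a standalone sketch of the pattern (illustrative, with a hypothetical fixed pageSize, not the diff's code):

package main

import "sync"

const pageSize = 4096

// pagePool recycles single-page buffers; multi-page (overflow)
// allocations bypass it and are allocated with make() directly.
var pagePool = sync.Pool{
	New: func() any { return make([]byte, pageSize) },
}

func main() {
	buf := pagePool.Get().([]byte)
	// ... fill and write the page ...
	for i := range buf { // zero before returning so stale data never leaks
		buf[i] = 0
	}
	pagePool.Put(buf) //nolint:staticcheck // a slice, as in the code above
}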
|
||||
@ -583,79 +551,99 @@ func (tx *Tx) write() error {
|
||||
|
||||
// writeMeta writes the meta to the disk.
|
||||
func (tx *Tx) writeMeta() error {
|
||||
// gofail: var beforeWriteMetaError string
|
||||
// return errors.New(beforeWriteMetaError)
|
||||
|
||||
// Create a temporary buffer for the meta page.
|
||||
lg := tx.db.Logger()
|
||||
buf := make([]byte, tx.db.pageSize)
|
||||
p := tx.db.pageInBuffer(buf, 0)
|
||||
tx.meta.write(p)
|
||||
tx.meta.Write(p)
|
||||
|
||||
// Write the meta page to file.
|
||||
if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
|
||||
if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil {
|
||||
lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err)
|
||||
return err
|
||||
}
|
||||
if !tx.db.NoSync || IgnoreNoSync {
|
||||
if !tx.db.NoSync || common.IgnoreNoSync {
|
||||
// gofail: var beforeSyncMetaPage struct{}
|
||||
if err := fdatasync(tx.db); err != nil {
|
||||
lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.Write++
|
||||
tx.stats.IncWrite(1)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// page returns a reference to the page with a given id.
|
||||
// If page has been written to then a temporary buffered page is returned.
|
||||
func (tx *Tx) page(id pgid) *page {
|
||||
func (tx *Tx) page(id common.Pgid) *common.Page {
|
||||
// Check the dirty pages first.
|
||||
if tx.pages != nil {
|
||||
if p, ok := tx.pages[id]; ok {
|
||||
p.FastCheck(id)
|
||||
return p
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise return directly from the mmap.
|
||||
return tx.db.page(id)
|
||||
p := tx.db.page(id)
|
||||
p.FastCheck(id)
|
||||
return p
|
||||
}
|
||||
|
||||
// forEachPage iterates over every page within a given page and executes a function.
|
||||
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
|
||||
p := tx.page(pgid)
|
||||
func (tx *Tx) forEachPage(pgidnum common.Pgid, fn func(*common.Page, int, []common.Pgid)) {
|
||||
stack := make([]common.Pgid, 10)
|
||||
stack[0] = pgidnum
|
||||
tx.forEachPageInternal(stack[:1], fn)
|
||||
}
|
||||
|
||||
func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, int, []common.Pgid)) {
|
||||
p := tx.page(pgidstack[len(pgidstack)-1])
|
||||
|
||||
// Execute function.
|
||||
fn(p, depth)
|
||||
fn(p, len(pgidstack)-1, pgidstack)
|
||||
|
||||
// Recursively loop over children.
|
||||
if (p.flags & branchPageFlag) != 0 {
|
||||
for i := 0; i < int(p.count); i++ {
|
||||
elem := p.branchPageElement(uint16(i))
|
||||
tx.forEachPage(elem.pgid, depth+1, fn)
|
||||
if p.IsBranchPage() {
|
||||
for i := 0; i < int(p.Count()); i++ {
|
||||
elem := p.BranchPageElement(uint16(i))
|
||||
tx.forEachPageInternal(append(pgidstack, elem.Pgid()), fn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Page returns page information for a given page number.
|
||||
// This is only safe for concurrent use when used by a writable transaction.
|
||||
func (tx *Tx) Page(id int) (*PageInfo, error) {
|
||||
func (tx *Tx) Page(id int) (*common.PageInfo, error) {
|
||||
if tx.db == nil {
|
||||
return nil, ErrTxClosed
|
||||
} else if pgid(id) >= tx.meta.pgid {
|
||||
return nil, berrors.ErrTxClosed
|
||||
} else if common.Pgid(id) >= tx.meta.Pgid() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if tx.db.freelist == nil {
|
||||
return nil, berrors.ErrFreePagesNotLoaded
|
||||
}
|
||||
|
||||
// Build the page info.
|
||||
p := tx.db.page(pgid(id))
|
||||
info := &PageInfo{
|
||||
p := tx.db.page(common.Pgid(id))
|
||||
info := &common.PageInfo{
|
||||
ID: id,
|
||||
Count: int(p.count),
|
||||
OverflowCount: int(p.overflow),
|
||||
Count: int(p.Count()),
|
||||
OverflowCount: int(p.Overflow()),
|
||||
}
|
||||
|
||||
// Determine the type (or if it's free).
|
||||
if tx.db.freelist.freed(pgid(id)) {
|
||||
if tx.db.freelist.Freed(common.Pgid(id)) {
|
||||
info.Type = "free"
|
||||
} else {
|
||||
info.Type = p.typ()
|
||||
info.Type = p.Typ()
|
||||
}
|
||||
|
||||
return info, nil
|
||||
@ -664,43 +652,61 @@ func (tx *Tx) Page(id int) (*PageInfo, error) {
|
||||
// TxStats represents statistics about the actions performed by the transaction.
|
||||
type TxStats struct {
|
||||
// Page statistics.
|
||||
PageCount int // number of page allocations
|
||||
PageAlloc int // total bytes allocated
|
||||
//
|
||||
// DEPRECATED: Use GetPageCount() or IncPageCount()
|
||||
PageCount int64 // number of page allocations
|
||||
// DEPRECATED: Use GetPageAlloc() or IncPageAlloc()
|
||||
PageAlloc int64 // total bytes allocated
|
||||
|
||||
// Cursor statistics.
|
||||
CursorCount int // number of cursors created
|
||||
//
|
||||
// DEPRECATED: Use GetCursorCount() or IncCursorCount()
|
||||
CursorCount int64 // number of cursors created
|
||||
|
||||
// Node statistics
|
||||
NodeCount int // number of node allocations
|
||||
NodeDeref int // number of node dereferences
|
||||
//
|
||||
// DEPRECATED: Use GetNodeCount() or IncNodeCount()
|
||||
NodeCount int64 // number of node allocations
|
||||
// DEPRECATED: Use GetNodeDeref() or IncNodeDeref()
|
||||
NodeDeref int64 // number of node dereferences
|
||||
|
||||
// Rebalance statistics.
|
||||
Rebalance int // number of node rebalances
|
||||
//
|
||||
// DEPRECATED: Use GetRebalance() or IncRebalance()
|
||||
Rebalance int64 // number of node rebalances
|
||||
// DEPRECATED: Use GetRebalanceTime() or IncRebalanceTime()
|
||||
RebalanceTime time.Duration // total time spent rebalancing
|
||||
|
||||
// Split/Spill statistics.
|
||||
Split int // number of nodes split
|
||||
Spill int // number of nodes spilled
|
||||
//
|
||||
// DEPRECATED: Use GetSplit() or IncSplit()
|
||||
Split int64 // number of nodes split
|
||||
// DEPRECATED: Use GetSpill() or IncSpill()
|
||||
Spill int64 // number of nodes spilled
|
||||
// DEPRECATED: Use GetSpillTime() or IncSpillTime()
|
||||
SpillTime time.Duration // total time spent spilling
|
||||
|
||||
// Write statistics.
|
||||
Write int // number of writes performed
|
||||
//
|
||||
// DEPRECATED: Use GetWrite() or IncWrite()
|
||||
Write int64 // number of writes performed
|
||||
// DEPRECATED: Use GetWriteTime() or IncWriteTime()
|
||||
WriteTime time.Duration // total time spent writing to disk
|
||||
}
|
||||
|
||||
func (s *TxStats) add(other *TxStats) {
|
||||
s.PageCount += other.PageCount
|
||||
s.PageAlloc += other.PageAlloc
|
||||
s.CursorCount += other.CursorCount
|
||||
s.NodeCount += other.NodeCount
|
||||
s.NodeDeref += other.NodeDeref
|
||||
s.Rebalance += other.Rebalance
|
||||
s.RebalanceTime += other.RebalanceTime
|
||||
s.Split += other.Split
|
||||
s.Spill += other.Spill
|
||||
s.SpillTime += other.SpillTime
|
||||
s.Write += other.Write
|
||||
s.WriteTime += other.WriteTime
|
||||
s.IncPageCount(other.GetPageCount())
|
||||
s.IncPageAlloc(other.GetPageAlloc())
|
||||
s.IncCursorCount(other.GetCursorCount())
|
||||
s.IncNodeCount(other.GetNodeCount())
|
||||
s.IncNodeDeref(other.GetNodeDeref())
|
||||
s.IncRebalance(other.GetRebalance())
|
||||
s.IncRebalanceTime(other.GetRebalanceTime())
|
||||
s.IncSplit(other.GetSplit())
|
||||
s.IncSpill(other.GetSpill())
|
||||
s.IncSpillTime(other.GetSpillTime())
|
||||
s.IncWrite(other.GetWrite())
|
||||
s.IncWriteTime(other.GetWriteTime())
|
||||
}
|
||||
|
||||
// Sub calculates and returns the difference between two sets of transaction stats.
|
||||
@ -708,17 +714,145 @@ func (s *TxStats) add(other *TxStats) {
|
||||
// you need the performance counters that occurred within that time span.
|
||||
func (s *TxStats) Sub(other *TxStats) TxStats {
|
||||
var diff TxStats
|
||||
diff.PageCount = s.PageCount - other.PageCount
|
||||
diff.PageAlloc = s.PageAlloc - other.PageAlloc
|
||||
diff.CursorCount = s.CursorCount - other.CursorCount
|
||||
diff.NodeCount = s.NodeCount - other.NodeCount
|
||||
diff.NodeDeref = s.NodeDeref - other.NodeDeref
|
||||
diff.Rebalance = s.Rebalance - other.Rebalance
|
||||
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
|
||||
diff.Split = s.Split - other.Split
|
||||
diff.Spill = s.Spill - other.Spill
|
||||
diff.SpillTime = s.SpillTime - other.SpillTime
|
||||
diff.Write = s.Write - other.Write
|
||||
diff.WriteTime = s.WriteTime - other.WriteTime
|
||||
diff.PageCount = s.GetPageCount() - other.GetPageCount()
|
||||
diff.PageAlloc = s.GetPageAlloc() - other.GetPageAlloc()
|
||||
diff.CursorCount = s.GetCursorCount() - other.GetCursorCount()
|
||||
diff.NodeCount = s.GetNodeCount() - other.GetNodeCount()
|
||||
diff.NodeDeref = s.GetNodeDeref() - other.GetNodeDeref()
|
||||
diff.Rebalance = s.GetRebalance() - other.GetRebalance()
|
||||
diff.RebalanceTime = s.GetRebalanceTime() - other.GetRebalanceTime()
|
||||
diff.Split = s.GetSplit() - other.GetSplit()
|
||||
diff.Spill = s.GetSpill() - other.GetSpill()
|
||||
diff.SpillTime = s.GetSpillTime() - other.GetSpillTime()
|
||||
diff.Write = s.GetWrite() - other.GetWrite()
|
||||
diff.WriteTime = s.GetWriteTime() - other.GetWriteTime()
|
||||
return diff
|
||||
}
|
||||
|
||||
// GetPageCount returns PageCount atomically.
|
||||
func (s *TxStats) GetPageCount() int64 {
|
||||
return atomic.LoadInt64(&s.PageCount)
|
||||
}
|
||||
|
||||
// IncPageCount increases PageCount atomically and returns the new value.
|
||||
func (s *TxStats) IncPageCount(delta int64) int64 {
|
||||
return atomic.AddInt64(&s.PageCount, delta)
|
||||
}
|
||||
|
||||
// GetPageAlloc returns PageAlloc atomically.
|
||||
func (s *TxStats) GetPageAlloc() int64 {
|
||||
return atomic.LoadInt64(&s.PageAlloc)
|
||||
}
|
||||
|
||||
// IncPageAlloc increases PageAlloc atomically and returns the new value.
|
||||
func (s *TxStats) IncPageAlloc(delta int64) int64 {
|
||||
return atomic.AddInt64(&s.PageAlloc, delta)
|
||||
}
|
||||
|
||||
// GetCursorCount returns CursorCount atomically.
|
||||
func (s *TxStats) GetCursorCount() int64 {
|
||||
return atomic.LoadInt64(&s.CursorCount)
|
||||
}
|
||||
|
||||
// IncCursorCount increases CursorCount atomically and returns the new value.

|
||||
func (s *TxStats) IncCursorCount(delta int64) int64 {
|
||||
return atomic.AddInt64(&s.CursorCount, delta)
|
||||
}
|
||||
|
||||
// GetNodeCount returns NodeCount atomically.
|
||||
func (s *TxStats) GetNodeCount() int64 {
|
||||
return atomic.LoadInt64(&s.NodeCount)
|
||||
}
|
||||
|
||||
// IncNodeCount increases NodeCount atomically and returns the new value.
|
||||
func (s *TxStats) IncNodeCount(delta int64) int64 {
|
||||
return atomic.AddInt64(&s.NodeCount, delta)
|
||||
}
|
||||
|
||||
// GetNodeDeref returns NodeDeref atomically.
|
||||
func (s *TxStats) GetNodeDeref() int64 {
|
||||
return atomic.LoadInt64(&s.NodeDeref)
|
||||
}
|
||||
|
||||
// IncNodeDeref increases NodeDeref atomically and returns the new value.
|
||||
func (s *TxStats) IncNodeDeref(delta int64) int64 {
|
||||
return atomic.AddInt64(&s.NodeDeref, delta)
|
||||
}
|
||||
|
||||
// GetRebalance returns Rebalance atomically.
|
||||
func (s *TxStats) GetRebalance() int64 {
|
||||
return atomic.LoadInt64(&s.Rebalance)
|
||||
}
|
||||
|
||||
// IncRebalance increases Rebalance atomically and returns the new value.
|
||||
func (s *TxStats) IncRebalance(delta int64) int64 {
|
||||
return atomic.AddInt64(&s.Rebalance, delta)
|
||||
}
|
||||
|
||||
// GetRebalanceTime returns RebalanceTime atomically.
|
||||
func (s *TxStats) GetRebalanceTime() time.Duration {
|
||||
return atomicLoadDuration(&s.RebalanceTime)
|
||||
}
|
||||
|
||||
// IncRebalanceTime increases RebalanceTime atomically and returns the new value.
|
||||
func (s *TxStats) IncRebalanceTime(delta time.Duration) time.Duration {
|
||||
return atomicAddDuration(&s.RebalanceTime, delta)
|
||||
}
|
||||
|
||||
// GetSplit returns Split atomically.
|
||||
func (s *TxStats) GetSplit() int64 {
|
||||
return atomic.LoadInt64(&s.Split)
|
||||
}
|
||||
|
||||
// IncSplit increases Split atomically and returns the new value.
|
||||
func (s *TxStats) IncSplit(delta int64) int64 {
|
||||
return atomic.AddInt64(&s.Split, delta)
|
||||
}
|
||||
|
||||
// GetSpill returns Spill atomically.
|
||||
func (s *TxStats) GetSpill() int64 {
|
||||
return atomic.LoadInt64(&s.Spill)
|
||||
}
|
||||
|
||||
// IncSpill increases Spill atomically and returns the new value.
|
||||
func (s *TxStats) IncSpill(delta int64) int64 {
|
||||
return atomic.AddInt64(&s.Spill, delta)
|
||||
}
|
||||
|
||||
// GetSpillTime returns SpillTime atomically.
|
||||
func (s *TxStats) GetSpillTime() time.Duration {
|
||||
return atomicLoadDuration(&s.SpillTime)
|
||||
}
|
||||
|
||||
// IncSpillTime increases SpillTime atomically and returns the new value.
|
||||
func (s *TxStats) IncSpillTime(delta time.Duration) time.Duration {
|
||||
return atomicAddDuration(&s.SpillTime, delta)
|
||||
}
|
||||
|
||||
// GetWrite returns Write atomically.
|
||||
func (s *TxStats) GetWrite() int64 {
|
||||
return atomic.LoadInt64(&s.Write)
|
||||
}
|
||||
|
||||
// IncWrite increases Write atomically and returns the new value.
|
||||
func (s *TxStats) IncWrite(delta int64) int64 {
|
||||
return atomic.AddInt64(&s.Write, delta)
|
||||
}
|
||||
|
||||
// GetWriteTime returns WriteTime atomically.
|
||||
func (s *TxStats) GetWriteTime() time.Duration {
|
||||
return atomicLoadDuration(&s.WriteTime)
|
||||
}
|
||||
|
||||
// IncWriteTime increases WriteTime atomically and returns the new value.
|
||||
func (s *TxStats) IncWriteTime(delta time.Duration) time.Duration {
|
||||
return atomicAddDuration(&s.WriteTime, delta)
|
||||
}
|
||||
|
||||
func atomicAddDuration(ptr *time.Duration, du time.Duration) time.Duration {
|
||||
return time.Duration(atomic.AddInt64((*int64)(unsafe.Pointer(ptr)), int64(du)))
|
||||
}
|
||||
|
||||
func atomicLoadDuration(ptr *time.Duration) time.Duration {
|
||||
return time.Duration(atomic.LoadInt64((*int64)(unsafe.Pointer(ptr))))
|
||||
}
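A minimal sketch of why the accessors exist: stats may be read from one goroutine while a write transaction updates them from another, so both sides must go through the atomics (assuming the TxStats API above; in real use the struct comes from db.Stats()):

package main

import (
	"fmt"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	var s bolt.TxStats
	done := make(chan struct{})
	go func() {
		// A concurrent reader uses the atomic getters, never the
		// deprecated exported fields directly.
		fmt.Println(s.GetWrite(), s.GetWriteTime())
		close(done)
	}()
	s.IncWrite(1)
	s.IncWriteTime(5 * time.Millisecond)
	<-done
}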
|
||||
|
290
vendor/go.etcd.io/bbolt/tx_check.go
generated
vendored
Normal file
290
vendor/go.etcd.io/bbolt/tx_check.go
generated
vendored
Normal file
@ -0,0 +1,290 @@
|
||||
package bbolt
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"go.etcd.io/bbolt/internal/common"
|
||||
)
|
||||
|
||||
// Check performs several consistency checks on the database for this transaction.
|
||||
// An error is returned if any inconsistency is found.
|
||||
//
|
||||
// It can be safely run concurrently on a writable transaction. However, this
|
||||
// incurs a high cost for large databases and databases with a lot of subbuckets
|
||||
// because of caching. This overhead can be removed if running on a read-only
|
||||
// transaction, however, it is not safe to execute other writer transactions at
|
||||
// the same time.
|
||||
//
|
||||
// It also allows users to provide a customized `KVStringer` implementation,
|
||||
// so that bolt can generate human-readable diagnostic messages.
|
||||
func (tx *Tx) Check(options ...CheckOption) <-chan error {
|
||||
chkConfig := checkConfig{
|
||||
kvStringer: HexKVStringer(),
|
||||
}
|
||||
for _, op := range options {
|
||||
op(&chkConfig)
|
||||
}
|
||||
|
||||
ch := make(chan error)
|
||||
go func() {
|
||||
// Close the channel to signal completion.
|
||||
defer close(ch)
|
||||
tx.check(chkConfig, ch)
|
||||
}()
|
||||
return ch
|
||||
}
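A minimal caller-side sketch, assuming the options defined at the bottom of this file; the channel must be drained until it is closed, each received value being one inconsistency:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	_ = db.View(func(tx *bolt.Tx) error {
		for cerr := range tx.Check(bolt.WithKVStringer(bolt.HexKVStringer())) {
			log.Println(cerr)
		}
		return nil
	})
}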
|
||||
|
||||
func (tx *Tx) check(cfg checkConfig, ch chan error) {
|
||||
// Force loading free list if opened in ReadOnly mode.
|
||||
tx.db.loadFreelist()
|
||||
|
||||
// Check if any pages are double freed.
|
||||
freed := make(map[common.Pgid]bool)
|
||||
all := make([]common.Pgid, tx.db.freelist.Count())
|
||||
tx.db.freelist.Copyall(all)
|
||||
for _, id := range all {
|
||||
if freed[id] {
|
||||
ch <- fmt.Errorf("page %d: already freed", id)
|
||||
}
|
||||
freed[id] = true
|
||||
}
|
||||
|
||||
// Track every reachable page.
|
||||
reachable := make(map[common.Pgid]*common.Page)
|
||||
reachable[0] = tx.page(0) // meta0
|
||||
reachable[1] = tx.page(1) // meta1
|
||||
if tx.meta.Freelist() != common.PgidNoFreelist {
|
||||
for i := uint32(0); i <= tx.page(tx.meta.Freelist()).Overflow(); i++ {
|
||||
reachable[tx.meta.Freelist()+common.Pgid(i)] = tx.page(tx.meta.Freelist())
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.pageId == 0 {
|
||||
// Check the whole db file, starting from the root bucket and
|
||||
// recursively check all child buckets.
|
||||
tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch)
|
||||
|
||||
// Ensure all pages below high water mark are either reachable or freed.
|
||||
for i := common.Pgid(0); i < tx.meta.Pgid(); i++ {
|
||||
_, isReachable := reachable[i]
|
||||
if !isReachable && !freed[i] {
|
||||
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Check the db file starting from a specified pageId.
|
||||
if cfg.pageId < 2 || cfg.pageId >= uint64(tx.meta.Pgid()) {
|
||||
ch <- fmt.Errorf("page ID (%d) out of range [%d, %d)", cfg.pageId, 2, tx.meta.Pgid())
|
||||
return
|
||||
}
|
||||
|
||||
tx.recursivelyCheckPage(common.Pgid(cfg.pageId), reachable, freed, cfg.kvStringer, ch)
|
||||
}
|
||||
}
|
||||
|
||||
func (tx *Tx) recursivelyCheckPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
|
||||
kvStringer KVStringer, ch chan error) {
|
||||
tx.checkInvariantProperties(pageId, reachable, freed, kvStringer, ch)
|
||||
tx.recursivelyCheckBucketInPage(pageId, reachable, freed, kvStringer, ch)
|
||||
}
|
||||
|
||||
func (tx *Tx) recursivelyCheckBucketInPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
|
||||
kvStringer KVStringer, ch chan error) {
|
||||
p := tx.page(pageId)
|
||||
|
||||
switch {
|
||||
case p.IsBranchPage():
|
||||
for i := range p.BranchPageElements() {
|
||||
elem := p.BranchPageElement(uint16(i))
|
||||
tx.recursivelyCheckBucketInPage(elem.Pgid(), reachable, freed, kvStringer, ch)
|
||||
}
|
||||
case p.IsLeafPage():
|
||||
for i := range p.LeafPageElements() {
|
||||
elem := p.LeafPageElement(uint16(i))
|
||||
if elem.IsBucketEntry() {
|
||||
inBkt := common.NewInBucket(pageId, 0)
|
||||
tmpBucket := Bucket{
|
||||
InBucket: &inBkt,
|
||||
rootNode: &node{isLeaf: p.IsLeafPage()},
|
||||
FillPercent: DefaultFillPercent,
|
||||
tx: tx,
|
||||
}
|
||||
if child := tmpBucket.Bucket(elem.Key()); child != nil {
|
||||
tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch)
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pageId)
|
||||
}
|
||||
}
|
||||
|
||||
func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
|
||||
kvStringer KVStringer, ch chan error) {
|
||||
// Ignore inline buckets.
|
||||
if b.RootPage() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
tx.checkInvariantProperties(b.RootPage(), reachable, freed, kvStringer, ch)
|
||||
|
||||
// Check each bucket within this bucket.
|
||||
_ = b.ForEachBucket(func(k []byte) error {
|
||||
if child := b.Bucket(k); child != nil {
|
||||
tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (tx *Tx) checkInvariantProperties(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
|
||||
kvStringer KVStringer, ch chan error) {
|
||||
tx.forEachPage(pageId, func(p *common.Page, _ int, stack []common.Pgid) {
|
||||
verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch)
|
||||
})
|
||||
|
||||
tx.recursivelyCheckPageKeyOrder(pageId, kvStringer.KeyToString, ch)
|
||||
}
|
||||
|
||||
func verifyPageReachable(p *common.Page, hwm common.Pgid, stack []common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, ch chan error) {
|
||||
if p.Id() > hwm {
|
||||
ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(hwm), stack)
|
||||
}
|
||||
|
||||
// Ensure each page is only referenced once.
|
||||
for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ {
|
||||
var id = p.Id() + i
|
||||
if _, ok := reachable[id]; ok {
|
||||
ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack)
|
||||
}
|
||||
reachable[id] = p
|
||||
}
|
||||
|
||||
// We should only encounter un-freed leaf and branch pages.
|
||||
if freed[p.Id()] {
|
||||
ch <- fmt.Errorf("page %d: reachable freed", int(p.Id()))
|
||||
} else if !p.IsBranchPage() && !p.IsLeafPage() {
|
||||
ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack)
|
||||
}
|
||||
}
|
||||
|
||||
// recursivelyCheckPageKeyOrder verifies database consistency with respect to b-tree
|
||||
// key order constraints:
|
||||
// - keys on pages must be sorted
|
||||
// - keys on child pages are between two consecutive keys on the parent's branch page.
|
||||
func (tx *Tx) recursivelyCheckPageKeyOrder(pgId common.Pgid, keyToString func([]byte) string, ch chan error) {
|
||||
tx.recursivelyCheckPageKeyOrderInternal(pgId, nil, nil, nil, keyToString, ch)
|
||||
}
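To make the two constraints concrete (illustrative keys, not taken from the diff): if a branch page holds the keys "f" and "p", the subtree under "f" may only contain keys k with "f" <= k < "p", the subtree under "p" only keys with k >= "p", and within every page the keys themselves must be strictly increasing.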
|
||||
|
||||
// recursivelyCheckPageKeyOrderInternal verifies that all keys in the subtree rooted at `pgid` are:
|
||||
// - >=`minKeyClosed` (can be nil)
|
||||
// - <`maxKeyOpen` (can be nil)
|
||||
// - are in the right ordering relationship to their parents.
|
||||
// `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message.
|
||||
func (tx *Tx) recursivelyCheckPageKeyOrderInternal(
|
||||
pgId common.Pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []common.Pgid,
|
||||
keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) {
|
||||
|
||||
p := tx.page(pgId)
|
||||
pagesStack = append(pagesStack, pgId)
|
||||
switch {
|
||||
case p.IsBranchPage():
|
||||
// For branch page we navigate ranges of all subpages.
|
||||
runningMin := minKeyClosed
|
||||
for i := range p.BranchPageElements() {
|
||||
elem := p.BranchPageElement(uint16(i))
|
||||
verifyKeyOrder(elem.Pgid(), "branch", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
|
||||
|
||||
maxKey := maxKeyOpen
|
||||
if i < len(p.BranchPageElements())-1 {
|
||||
maxKey = p.BranchPageElement(uint16(i + 1)).Key()
|
||||
}
|
||||
maxKeyInSubtree = tx.recursivelyCheckPageKeyOrderInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch)
|
||||
runningMin = maxKeyInSubtree
|
||||
}
|
||||
return maxKeyInSubtree
|
||||
case p.IsLeafPage():
|
||||
runningMin := minKeyClosed
|
||||
for i := range p.LeafPageElements() {
|
||||
elem := p.LeafPageElement(uint16(i))
|
||||
verifyKeyOrder(pgId, "leaf", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
|
||||
runningMin = elem.Key()
|
||||
}
|
||||
if p.Count() > 0 {
|
||||
return p.LeafPageElement(p.Count() - 1).Key()
|
||||
}
|
||||
default:
|
||||
ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pgId)
|
||||
}
|
||||
return maxKeyInSubtree
|
||||
}
|
||||
|
||||
/***
|
||||
* verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key",
|
||||
* is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch).
|
||||
*/
|
||||
func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []common.Pgid) {
|
||||
if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 {
|
||||
ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v",
|
||||
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
|
||||
}
|
||||
if index > 0 {
|
||||
cmpRet := compareKeys(previousKey, key)
|
||||
if cmpRet > 0 {
|
||||
ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found <) than previous element (hex)%s. Stack: %v",
|
||||
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
|
||||
}
|
||||
if cmpRet == 0 {
|
||||
ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found =) than previous element (hex)%s. Stack: %v",
|
||||
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
|
||||
}
|
||||
}
|
||||
if maxKeyOpen != nil && compareKeys(key, maxKeyOpen) >= 0 {
|
||||
ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be < than key of the next element in ancestor (hex)%s. Pages stack: %v",
|
||||
index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
|
||||
}
|
||||
}
|
||||
|
||||
// ===========================================================================================
|
||||
|
||||
type checkConfig struct {
|
||||
kvStringer KVStringer
|
||||
pageId uint64
|
||||
}
|
||||
|
||||
type CheckOption func(options *checkConfig)
|
||||
|
||||
func WithKVStringer(kvStringer KVStringer) CheckOption {
|
||||
return func(c *checkConfig) {
|
||||
c.kvStringer = kvStringer
|
||||
}
|
||||
}
|
||||
|
||||
// WithPageId sets the page ID from which the check starts.
|
||||
func WithPageId(pageId uint64) CheckOption {
|
||||
return func(c *checkConfig) {
|
||||
c.pageId = pageId
|
||||
}
|
||||
}
|
||||
|
||||
// KVStringer allows callers to prepare human-readable diagnostic messages.
|
||||
type KVStringer interface {
|
||||
KeyToString([]byte) string
|
||||
ValueToString([]byte) string
|
||||
}
|
||||
|
||||
// HexKVStringer serializes both key & value to hex representation.
|
||||
func HexKVStringer() KVStringer {
|
||||
return hexKvStringer{}
|
||||
}
|
||||
|
||||
type hexKvStringer struct{}
|
||||
|
||||
func (_ hexKvStringer) KeyToString(key []byte) string {
|
||||
return hex.EncodeToString(key)
|
||||
}
|
||||
|
||||
func (_ hexKvStringer) ValueToString(value []byte) string {
|
||||
return hex.EncodeToString(value)
|
||||
}
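A custom implementation is just a two-method value; a minimal sketch for databases whose keys and values are known to be printable (hypothetical asciiStringer, not part of the diff):

package bboltcheck

import bolt "go.etcd.io/bbolt"

// asciiStringer renders keys and values verbatim instead of hex.
type asciiStringer struct{}

func (asciiStringer) KeyToString(k []byte) string   { return string(k) }
func (asciiStringer) ValueToString(v []byte) string { return string(v) }

// CheckReadable runs the consistency check with the custom stringer.
func CheckReadable(tx *bolt.Tx) <-chan error {
	return tx.Check(bolt.WithKVStringer(asciiStringer{}))
}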
|
42
vendor/modules.txt
vendored
42
vendor/modules.txt
vendored
@ -20,7 +20,7 @@ github.com/coreos/go-systemd/v22/journal
|
||||
# github.com/denisbrodbeck/machineid v1.0.1
|
||||
## explicit
|
||||
github.com/denisbrodbeck/machineid
|
||||
# github.com/fatih/color v1.15.0
|
||||
# github.com/fatih/color v1.18.0
|
||||
## explicit; go 1.17
|
||||
github.com/fatih/color
|
||||
# github.com/gogo/protobuf v1.3.2
|
||||
@ -34,7 +34,7 @@ github.com/golang/protobuf/proto
|
||||
# github.com/hashicorp/errwrap v1.1.0
|
||||
## explicit
|
||||
github.com/hashicorp/errwrap
|
||||
# github.com/hashicorp/go-hclog v1.6.2
|
||||
# github.com/hashicorp/go-hclog v1.6.3
|
||||
## explicit; go 1.13
|
||||
github.com/hashicorp/go-hclog
|
||||
# github.com/hashicorp/go-immutable-radix v1.3.1
|
||||
@ -46,7 +46,7 @@ github.com/hashicorp/go-metrics
|
||||
github.com/hashicorp/go-metrics/compat
|
||||
# github.com/hashicorp/go-msgpack v1.1.5
|
||||
## explicit; go 1.13
|
||||
# github.com/hashicorp/go-msgpack/v2 v2.1.2
|
||||
# github.com/hashicorp/go-msgpack/v2 v2.1.3
|
||||
## explicit; go 1.19
|
||||
github.com/hashicorp/go-msgpack/v2/codec
|
||||
# github.com/hashicorp/go-multierror v1.1.1
|
||||
@ -58,27 +58,26 @@ github.com/hashicorp/golang-lru/simplelru
|
||||
# github.com/hashicorp/raft v1.7.2
|
||||
## explicit; go 1.20
|
||||
github.com/hashicorp/raft
|
||||
# github.com/hashicorp/raft-boltdb v0.0.0-20250113192317-e8660f88bcc9
|
||||
## explicit; go 1.16
|
||||
# github.com/hashicorp/raft-boltdb/v2 v2.3.1
|
||||
## explicit; go 1.20
|
||||
github.com/hashicorp/raft-boltdb/v2
|
||||
# github.com/json-iterator/go v1.1.12
|
||||
## explicit; go 1.12
|
||||
github.com/json-iterator/go
|
||||
# github.com/klauspost/compress v1.17.11
|
||||
## explicit; go 1.21
|
||||
# github.com/klauspost/compress v1.18.0
|
||||
## explicit; go 1.22
|
||||
github.com/klauspost/compress
|
||||
github.com/klauspost/compress/fse
|
||||
github.com/klauspost/compress/huff0
|
||||
github.com/klauspost/compress/internal/cpuinfo
|
||||
github.com/klauspost/compress/internal/le
|
||||
github.com/klauspost/compress/internal/snapref
|
||||
github.com/klauspost/compress/zstd
|
||||
github.com/klauspost/compress/zstd/internal/xxhash
|
||||
# github.com/mattn/go-colorable v0.1.13
|
||||
## explicit; go 1.15
|
||||
# github.com/mattn/go-colorable v0.1.14
|
||||
## explicit; go 1.18
|
||||
github.com/mattn/go-colorable
|
||||
# github.com/mattn/go-isatty v0.0.19
|
||||
# github.com/mattn/go-isatty v0.0.20
|
||||
## explicit; go 1.15
|
||||
github.com/mattn/go-isatty
|
||||
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
|
||||
@ -93,8 +92,8 @@ github.com/munnerz/goautoneg
|
||||
# github.com/pkg/errors v0.9.1
|
||||
## explicit
|
||||
github.com/pkg/errors
|
||||
# github.com/prometheus/client_golang v1.20.5
|
||||
## explicit; go 1.20
|
||||
# github.com/prometheus/client_golang v1.21.0
|
||||
## explicit; go 1.21
|
||||
github.com/prometheus/client_golang/api
|
||||
github.com/prometheus/client_golang/api/prometheus/v1
|
||||
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
|
||||
@ -117,9 +116,12 @@ github.com/prometheus/procfs/internal/util
|
||||
# github.com/rabbitmq/amqp091-go v1.10.0
|
||||
## explicit; go 1.20
|
||||
github.com/rabbitmq/amqp091-go
|
||||
# go.etcd.io/bbolt v1.3.5
|
||||
## explicit; go 1.12
|
||||
# go.etcd.io/bbolt v1.4.0
|
||||
## explicit; go 1.23
|
||||
go.etcd.io/bbolt
|
||||
go.etcd.io/bbolt/errors
|
||||
go.etcd.io/bbolt/internal/common
|
||||
go.etcd.io/bbolt/internal/freelist
|
||||
# go.etcd.io/etcd/api/v3 v3.5.18
|
||||
## explicit; go 1.22
|
||||
go.etcd.io/etcd/api/v3/authpb
|
||||
@ -163,8 +165,8 @@ golang.org/x/net/idna
|
||||
golang.org/x/net/internal/httpcommon
|
||||
golang.org/x/net/internal/timeseries
|
||||
golang.org/x/net/trace
|
||||
# golang.org/x/oauth2 v0.26.0
|
||||
## explicit; go 1.18
|
||||
# golang.org/x/oauth2 v0.27.0
|
||||
## explicit; go 1.23.0
|
||||
# golang.org/x/sys v0.30.0
|
||||
## explicit; go 1.18
|
||||
golang.org/x/sys/unix
|
||||
@ -176,12 +178,12 @@ golang.org/x/text/secure/bidirule
|
||||
golang.org/x/text/transform
|
||||
golang.org/x/text/unicode/bidi
|
||||
golang.org/x/text/unicode/norm
|
||||
# google.golang.org/genproto/googleapis/api v0.0.0-20250207221924-e9438ea467c6
|
||||
## explicit; go 1.22
|
||||
# google.golang.org/genproto/googleapis/api v0.0.0-20250224174004-546df14abb99
|
||||
## explicit; go 1.23.0
|
||||
google.golang.org/genproto/googleapis/api
|
||||
google.golang.org/genproto/googleapis/api/annotations
|
||||
# google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6
|
||||
## explicit; go 1.22
|
||||
# google.golang.org/genproto/googleapis/rpc v0.0.0-20250224174004-546df14abb99
|
||||
## explicit; go 1.23.0
|
||||
google.golang.org/genproto/googleapis/rpc/status
|
||||
# google.golang.org/grpc v1.70.0
|
||||
## explicit; go 1.22
|
||||