feat: switch vendor to go mod.

Bo-Yi Wu 2019-03-16 09:27:47 +08:00
parent 7f97bbf178
commit 5321ed4575
1296 changed files with 266 additions and 491090 deletions
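For readers unfamiliar with the workflow, a migration from a checked-in vendor/ tree to Go modules typically looks like the sketch below (illustrative commands, not taken from this commit; the module path matches the go.mod that follows):

```sh
# declare the module, reusing the existing import path
go mod init github.com/appleboy/gorush

# resolve dependencies and record pinned versions in go.mod / go.sum
go mod tidy

# drop the vendored copies of third-party code,
# which is where the ~491k deleted lines come from
git rm -r vendor
```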

go.mod (116 changed lines)

@@ -1,72 +1,70 @@
 module github.com/appleboy/gorush
+go 1.12
 require (
-github.com/AndreasBriese/bbloom v0.0.0-20170702084017-28f7e881ca57 // indirect
-github.com/apex/gateway v0.0.0-20180121231047-e9c6ccec8851
-github.com/appleboy/com v0.0.0-20180410030638-c0b5901f9622
-github.com/appleboy/go-fcm v0.0.0-20180823141355-c8cd26f2c452
-github.com/asdine/storm v0.0.0-20170209191653-de95a2ead13a
-github.com/aws/aws-lambda-go v0.0.0-20180121103608-6736675908bd // indirect
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a // indirect
-github.com/boltdb/bolt v0.0.0-20170131192018-e9cf4fae01b5 // indirect
-github.com/buger/jsonparser v0.0.0-20170215081816-6bd16707875b
-github.com/davecgh/go-spew v1.1.0 // indirect
-github.com/dgraph-io/badger v0.0.0-20180406055407-deb140b63781
-github.com/dgrijalva/jwt-go v3.1.0+incompatible // indirect
-github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102 // indirect
-github.com/emirpasic/gods v0.0.0-20170204130913-fc3e4a43ff31 // indirect
+github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 // indirect
+github.com/DataDog/zstd v1.3.5 // indirect
+github.com/Sereal/Sereal v0.0.0-20190203221631-e99c7e10fca4 // indirect
+github.com/apex/gateway v1.1.1
+github.com/appleboy/com v0.0.1
+github.com/appleboy/go-fcm v0.0.0-20190214153823-cfd5eac22a0f
+github.com/appleboy/gofight v2.0.0+incompatible // indirect
+github.com/asdine/storm v2.1.2+incompatible
+github.com/aws/aws-lambda-go v1.9.0 // indirect
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23
+github.com/dgraph-io/badger v1.5.4
+github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
+github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f // indirect
 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect
-github.com/facebookgo/grace v0.0.0-20170218225239-4afe952a37a4
-github.com/facebookgo/httpdown v0.0.0-20160323221027-a3b1354551a2 // indirect
+github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
+github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9 // indirect
+github.com/facebookgo/grace v0.0.0-20180706040059-75cf19382434
+github.com/facebookgo/httpdown v0.0.0-20180706035922-5979d39b15c2 // indirect
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
 github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 // indirect
-github.com/fsnotify/fsnotify v0.0.0-20170329110642-4da3e2cfbabc // indirect
-github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7 // indirect
-github.com/gin-gonic/gin v0.0.0-20170929084810-b8b68314faa0
-github.com/gogo/protobuf v1.1.1
-github.com/golang/protobuf v0.0.0-20171021043952-1643683e1b54 // indirect
-github.com/golang/snappy v0.0.0-20170215233205-553a64147049 // indirect
-github.com/hashicorp/hcl v0.0.0-20171017181929-23c074d0eceb // indirect
-github.com/json-iterator/go v0.0.0-20170713110145-8b0360418449 // indirect
-github.com/magiconair/properties v0.0.0-20170902060319-8d7837e64d3c // indirect
-github.com/mattn/go-isatty v0.0.0-20170216235908-dda3de49cbfc
-github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
+github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 // indirect
+github.com/gin-gonic/gin v1.3.0
+github.com/gogo/protobuf v1.2.1
+github.com/json-iterator/go v1.1.6 // indirect
+github.com/labstack/echo v3.3.10+incompatible // indirect
+github.com/labstack/gommon v0.2.8 // indirect
+github.com/mattn/go-colorable v0.1.1 // indirect
+github.com/mattn/go-isatty v0.0.7
 github.com/mitchellh/mapstructure v1.1.2
-github.com/pelletier/go-toml v0.0.0-20171022022338-8c31c2ec65b2 // indirect
-github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e // indirect
-github.com/pmezard/go-difflib v1.0.0 // indirect
-github.com/prometheus/client_golang v0.0.0-20170217083107-6ab3432d241c
-github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 // indirect
-github.com/prometheus/common v0.0.0-20170218233558-3007b6072c17 // indirect
-github.com/prometheus/procfs v0.0.0-20170216223256-a1dba9ce8bae // indirect
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+github.com/modern-go/reflect2 v1.0.1 // indirect
+github.com/pkg/errors v0.8.1 // indirect
+github.com/prometheus/client_golang v0.9.2
 github.com/sideshow/apns2 v0.0.0-20181014012405-060d44b53d05
-github.com/sirupsen/logrus v0.0.0-20170620144510-3d4380f53a34
-github.com/spf13/afero v0.0.0-20171021110813-5660eeed305f // indirect
-github.com/spf13/cast v1.1.0 // indirect
-github.com/spf13/jwalterweatherman v0.0.0-20170901151539-12bd96e66386 // indirect
-github.com/spf13/pflag v0.0.0-20171020110617-97afa5e7ca8a // indirect
-github.com/spf13/viper v0.0.0-20171020104009-8ef37cbca716
-github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1
-github.com/syndtr/goleveldb v0.0.0-20161227110519-23851d93a229
-github.com/thoas/stats v0.0.0-20160726120248-152b5d051953
+github.com/sirupsen/logrus v1.4.0
+github.com/spf13/viper v1.3.2
+github.com/stretchr/testify v1.3.0
+github.com/syndtr/goleveldb v1.0.0
+github.com/thoas/stats v0.0.0-20181218120333-e97827ebd7ca
 github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0 // indirect
-github.com/tidwall/buntdb v0.0.0-20161202163738-74dc10171b75
-github.com/tidwall/gjson v0.0.0-20170205161042-09d1c5c5bc64 // indirect
+github.com/tidwall/buntdb v1.1.0
+github.com/tidwall/gjson v1.2.1 // indirect
 github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb // indirect
-github.com/tidwall/match v0.0.0-20160830173930-173748da739a // indirect
-github.com/tidwall/rtree v0.0.0-20160903213729-d4a8a3d30d57 // indirect
-github.com/ugorji/go v0.0.0-20170312112114-708a42d24682 // indirect
-golang.org/x/crypto v0.0.0-20170404161947-c78caca803c9
-golang.org/x/net v0.0.0-20171107184841-a337091b0525
-golang.org/x/sync v0.0.0-20170517211232-f52d1811a629
-golang.org/x/sys v0.0.0-20170217003442-075e574b89e4 // indirect
-golang.org/x/text v0.0.0-20171013141220-c01e4764d870 // indirect
-google.golang.org/genproto v0.0.0-20170711235230-b0a3dcfcd1a9 // indirect
-google.golang.org/grpc v0.0.0-20171109215322-de2209a968d4
+github.com/tidwall/match v1.0.1 // indirect
+github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51 // indirect
+github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e // indirect
+github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 // indirect
+github.com/tj/assert v0.0.0-20171129193455-018094318fb0 // indirect
+github.com/valyala/fasttemplate v1.0.1 // indirect
+github.com/vmihailenco/msgpack v4.0.2+incompatible // indirect
+go.etcd.io/bbolt v1.3.2 // indirect
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6
+google.golang.org/grpc v1.19.0
 gopkg.in/appleboy/gin-status-api.v1 v1.0.1
-gopkg.in/appleboy/gofight.v1 v1.0.4 // indirect
 gopkg.in/appleboy/gofight.v2 v2.0.0
 gopkg.in/fukata/golang-stats-api-handler.v1 v1.0.0 // indirect
-gopkg.in/gin-contrib/sse.v0 v0.0.0-20170109093832-22d885f9ecc7 // indirect
-gopkg.in/go-playground/validator.v8 v8.18.1 // indirect
-gopkg.in/redis.v5 v5.2.4
-gopkg.in/yaml.v2 v2.0.0-20170208141851-a3f3340b5840 // indirect
+gopkg.in/gin-gonic/gin.v1 v1.3.0 // indirect
+gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
+gopkg.in/go-playground/validator.v8 v8.18.2 // indirect
+gopkg.in/redis.v5 v5.2.9
 )
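A few notes on the entries above: versions like v0.0.0-20190306092124-e2d15f34fcf9 are pseudo-versions (a timestamp plus a commit hash) that pin an untagged commit; the `// indirect` comment marks modules that gorush does not import directly but that appear in its dependency graph; and the `+incompatible` suffix flags v2+ releases published without a go.mod file.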

go.sum (314 changed lines)

@@ -1,127 +1,229 @@
-github.com/AndreasBriese/bbloom v0.0.0-20170702084017-28f7e881ca57 h1:CVuXDbdzPW0XCNYTldy5dQues57geAs+vfwz3FTTpy8=
-github.com/AndreasBriese/bbloom v0.0.0-20170702084017-28f7e881ca57/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/apex/gateway v0.0.0-20180121231047-e9c6ccec8851/go.mod h1:x7iPY22zu9D8sfrynawEwh1wZEO/kQTRaOM5ye02tWU=
-github.com/appleboy/com v0.0.0-20180410030638-c0b5901f9622 h1:ozHD8HTq7ivv8vTJRCAzjA4wEA8BMGekxMDZrFdqz5M=
-github.com/appleboy/com v0.0.0-20180410030638-c0b5901f9622/go.mod h1:rtwjPnHClMOJw4K5oW3ASx9BCPCJ1SDbFbzJjY4Ebqw=
-github.com/appleboy/go-fcm v0.0.0-20180823141355-c8cd26f2c452 h1:+O17Ly9bsfToQAJApv4FeLYqbm0OZyE2nupljZ1iQrc=
-github.com/appleboy/go-fcm v0.0.0-20180823141355-c8cd26f2c452/go.mod h1:DeokSWAx9RKLcVXRodWJ0lpvHaMPfQx+cC/zODK7g8A=
-github.com/asdine/storm v0.0.0-20170209191653-de95a2ead13a h1:+fMyGdnBFcIfdzMHhOXRhdnTbRAqDv8ahtl4nzP5l9Y=
-github.com/asdine/storm v0.0.0-20170209191653-de95a2ead13a/go.mod h1:RarYDc9hq1UPLImuiXK3BIWPJLdIygvV3PsInK0FbVQ=
-github.com/aws/aws-lambda-go v0.0.0-20180121103608-6736675908bd/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/boltdb/bolt v0.0.0-20170131192018-e9cf4fae01b5 h1:CEa4aInusZzqB0d5gjtQFqUQjWBa30RQR8mXuu2RnXw=
-github.com/boltdb/bolt v0.0.0-20170131192018-e9cf4fae01b5/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/buger/jsonparser v0.0.0-20170215081816-6bd16707875b/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=
+github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14=
+github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/Sereal/Sereal v0.0.0-20190203221631-e99c7e10fca4 h1:6Tu8RZRaWkHeTPk6TCcGt7uTdjBWne5J5xvL2b7S7Vw=
+github.com/Sereal/Sereal v0.0.0-20190203221631-e99c7e10fca4/go.mod h1:D0JMgToj/WdxCgd30Kc1UcA9E+WdZoJqeVOuYW7iTBM=
+github.com/apex/gateway v1.1.1 h1:dPE3y2LQ/fSJuZikCOvekqXLyn/Wrbgt10MSECobH/Q=
+github.com/apex/gateway v1.1.1/go.mod h1:x7iPY22zu9D8sfrynawEwh1wZEO/kQTRaOM5ye02tWU=
+github.com/appleboy/com v0.0.1 h1:SeP6J/YlZGWRwH7jdPYXXWA+qJy4GoQfcQFPda0ERYU=
+github.com/appleboy/com v0.0.1/go.mod h1:rtwjPnHClMOJw4K5oW3ASx9BCPCJ1SDbFbzJjY4Ebqw=
+github.com/appleboy/go-fcm v0.0.0-20190214153823-cfd5eac22a0f h1:qlMv6xr6XufzxNTAi9LqhWk8df+8DmUS8DLP5Q17saQ=
+github.com/appleboy/go-fcm v0.0.0-20190214153823-cfd5eac22a0f/go.mod h1:DeokSWAx9RKLcVXRodWJ0lpvHaMPfQx+cC/zODK7g8A=
+github.com/appleboy/gofight v2.0.0+incompatible h1:ECVMVpNJFBztDbnA7ead4Ffm6mizKKb6QyR78F+j4eY=
+github.com/appleboy/gofight v2.0.0+incompatible/go.mod h1:H/tvof1oZHnZdlBd+AeODZGkk1C+D2na0NXr0iXuZHA=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/asdine/storm v2.1.2+incompatible h1:dczuIkyqwY2LrtXPz8ixMrU/OFgZp71kbKTHGrXYt/Q=
+github.com/asdine/storm v2.1.2+incompatible/go.mod h1:RarYDc9hq1UPLImuiXK3BIWPJLdIygvV3PsInK0FbVQ=
+github.com/aws/aws-lambda-go v1.9.0 h1:r9TWtk8ozLYdMW+aelUeWny8z2mjghJCMx6/uUwOLNo=
+github.com/aws/aws-lambda-go v1.9.0/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23 h1:D21IyuvjDCshj1/qq+pCNd3VZOAEI9jy6Bi131YlXgI=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgraph-io/badger v0.0.0-20180406055407-deb140b63781 h1:2cZr+tpKuiLcuT0rk63IBRAuNNmktyLjSnFSFHXqZcs=
-github.com/dgraph-io/badger v0.0.0-20180406055407-deb140b63781/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
-github.com/dgrijalva/jwt-go v3.1.0+incompatible h1:FFziAwDQQ2dz1XClWMkwvukur3evtZx7x/wMHKM1i20=
-github.com/dgrijalva/jwt-go v3.1.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102 h1:afESQBXJEnj3fu+34X//E8Wg3nEbMJxJkwSc0tPePK0=
-github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/emirpasic/gods v0.0.0-20170204130913-fc3e4a43ff31 h1:l2nU4fk8IgsxoSLC2Tj+UrqiZXlFfZGMAj04oMJN+VU=
-github.com/emirpasic/gods v0.0.0-20170204130913-fc3e4a43ff31/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgraph-io/badger v1.5.4 h1:gVTrpUTbbr/T24uvoCaqY2KSHfNLVGm0w+hbee2HMeg=
+github.com/dgraph-io/badger v1.5.4/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA=
+github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
-github.com/facebookgo/grace v0.0.0-20170218225239-4afe952a37a4 h1:c8bY3vJpaia8nrZANEBY8yL7rGg7TSit8O1eAJe4hbo=
-github.com/facebookgo/grace v0.0.0-20170218225239-4afe952a37a4/go.mod h1:KigFdumBXUPSwzLDbeuzyt0elrL7+CP7TKuhrhT4bcU=
-github.com/facebookgo/httpdown v0.0.0-20160323221027-a3b1354551a2 h1:3Zvf9wRhl1cOhckN1oRGWPOkIhOketmEcrQ4TeFAoR4=
-github.com/facebookgo/httpdown v0.0.0-20160323221027-a3b1354551a2/go.mod h1:TUV/fX3XrTtBQb5+ttSUJzcFgLNpILONFTKmBuk5RSw=
+github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
+github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9 h1:wWke/RUCl7VRjQhwPlR/v0glZXNYzBHdNUzf/Am2Nmg=
+github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9/go.mod h1:uPmAp6Sws4L7+Q/OokbWDAK1ibXYhB3PXFP1kol5hPg=
+github.com/facebookgo/grace v0.0.0-20180706040059-75cf19382434 h1:mOp33BLbcbJ8fvTAmZacbBiOASfxN+MLcLxymZCIrGE=
+github.com/facebookgo/grace v0.0.0-20180706040059-75cf19382434/go.mod h1:KigFdumBXUPSwzLDbeuzyt0elrL7+CP7TKuhrhT4bcU=
+github.com/facebookgo/httpdown v0.0.0-20180706035922-5979d39b15c2 h1:nXeeRHmgNgjLxi+7dY9l9aDvSS1uwVlNLqUWIY4Ath0=
+github.com/facebookgo/httpdown v0.0.0-20180706035922-5979d39b15c2/go.mod h1:TUV/fX3XrTtBQb5+ttSUJzcFgLNpILONFTKmBuk5RSw=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
 github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M=
 github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4/go.mod h1:vsJz7uE339KUCpBXx3JAJzSRH7Uk4iGGyJzR529qDIA=
-github.com/fsnotify/fsnotify v0.0.0-20170329110642-4da3e2cfbabc h1:fqUzyjP8DApxXq0dOZJE/NvqQkyjxiTy9ARNyRwBPEw=
-github.com/fsnotify/fsnotify v0.0.0-20170329110642-4da3e2cfbabc/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7 h1:AzN37oI0cOS+cougNAV9szl6CVoj2RYwzS3DpUQNtlY=
-github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
-github.com/gin-gonic/gin v0.0.0-20170929084810-b8b68314faa0 h1:9NJhszAM+V+E0qESsPI3EGhCINyqzDWAwC0/dPEUexM=
-github.com/gin-gonic/gin v0.0.0-20170929084810-b8b68314faa0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y=
-github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang/protobuf v0.0.0-20171021043952-1643683e1b54 h1:nRNJXiJvemchkOTn0V4U11TZkvacB94gTzbTZbSA7Rw=
-github.com/golang/protobuf v0.0.0-20171021043952-1643683e1b54/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/snappy v0.0.0-20170215233205-553a64147049 h1:K9KHZbXKpGydfDN0aZrsoHpLJlZsBrGMFWbgLDGnPZk=
-github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/hashicorp/hcl v0.0.0-20171017181929-23c074d0eceb h1:1OvvPvZkn/yCQ3xBcM8y4020wdkMXPHLB4+NfoGWh4U=
-github.com/hashicorp/hcl v0.0.0-20171017181929-23c074d0eceb/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
-github.com/json-iterator/go v0.0.0-20170713110145-8b0360418449/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/magiconair/properties v0.0.0-20170902060319-8d7837e64d3c h1:BDr2SMw3gKp9Xyvp33plTgRPEkE6NralNG0JLuBgkiQ=
-github.com/magiconair/properties v0.0.0-20170902060319-8d7837e64d3c/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-isatty v0.0.0-20170216235908-dda3de49cbfc h1:pK7tzC30erKOTfEDCYGvPZQCkmM9X5iSmmAR5m9x3Yc=
-github.com/mattn/go-isatty v0.0.0-20170216235908-dda3de49cbfc/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y=
+github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g=
+github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
+github.com/gin-gonic/gin v1.3.0 h1:kCmZyPklC0gVdL728E6Aj20uYBJV93nj/TkwBTKhFbs=
+github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y=
+github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/labstack/echo v3.3.10+incompatible h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8bbnE7CX5OEgg=
+github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s=
+github.com/labstack/gommon v0.2.8 h1:JvRqmeZcfrHC5u6uVleB4NxxNbzx6gpbJiQknDbKQu0=
+github.com/labstack/gommon v0.2.8/go.mod h1:/tj9csK2iPSBvn+3NLM9e52usepMtrd5ilFYA+wQNJ4=
+github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/pelletier/go-toml v0.0.0-20171022022338-8c31c2ec65b2 h1:RTx6HBVnEPvm6X4nj0Tw4ebgCq6XgwnKCvsqbKGD4XQ=
-github.com/pelletier/go-toml v0.0.0-20171022022338-8c31c2ec65b2/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e h1:+RHxT/gm0O3UF7nLJbdNzAmULvCFt4XfXHWzh3XI/zs=
-github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.0.0-20170217083107-6ab3432d241c h1:zLYM96wNvga1NMXcC49BvEOwQ8MdzKoNcMeEgWwZhMo=
-github.com/prometheus/client_golang v0.0.0-20170217083107-6ab3432d241c/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 h1:13pIdM2tpaDi4OVe24fgoIS7ZTqMt0QI+bwQsX5hq+g=
-github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/common v0.0.0-20170218233558-3007b6072c17 h1:e4C9EN2+nlA+LbVsE9lokqKhNYlmmCrv4C6Ey96otPY=
-github.com/prometheus/common v0.0.0-20170218233558-3007b6072c17/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/procfs v0.0.0-20170216223256-a1dba9ce8bae h1:nbLP9B5vU3a/0hOXzolmZHxr2SQ2MEu6vhZappUZY9c=
-github.com/prometheus/procfs v0.0.0-20170216223256-a1dba9ce8bae/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/sideshow/apns2 v0.0.0-20181014012405-060d44b53d05 h1:3M+RJyiSlS8LumIYw9HbwwhKMQzJZ1HzTbU3FVmXy8o=
 github.com/sideshow/apns2 v0.0.0-20181014012405-060d44b53d05/go.mod h1:f7dArLPLbiZ3qPdzzrZXdCSlMp8FD0p6z7tHssDOLvk=
-github.com/sirupsen/logrus v0.0.0-20170620144510-3d4380f53a34 h1:kVLTAexkb0RpvzqHGdmxz80/bPLGuZn4qnUR0a4sW9Y=
-github.com/sirupsen/logrus v0.0.0-20170620144510-3d4380f53a34/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/spf13/afero v0.0.0-20171021110813-5660eeed305f h1:+Dx5AA/mr18sj78olfUUNWiBBH18xbGhdXiOnLoKnzY=
-github.com/spf13/afero v0.0.0-20171021110813-5660eeed305f/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/cast v1.1.0 h1:0Rhw4d6C8J9VPu6cjZLIhZ8+aAOHcDvGeKn+cq5Aq3k=
-github.com/spf13/cast v1.1.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
-github.com/spf13/jwalterweatherman v0.0.0-20170901151539-12bd96e66386 h1:zBoLErXXAvWnNsu+pWkRYl6Cx1KXmIfAVsIuYkPN6aY=
-github.com/spf13/jwalterweatherman v0.0.0-20170901151539-12bd96e66386/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v0.0.0-20171020110617-97afa5e7ca8a h1:NLjz2RTuoGx4ITYI/oN9PGzz1b53vqe7xZ2juTlC/18=
-github.com/spf13/pflag v0.0.0-20171020110617-97afa5e7ca8a/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/viper v0.0.0-20171020104009-8ef37cbca716 h1:riT9pMSDqlXO6nIDTZoGk3C5OIDdLFaCkfkgJ0fF/rQ=
-github.com/spf13/viper v0.0.0-20171020104009-8ef37cbca716/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
-github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/syndtr/goleveldb v0.0.0-20161227110519-23851d93a229 h1:arXQNTPyszL9q5nmGtSXyGocRDQRxdtoSS25nZgPvCI=
-github.com/syndtr/goleveldb v0.0.0-20161227110519-23851d93a229/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
-github.com/thoas/stats v0.0.0-20160726120248-152b5d051953 h1:PD6HdaGc9tn2a8W/33zxcA6DTq1D1K0O/PKWOGz3Lxo=
-github.com/thoas/stats v0.0.0-20160726120248-152b5d051953/go.mod h1:GkZsNBOco11YY68OnXUARbSl26IOXXAeYf6ZKmSZR2M=
+github.com/sirupsen/logrus v1.4.0 h1:yKenngtzGh+cUSSh6GWbxW2abRqhYUSR/t/6+2QqNvE=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/thoas/stats v0.0.0-20181218120333-e97827ebd7ca h1:Ju3LQGLQHCUv1yB2WwB1/uXHL+8SfF4E8qm/iSCQV0Q=
+github.com/thoas/stats v0.0.0-20181218120333-e97827ebd7ca/go.mod h1:GkZsNBOco11YY68OnXUARbSl26IOXXAeYf6ZKmSZR2M=
 github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0 h1:QnyrPZZvPmR0AtJCxxfCtI1qN+fYpKTKJ/5opWmZ34k=
 github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0/go.mod h1:huei1BkDWJ3/sLXmO+bsCNELL+Bp2Kks9OLyQFkzvA8=
-github.com/tidwall/buntdb v0.0.0-20161202163738-74dc10171b75 h1:lBUdGHoBnfesVCZAYHc4uvAfp1irCkoZh8TQ6y0GzZ8=
-github.com/tidwall/buntdb v0.0.0-20161202163738-74dc10171b75/go.mod h1:Y39xhcDW10WlyYXeLgGftXVbjtM0QP+/kpz8xl9cbzE=
-github.com/tidwall/gjson v0.0.0-20170205161042-09d1c5c5bc64 h1:6/SJyMAg0OoV6FuVP97VsiTFxdG4XyisdpvSTC1IiHY=
-github.com/tidwall/gjson v0.0.0-20170205161042-09d1c5c5bc64/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA=
+github.com/tidwall/buntdb v1.1.0 h1:H6LzK59KiNjf1nHVPFrYj4Qnl8d8YLBsYamdL8N+Bao=
+github.com/tidwall/buntdb v1.1.0/go.mod h1:Y39xhcDW10WlyYXeLgGftXVbjtM0QP+/kpz8xl9cbzE=
+github.com/tidwall/gjson v1.2.1 h1:j0efZLrZUvNerEf6xqoi0NjWMK5YlLrR7Guo/dxY174=
+github.com/tidwall/gjson v1.2.1/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA=
 github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb h1:5NSYaAdrnblKByzd7XByQEJVT8+9v0W/tIY0Oo4OwrE=
 github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb/go.mod h1:lKYYLFIr9OIgdgrtgkZ9zgRxRdvPYsExnYBsEAd8W5M=
-github.com/tidwall/match v0.0.0-20160830173930-173748da739a h1:jkSy//MOkpJzPmsdrxnM+wiF/wdmVCFGegxccsSkm2Q=
-github.com/tidwall/match v0.0.0-20160830173930-173748da739a/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
-github.com/tidwall/rtree v0.0.0-20160903213729-d4a8a3d30d57 h1:k1tEEozQvcJFX6AFAfaF6cCeDvIhocoTrM47VswRfCg=
-github.com/tidwall/rtree v0.0.0-20160903213729-d4a8a3d30d57/go.mod h1:/h+UnNGt0IhNNJLkGikcdcJqm66zGD/uJGMRxK/9+Ao=
-github.com/ugorji/go v0.0.0-20170312112114-708a42d24682 h1:9FsLhpi/pQ3YZgrcf1xMOTaVqiUEaLCzGNFVfKswcww=
-github.com/ugorji/go v0.0.0-20170312112114-708a42d24682/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
-golang.org/x/crypto v0.0.0-20170404161947-c78caca803c9 h1:rPnURWKICR4bkjK+GABNWICXj6KLDa1jbj0IoVaQODw=
-golang.org/x/crypto v0.0.0-20170404161947-c78caca803c9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/net v0.0.0-20171107184841-a337091b0525 h1:KtEW9ll78DlakrUaoIv2p6oozE+wN/abax8yB4Y8+Fs=
-golang.org/x/net v0.0.0-20171107184841-a337091b0525/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/sync v0.0.0-20170517211232-f52d1811a629 h1:wqoYUzeICxRnvJCvfHTh0OY0VQ6xern7nYq+ccc19e4=
-golang.org/x/sync v0.0.0-20170517211232-f52d1811a629/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20170217003442-075e574b89e4 h1:qDJE6+PXeuSus3ObnNHn/wLLkXfbuP0XhYcXZWTFDcs=
-golang.org/x/sys v0.0.0-20170217003442-075e574b89e4/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/text v0.0.0-20171013141220-c01e4764d870 h1:xdgOfOjH4d435yr21ATHejC9Un1tBMu4Scm2B7DUbmI=
-golang.org/x/text v0.0.0-20171013141220-c01e4764d870/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-google.golang.org/genproto v0.0.0-20170711235230-b0a3dcfcd1a9 h1:wxGvmadi0ZZjsFJf3uyqRb1Eg4Jawj4ofWGwW09gfds=
-google.golang.org/genproto v0.0.0-20170711235230-b0a3dcfcd1a9/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/grpc v0.0.0-20171109215322-de2209a968d4 h1:6MdQUci2qlorHzHryzbzwf6SpQbkyyjRoUedE1A/Hls=
-google.golang.org/grpc v0.0.0-20171109215322-de2209a968d4/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
+github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
+github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51 h1:BP2bjP495BBPaBcS5rmqviTfrOkN5rO5ceKAMRZCRFc=
+github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e h1:+NL1GDIUOKxVfbp2KoJQD9cTQ6dyP2co9q4yzmT9FZo=
+github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e/go.mod h1:/h+UnNGt0IhNNJLkGikcdcJqm66zGD/uJGMRxK/9+Ao=
+github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 h1:Otn9S136ELckZ3KKDyCkxapfufrqDqwmGjcHfAyXRrE=
+github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563/go.mod h1:mLqSmt7Dv/CNneF2wfcChfN1rvapyQr01LGKnKex0DQ=
+github.com/tj/assert v0.0.0-20171129193455-018094318fb0 h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA=
+github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=
+github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
+github.com/vmihailenco/msgpack v4.0.2+incompatible h1:6ujmmycMfB62Mwv2N4atpnf8CKLSzhgodqMenpELKIQ=
+github.com/vmihailenco/msgpack v4.0.2+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a h1:YX8ljsm6wXlHZO+aRz9Exqr0evNhKRNe5K/gi+zKh4U=
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977 h1:actzWV6iWn3GLqN8dZjzsB+CLt+gaV2+wsxroxiQI8I=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 gopkg.in/appleboy/gin-status-api.v1 v1.0.1 h1:c80nPT2SftCfE/2I9g/BbH5bQjG3kbnMMQ1083ugapU=
 gopkg.in/appleboy/gin-status-api.v1 v1.0.1/go.mod h1:X8bhgN2h4PXwJthf69L7rIAqDdnRZptEroSSpFiSnUg=
+gopkg.in/appleboy/gofight.v1 v1.0.4 h1:TTaDOzxoUYfSoCnX6Bccct4EQUVPZwfv53aE5YEoyt4=
+gopkg.in/appleboy/gofight.v1 v1.0.4/go.mod h1:+uRfN6FDYnxTJ+ErmYp2sESG/haICL6CahBhD6RzsVw=
+gopkg.in/appleboy/gofight.v2 v2.0.0 h1:tbwzV5a3rkoA7M+INNFZ0l1FqsbzVYbWKmJIcwpajLY=
 gopkg.in/appleboy/gofight.v2 v2.0.0/go.mod h1:/oCUdVhE1UdRqheD5PmH/2GBvcaxShR7/cDBUoSk2wQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/fukata/golang-stats-api-handler.v1 v1.0.0 h1:UR6t2xlnAa3nDYeT+akxO31opz8bUDkswQ8tWd4E754=
 gopkg.in/fukata/golang-stats-api-handler.v1 v1.0.0/go.mod h1:GUgEemw8P+egdtLtzGsN/STzYt5kI76HPw4JWg7gDvw=
-gopkg.in/gin-contrib/sse.v0 v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:DEvCrJoQbnluYrBi/oha9+DYe6/uKyr9zMxO/1B0NAg=
-gopkg.in/go-playground/validator.v8 v8.18.1 h1:F8SLY5Vqesjs1nI1EL4qmF1PQZ1sitsmq0rPYXLyfGU=
-gopkg.in/go-playground/validator.v8 v8.18.1/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
-gopkg.in/redis.v5 v5.2.4 h1:XpIcn07AdbjbLZOqa6ipEm9aUhLwpgsm3yi0liZMycc=
-gopkg.in/redis.v5 v5.2.4/go.mod h1:6gtv0/+A4iM08kdRfocWYB3bLX2tebpNtfKlFT6H4mY=
-gopkg.in/yaml.v2 v2.0.0-20170208141851-a3f3340b5840 h1:BftvRMCaj0KX6UeD7gnNJv0W8b4HAYTEWes978CoWlY=
-gopkg.in/yaml.v2 v2.0.0-20170208141851-a3f3340b5840/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/gin-gonic/gin.v1 v1.3.0 h1:DjAu49rN1YttQsOkVCPlAO3INcZNFT0IKsNVMk5MRT4=
+gopkg.in/gin-gonic/gin.v1 v1.3.0/go.mod h1:Eljh74A/zAvUOQ835v6ySeZ+5gQG6tKjbZTaZ9iWU3A=
+gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ=
+gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
+gopkg.in/redis.v5 v5.2.9 h1:MNZYOLPomQzZMfpN3ZtD1uyJ2IDonTTlxYiV/pEApiw=
+gopkg.in/redis.v5 v5.2.9/go.mod h1:6gtv0/+A4iM08kdRfocWYB3bLX2tebpNtfKlFT6H4mY=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
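go.sum pairs each module version with two checksums: an `h1:` hash over the module's full file tree and a `/go.mod h1:` hash over just its go.mod file. Because the file also records modules that appear only in the build graph (many with go.mod-only entries), it is considerably longer than the require list in go.mod.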


@@ -93,6 +93,6 @@ func StatMiddleware() gin.HandlerFunc {
 	return func(c *gin.Context) {
 		beginning, recorder := Stats.Begin(c.Writer)
 		c.Next()
-		Stats.End(beginning, recorder)
+		Stats.End(beginning, stats.WithRecorder(recorder))
 	}
 }
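This one source change follows the newer thoas/stats API, where End accepts functional options (here stats.WithRecorder(recorder)) instead of a positional recorder argument.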


@@ -1,35 +0,0 @@
bbloom.go
// The MIT License (MIT)
// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
siphash.go
// https://github.com/dchest/siphash
//
// Written in 2012 by Dmitry Chestnykh.
//
// To the extent possible under law, the author have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
// http://creativecommons.org/publicdomain/zero/1.0/
//
// Package siphash implements SipHash-2-4, a fast short-input PRF
// created by Jean-Philippe Aumasson and Daniel J. Bernstein.


@@ -1,129 +0,0 @@
## bbloom: a bitset Bloom filter for go/golang
===
The package implements a fast Bloom filter with a real 'bitset' and JSONMarshal/JSONUnmarshal methods to store/reload the filter.
NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom
===
Changelog 11/2015: new thread-safe methods AddTS(), HasTS(), and AddIfNotHasTS(), following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a Bloom filter cache.
This Bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html".
Nonetheless, bbloom should work with any other form of entry.
~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~
Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein) to be about as fast. sipHash had been ported to Go by Dmitry Chestnykh (github.com/dchest/siphash).
Minimum hashset size is: 512 ([4]uint64; will be set automatically).
### install
```sh
go get github.com/AndreasBriese/bbloom
```
### test
+ change to folder ../bbloom
+ create wordlist in file "words.txt" (you might use `python permut.py`)
+ run 'go test -bench=.' within the folder
```sh
go test -bench=.
```
~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~
using Go's testing framework now (keep in mind that the op timing covers 65536 operations of Add, Has, or AddIfNotHas respectively)
### usage
after installation add
```go
import (
...
"github.com/AndreasBriese/bbloom"
...
)
```
at the top of your file. In the program, use
```go
// create a bloom filter for 65536 items and 1 % wrong-positive ratio
bf := bbloom.New(float64(1<<16), float64(0.01))
// or
// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly
// bf = bbloom.New(float64(650000), float64(7))
// or
bf = bbloom.New(650000.0, 7.0)
// add one item
bf.Add([]byte("butter"))
// Number of elements added is exposed now
// Note: ElemNum will not be included in the JSON export (for compatibility with older versions)
nOfElementsInFilter := bf.ElemNum
// check if item is in the filter
isIn := bf.Has([]byte("butter")) // should be true
isNotIn := bf.Has([]byte("Butter")) // should be false
// 'add only if item is new' to the bloomfilter
added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set
added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new
// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS
// add one item
bf.AddTS([]byte("peanutbutter"))
// check if item is in the filter
isIn = bf.HasTS([]byte("peanutbutter")) // should be true
isNotIn = bf.HasTS([]byte("peanutButter")) // should be false
// 'add only if item is new' to the bloomfilter
added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'butter' is already in the set
added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'peanutbuTTer' is new
// convert to JSON ([]byte)
Json := bf.JSONMarshal()
// the bloom filter's Mutex is exposed for external un-/locking,
// e.g. to hold the lock while doing the JSON conversion
bf.Mtx.Lock()
Json = bf.JSONMarshal()
bf.Mtx.Unlock()
// restore a bloom filter from storage
bfNew := bbloom.JSONUnmarshal(Json)
isInNew := bfNew.Has([]byte("butter")) // should be true
isNotInNew := bfNew.Has([]byte("Butter")) // should be false
```
to work with the bloom filter.
### why 'fast'?
It's about 3 times faster than William Fitzgerald's bitset Bloom filter (https://github.com/willf/bloom), and about as fast as my []bool-set variant for Bloom filters (see https://github.com/AndreasBriese/bloom) while having an 8-times smaller memory footprint:
Bloom filter (filter size 524288, 7 hashlocs)
github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op)
github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op)
github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op)
github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op)
github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op)
github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op)
github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op)
github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op)
github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op)
github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op)
(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz)
With 32bit Bloom filters (bloom32) using the modified sdbm, bloom32 does the hashing with only 2 bit shifts, one xor, and one subtraction per byte. sdbm is about as fast as fnv64a but gives fewer collisions with the dataset (see the mask above). bloom.New(float64(10 * 1<<16), float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions.


@@ -1,270 +0,0 @@
// The MIT License (MIT)
// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package bbloom
import (
"bytes"
"encoding/json"
"log"
"math"
"sync"
"unsafe"
)
// helper
var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128}
func getSize(ui64 uint64) (size uint64, exponent uint64) {
if ui64 < uint64(512) {
ui64 = uint64(512)
}
size = uint64(1)
for size < ui64 {
size <<= 1
exponent++
}
return size, exponent
}
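// calcSizeByWrongPositives applies the standard Bloom filter sizing
// formulas: bitset size m = -n*ln(p)/(ln 2)^2 and hash locations
// k = (m/n)*ln 2, for n expected entries and false-positive rate p;
// the constant 0.69314718056 is ln 2.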
func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) {
size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2)
locs := math.Ceil(float64(0.69314718056) * size / numEntries)
return uint64(size), uint64(locs)
}
// New
// returns a new bloomfilter
func New(params ...float64) (bloomfilter Bloom) {
var entries, locs uint64
if len(params) == 2 {
if params[1] < 1 {
entries, locs = calcSizeByWrongPositives(params[0], params[1])
} else {
entries, locs = uint64(params[0]), uint64(params[1])
}
} else {
log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))")
}
size, exponent := getSize(uint64(entries))
bloomfilter = Bloom{
sizeExp: exponent,
size: size - 1,
setLocs: locs,
shift: 64 - exponent,
}
bloomfilter.Size(size)
return bloomfilter
}
// NewWithBoolset
// takes a []byte slice and number of locs per entry
// returns the bloomfilter with a bitset populated according to the input []byte
func NewWithBoolset(bs *[]byte, locs uint64) (bloomfilter Bloom) {
bloomfilter = New(float64(len(*bs)<<3), float64(locs))
ptr := uintptr(unsafe.Pointer(&bloomfilter.bitset[0]))
for _, b := range *bs {
*(*uint8)(unsafe.Pointer(ptr)) = b
ptr++
}
return bloomfilter
}
// bloomJSONImExport
// Im/Export structure used by JSONMarshal / JSONUnmarshal
type bloomJSONImExport struct {
FilterSet []byte
SetLocs uint64
}
// JSONUnmarshal
// takes a JSON object (type bloomJSONImExport) as []byte
// returns a Bloom object
func JSONUnmarshal(dbData []byte) Bloom {
bloomImEx := bloomJSONImExport{}
json.Unmarshal(dbData, &bloomImEx)
buf := bytes.NewBuffer(bloomImEx.FilterSet)
bs := buf.Bytes()
bf := NewWithBoolset(&bs, bloomImEx.SetLocs)
return bf
}
//
// Bloom filter
type Bloom struct {
Mtx sync.Mutex
ElemNum uint64
bitset []uint64
sizeExp uint64
size uint64
setLocs uint64
shift uint64
}
// <--- http://www.cse.yorku.ca/~oz/hash.html
// modified Berkeley DB Hash (32bit)
// hash is cast to l, h = 16bit fragments
// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) {
// hash := uint64(len(*b))
// for _, c := range *b {
// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash
// }
// h = hash >> bl.shift
// l = hash << bl.shift >> bl.shift
// return l, h
// }
// Update: found sipHash of Jean-Philippe Aumasson & Daniel J. Bernstein to be even faster than absdbm()
// https://131002.net/siphash/
// siphash was implemented for Go by Dmitry Chestnykh https://github.com/dchest/siphash
// Add
// set the bit(s) for entry; Adds an entry to the Bloom filter
func (bl *Bloom) Add(entry []byte) {
l, h := bl.sipHash(entry)
for i := uint64(0); i < (*bl).setLocs; i++ {
(*bl).Set((h + i*l) & (*bl).size)
(*bl).ElemNum++
}
}
// AddTS
// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
func (bl *Bloom) AddTS(entry []byte) {
bl.Mtx.Lock()
defer bl.Mtx.Unlock()
bl.Add(entry[:])
}
// Has
// check if bit(s) for entry is/are set
// returns true if the entry was added to the Bloom Filter
func (bl Bloom) Has(entry []byte) bool {
l, h := bl.sipHash(entry)
for i := uint64(0); i < bl.setLocs; i++ {
switch bl.IsSet((h + i*l) & bl.size) {
case false:
return false
}
}
return true
}
// HasTS
// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
func (bl *Bloom) HasTS(entry []byte) bool {
bl.Mtx.Lock()
defer bl.Mtx.Unlock()
return bl.Has(entry[:])
}
// AddIfNotHas
// Only Add entry if it's not present in the bloomfilter
// returns true if entry was added
// returns false if entry was already registered in the bloomfilter
func (bl *Bloom) AddIfNotHas(entry []byte) (added bool) {
if bl.Has(entry[:]) {
return added
}
bl.Add(entry[:])
return true
}
// AddIfNotHasTS
// Thread safe: Only Add entry if it's not present in the bloomfilter
// returns true if entry was added
// returns false if entry was already registered in the bloomfilter
func (bl *Bloom) AddIfNotHasTS(entry []byte) (added bool) {
bl.Mtx.Lock()
defer bl.Mtx.Unlock()
return bl.AddIfNotHas(entry[:])
}
// Size
// allocates the Bloom filter's bitset with a size of sz bits
func (bl *Bloom) Size(sz uint64) {
(*bl).bitset = make([]uint64, sz>>6)
}
// Clear
// resets the Bloom filter
func (bl *Bloom) Clear() {
for i := range (*bl).bitset {
(*bl).bitset[i] = 0
}
}
// Set
// set the bit[idx] of the bitset
func (bl *Bloom) Set(idx uint64) {
ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
*(*uint8)(ptr) |= mask[idx%8]
}
// IsSet
// check if bit[idx] of bitset is set
// returns true/false
func (bl *Bloom) IsSet(idx uint64) bool {
ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1
return r == 1
}
// JSONMarshal
// returns JSON-object (type bloomJSONImExport) as []byte
func (bl Bloom) JSONMarshal() []byte {
bloomImEx := bloomJSONImExport{}
bloomImEx.SetLocs = uint64(bl.setLocs)
bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3)
ptr := uintptr(unsafe.Pointer(&bl.bitset[0]))
for i := range bloomImEx.FilterSet {
bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(ptr))
ptr++
}
data, err := json.Marshal(bloomImEx)
if err != nil {
log.Fatal("json.Marshal failed: ", err)
}
return data
}
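// Round-trip sketch (illustrative): a filter can be persisted with
// JSONMarshal and restored with JSONUnmarshal.
//
//	data := bf.JSONMarshal()
//	restored := JSONUnmarshal(data)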
// // alternative hashFn
// func (bl Bloom) fnv64a(b *[]byte) (l, h uint64) {
// h64 := fnv.New64a()
// h64.Write(*b)
// hash := h64.Sum64()
// h = hash >> 32
// l = hash << 32 >> 32
// return l, h
// }
//
// // <-- http://partow.net/programming/hashfunctions/index.html
// // citation: An algorithm proposed by Donald E. Knuth in The Art Of Computer Programming Volume 3,
// // under the topic of sorting and search chapter 6.4.
// // modified to fit with boolset-length
// func (bl Bloom) DEKHash(b *[]byte) (l, h uint64) {
// hash := uint64(len(*b))
// for _, c := range *b {
// hash = ((hash << 5) ^ (hash >> bl.shift)) ^ uint64(c)
// }
// h = hash >> bl.shift
// l = hash << bl.sizeExp >> bl.sizeExp
// return l, h
// }

View File

@ -1,225 +0,0 @@
// Written in 2012 by Dmitry Chestnykh.
//
// To the extent possible under law, the author have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
// http://creativecommons.org/publicdomain/zero/1.0/
//
// Package siphash implements SipHash-2-4, a fast short-input PRF
// created by Jean-Philippe Aumasson and Daniel J. Bernstein.
package bbloom
// Hash returns the 64-bit SipHash-2-4 of the given byte slice with two 64-bit
// parts of 128-bit key: k0 and k1.
func (bl Bloom) sipHash(p []byte) (l, h uint64) {
// Initialization.
v0 := uint64(8317987320269560794) // k0 ^ 0x736f6d6570736575
v1 := uint64(7237128889637516672) // k1 ^ 0x646f72616e646f6d
v2 := uint64(7816392314733513934) // k0 ^ 0x6c7967656e657261
v3 := uint64(8387220255325274014) // k1 ^ 0x7465646279746573
t := uint64(len(p)) << 56
// Compression.
for len(p) >= 8 {
m := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 |
uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
v3 ^= m
// Round 1.
v0 += v1
v1 = v1<<13 | v1>>51
v1 ^= v0
v0 = v0<<32 | v0>>32
v2 += v3
v3 = v3<<16 | v3>>48
v3 ^= v2
v0 += v3
v3 = v3<<21 | v3>>43
v3 ^= v0
v2 += v1
v1 = v1<<17 | v1>>47
v1 ^= v2
v2 = v2<<32 | v2>>32
// Round 2.
v0 += v1
v1 = v1<<13 | v1>>51
v1 ^= v0
v0 = v0<<32 | v0>>32
v2 += v3
v3 = v3<<16 | v3>>48
v3 ^= v2
v0 += v3
v3 = v3<<21 | v3>>43
v3 ^= v0
v2 += v1
v1 = v1<<17 | v1>>47
v1 ^= v2
v2 = v2<<32 | v2>>32
v0 ^= m
p = p[8:]
}
// Compress last block.
switch len(p) {
case 7:
t |= uint64(p[6]) << 48
fallthrough
case 6:
t |= uint64(p[5]) << 40
fallthrough
case 5:
t |= uint64(p[4]) << 32
fallthrough
case 4:
t |= uint64(p[3]) << 24
fallthrough
case 3:
t |= uint64(p[2]) << 16
fallthrough
case 2:
t |= uint64(p[1]) << 8
fallthrough
case 1:
t |= uint64(p[0])
}
v3 ^= t
// Round 1.
v0 += v1
v1 = v1<<13 | v1>>51
v1 ^= v0
v0 = v0<<32 | v0>>32
v2 += v3
v3 = v3<<16 | v3>>48
v3 ^= v2
v0 += v3
v3 = v3<<21 | v3>>43
v3 ^= v0
v2 += v1
v1 = v1<<17 | v1>>47
v1 ^= v2
v2 = v2<<32 | v2>>32
// Round 2.
v0 += v1
v1 = v1<<13 | v1>>51
v1 ^= v0
v0 = v0<<32 | v0>>32
v2 += v3
v3 = v3<<16 | v3>>48
v3 ^= v2
v0 += v3
v3 = v3<<21 | v3>>43
v3 ^= v0
v2 += v1
v1 = v1<<17 | v1>>47
v1 ^= v2
v2 = v2<<32 | v2>>32
v0 ^= t
// Finalization.
v2 ^= 0xff
// Round 1.
v0 += v1
v1 = v1<<13 | v1>>51
v1 ^= v0
v0 = v0<<32 | v0>>32
v2 += v3
v3 = v3<<16 | v3>>48
v3 ^= v2
v0 += v3
v3 = v3<<21 | v3>>43
v3 ^= v0
v2 += v1
v1 = v1<<17 | v1>>47
v1 ^= v2
v2 = v2<<32 | v2>>32
// Round 2.
v0 += v1
v1 = v1<<13 | v1>>51
v1 ^= v0
v0 = v0<<32 | v0>>32
v2 += v3
v3 = v3<<16 | v3>>48
v3 ^= v2
v0 += v3
v3 = v3<<21 | v3>>43
v3 ^= v0
v2 += v1
v1 = v1<<17 | v1>>47
v1 ^= v2
v2 = v2<<32 | v2>>32
// Round 3.
v0 += v1
v1 = v1<<13 | v1>>51
v1 ^= v0
v0 = v0<<32 | v0>>32
v2 += v3
v3 = v3<<16 | v3>>48
v3 ^= v2
v0 += v3
v3 = v3<<21 | v3>>43
v3 ^= v0
v2 += v1
v1 = v1<<17 | v1>>47
v1 ^= v2
v2 = v2<<32 | v2>>32
// Round 4.
v0 += v1
v1 = v1<<13 | v1>>51
v1 ^= v0
v0 = v0<<32 | v0>>32
v2 += v3
v3 = v3<<16 | v3>>48
v3 ^= v2
v0 += v3
v3 = v3<<21 | v3>>43
v3 ^= v0
v2 += v1
v1 = v1<<17 | v1>>47
v1 ^= v2
v2 = v2<<32 | v2>>32
// return v0 ^ v1 ^ v2 ^ v3
hash := v0 ^ v1 ^ v2 ^ v3
h = hash >> bl.shift
l = hash << bl.shift >> bl.shift
return l, h
}

View File

@ -1,34 +0,0 @@
<img src="http://tjholowaychuk.com:6000/svg/title/APEX/GATEWAY">
Package gateway provides a drop-in replacement for net/http's `ListenAndServe` for use in AWS Lambda & API Gateway, simply swap it out for `gateway.ListenAndServe`. Extracted from [Up](https://github.com/apex/up) which provides additional middleware features and operational functionality.
```go
package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/apex/gateway"
)
func main() {
addr := ":" + os.Getenv("PORT")
http.HandleFunc("/", hello)
log.Fatal(gateway.ListenAndServe(addr, nil))
}
func hello(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Hello World from Go")
}
```
---
[![GoDoc](https://godoc.org/github.com/apex/up-go?status.svg)](https://godoc.org/github.com/apex/gateway)
![](https://img.shields.io/badge/license-MIT-blue.svg)
![](https://img.shields.io/badge/status-stable-green.svg)
<a href="https://apex.sh"><img src="http://tjholowaychuk.com:6000/svg/sponsor"></a>

View File

@ -1,33 +0,0 @@
// Package gateway provides a drop-in replacement for net/http.ListenAndServe for use in AWS Lambda & API Gateway.
package gateway
import (
"context"
"net/http"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
)
// ListenAndServe is a drop-in replacement for
// http.ListenAndServe for use within AWS Lambda.
//
// ListenAndServe always returns a non-nil error.
func ListenAndServe(addr string, h http.Handler) error {
if h == nil {
h = http.DefaultServeMux
}
lambda.Start(func(ctx context.Context, e events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
r, err := NewRequest(ctx, e)
if err != nil {
return events.APIGatewayProxyResponse{}, err
}
w := NewResponse()
h.ServeHTTP(w, r)
return w.End(), nil
})
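// lambda.Start blocks for the lifetime of the Lambda runtime and does not
// return control here, so the nil return below is effectively unreachable.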
return nil
}

View File

@ -1,74 +0,0 @@
package gateway
import (
"context"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/aws/aws-lambda-go/events"
"github.com/pkg/errors"
)
// NewRequest returns a new http.Request from the given Lambda event.
func NewRequest(ctx context.Context, e events.APIGatewayProxyRequest) (*http.Request, error) {
// path
u, err := url.Parse(e.Path)
if err != nil {
return nil, errors.Wrap(err, "parsing path")
}
// querystring
q := u.Query()
for k, v := range e.QueryStringParameters {
q.Set(k, v)
}
u.RawQuery = q.Encode()
// base64 encoded body
body := e.Body
if e.IsBase64Encoded {
b, err := base64.StdEncoding.DecodeString(body)
if err != nil {
return nil, errors.Wrap(err, "decoding base64 body")
}
body = string(b)
}
// new request
req, err := http.NewRequest(e.HTTPMethod, u.String(), strings.NewReader(body))
if err != nil {
return nil, errors.Wrap(err, "creating request")
}
// remote addr
req.RemoteAddr = e.RequestContext.Identity.SourceIP
// header fields
for k, v := range e.Headers {
req.Header.Set(k, v)
}
// content-length
if req.Header.Get("Content-Length") == "" && body != "" {
req.Header.Set("Content-Length", strconv.Itoa(len(body)))
}
// custom fields
req.Header.Set("X-Request-Id", e.RequestContext.RequestID)
req.Header.Set("X-Stage", e.RequestContext.Stage)
// xray support
if traceID := ctx.Value("x-amzn-trace-id"); traceID != nil {
req.Header.Set("X-Amzn-Trace-Id", fmt.Sprintf("%v", traceID))
}
// host
req.URL.Host = req.Header.Get("Host")
req.Host = req.URL.Host
return req, nil
}

View File

@ -1,110 +0,0 @@
package gateway
import (
"bytes"
"encoding/base64"
"net/http"
"strings"
"github.com/aws/aws-lambda-go/events"
)
// ResponseWriter implements the http.ResponseWriter interface
// in order to support the API Gateway Lambda HTTP "protocol".
type ResponseWriter struct {
out events.APIGatewayProxyResponse
buf bytes.Buffer
header http.Header
wroteHeader bool
}
// NewResponse returns a new response writer to capture http output.
func NewResponse() *ResponseWriter {
return &ResponseWriter{}
}
// Header implementation.
func (w *ResponseWriter) Header() http.Header {
if w.header == nil {
w.header = make(http.Header)
}
return w.header
}
// Write implementation.
func (w *ResponseWriter) Write(b []byte) (int, error) {
if !w.wroteHeader {
w.WriteHeader(http.StatusOK)
}
// TODO: HEAD? ignore
return w.buf.Write(b)
}
// WriteHeader implementation.
func (w *ResponseWriter) WriteHeader(status int) {
if w.wroteHeader {
return
}
if w.Header().Get("Content-Type") == "" {
w.Header().Set("Content-Type", "text/plain; charset=utf8")
}
w.out.StatusCode = status
h := make(map[string]string)
for k, v := range w.Header() {
if len(v) > 0 {
h[k] = v[len(v)-1]
}
}
w.out.Headers = h
w.wroteHeader = true
}
// End the request.
func (w *ResponseWriter) End() events.APIGatewayProxyResponse {
w.out.IsBase64Encoded = isBinary(w.header)
if w.out.IsBase64Encoded {
w.out.Body = base64.StdEncoding.EncodeToString(w.buf.Bytes())
} else {
w.out.Body = w.buf.String()
}
return w.out
}
// isBinary returns true if the response represents binary data.
func isBinary(h http.Header) bool {
if !isTextMime(h.Get("Content-Type")) {
return true
}
if h.Get("Content-Encoding") == "gzip" {
return true
}
return false
}
// isTextMime returns true if the content type represents textual data.
func isTextMime(kind string) bool {
switch {
case strings.HasSuffix(kind, "svg+xml"):
return true
case strings.HasPrefix(kind, "text/"):
return true
case strings.HasPrefix(kind, "application/") && strings.HasSuffix(kind, "json"):
return true
case strings.HasPrefix(kind, "application/") && strings.HasSuffix(kind, "xml"):
return true
default:
return false
}
}

View File

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2016 Bo-Yi Wu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,181 +0,0 @@
package convert
import (
"fmt"
"math"
"strconv"
)
// ToString convert any type to string
func ToString(value interface{}) interface{} {
if v, ok := value.(*string); ok {
return *v
}
return fmt.Sprintf("%v", value)
}
// ToBool convert any type to boolean
func ToBool(value interface{}) interface{} {
switch value := value.(type) {
case bool:
return value
case *bool:
return *value
case string:
switch value {
case "", "false":
return false
}
return true
case *string:
return ToBool(*value)
case float64:
if value != 0 {
return true
}
return false
case *float64:
return ToBool(*value)
case float32:
if value != 0 {
return true
}
return false
case *float32:
return ToBool(*value)
case int:
if value != 0 {
return true
}
return false
case *int:
return ToBool(*value)
}
return false
}
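// Illustrative behavior (not part of the original file):
//
//	ToBool("false") // false
//	ToBool("0")     // true - only "" and "false" map to false for strings
//	ToBool(0.0)     // false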
// ToInt convert any type to int
func ToInt(value interface{}) interface{} {
switch value := value.(type) {
case bool:
if value {
return 1
}
return 0
case int:
if value < int(math.MinInt32) || value > int(math.MaxInt32) {
return nil
}
return value
case *int:
return ToInt(*value)
case int8:
return int(value)
case *int8:
return int(*value)
case int16:
return int(value)
case *int16:
return int(*value)
case int32:
return int(value)
case *int32:
return int(*value)
case int64:
if value < int64(math.MinInt32) || value > int64(math.MaxInt32) {
return nil
}
return int(value)
case *int64:
return ToInt(*value)
case uint:
if value > math.MaxInt32 {
return nil
}
return int(value)
case *uint:
return ToInt(*value)
case uint8:
return int(value)
case *uint8:
return int(*value)
case uint16:
return int(value)
case *uint16:
return int(*value)
case uint32:
if value > uint32(math.MaxInt32) {
return nil
}
return int(value)
case *uint32:
return ToInt(*value)
case uint64:
if value > uint64(math.MaxInt32) {
return nil
}
return int(value)
case *uint64:
return ToInt(*value)
case float32:
if value < float32(math.MinInt32) || value > float32(math.MaxInt32) {
return nil
}
return int(value)
case *float32:
return ToInt(*value)
case float64:
if value < float64(math.MinInt32) || value > float64(math.MaxInt32) {
return nil
}
return int(value)
case *float64:
return ToInt(*value)
case string:
val, err := strconv.ParseFloat(value, 64)
if err != nil {
return nil
}
return ToInt(val)
case *string:
return ToInt(*value)
}
// If the value cannot be transformed into an int, return nil instead of '0'
// to denote 'no integer found'
return nil
}
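// Illustrative behavior (not part of the original file):
//
//	ToInt("42")           // 42 (parsed as a float first, then converted)
//	ToInt("1e10")         // nil - outside the int32 range
//	ToInt(int64(1 << 40)) // nil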
// ToFloat convert any type to float
func ToFloat(value interface{}) interface{} {
switch value := value.(type) {
case bool:
if value {
return 1.0
}
return 0.0
case *bool:
return ToFloat(*value)
case int:
return float64(value)
case *int32:
return ToFloat(*value)
case float32:
return value
case *float32:
return ToFloat(*value)
case float64:
return value
case *float64:
return ToFloat(*value)
case string:
val, err := strconv.ParseFloat(value, 64)
if err != nil {
return nil
}
return val
case *string:
return ToFloat(*value)
}
return 0.0
}

View File

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2016 Eduard Ganiukov
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,71 +0,0 @@
# go-fcm
[![GoDoc](https://godoc.org/github.com/appleboy/go-fcm?status.svg)](https://godoc.org/github.com/appleboy/go-fcm)
[![Build Status](https://travis-ci.org/appleboy/go-fcm.svg?branch=master)](https://travis-ci.org/appleboy/go-fcm)
[![Go Report Card](https://goreportcard.com/badge/github.com/appleboy/go-fcm)](https://goreportcard.com/report/github.com/appleboy/go-fcm)
This project was forked from [github.com/edganiukov/fcm](https://github.com/edganiukov/fcm).
Golang client library for Firebase Cloud Messaging. Only the [HTTP client](https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream) is implemented.
More information on [Firebase Cloud Messaging](https://firebase.google.com/docs/cloud-messaging/)
## Features
* [x] Send messages to a topic
* [x] Send messages to a device list
* [x] Supports condition attribute (fcm only)
## Getting Started
To install fcm, use `go get`:
```bash
go get github.com/appleboy/go-fcm
```
or `govendor`:
```bash
govendor fetch github.com/appleboy/go-fcm
```
or any other vendoring tool.
## Sample Usage
Here is a simple example illustrating how to use the FCM library:
```go
package main
import (
"log"
"github.com/appleboy/go-fcm"
)
func main() {
// Create the message to be sent.
msg := &fcm.Message{
To: "sample_device_token",
Data: map[string]interface{}{
"foo": "bar",
},
}
// Create an FCM client to send the message.
client, err := fcm.NewClient("sample_api_key")
if err != nil {
log.Fatalln(err)
}
// Send the message and receive the response without retries.
response, err := client.Send(msg)
if err != nil {
log.Fatalln(err)
}
log.Printf("%#v\n", response)
}
```
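The client can also retry on temporary errors (connection timeouts and 5xx responses) via `SendWithRetry`; a minimal sketch, reusing `client` and `msg` from the example above:

```go
// Send the message, retrying up to 3 times on temporary errors.
response, err := client.SendWithRetry(msg, 3)
if err != nil {
	log.Fatalln(err)
}
log.Printf("%#v\n", response)
```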

View File

@ -1,133 +0,0 @@
package fcm
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
)
const (
// DefaultEndpoint contains endpoint URL of FCM service.
DefaultEndpoint = "https://fcm.googleapis.com/fcm/send"
)
var (
// ErrInvalidAPIKey occurs if API key is not set.
ErrInvalidAPIKey = errors.New("client API Key is invalid")
)
// Client abstracts the interaction between the application server and the
// FCM server via HTTP protocol. The developer must obtain an API key from the
// Google APIs Console page and pass it to the `Client` so that it can
// perform authorized requests on the application server's behalf.
// To send a message to one or more devices use the Client's Send.
//
// If no custom HTTP client is configured via options, a zeroed http.Client
// will be allocated and used to send messages.
type Client struct {
apiKey string
client *http.Client
endpoint string
}
// NewClient creates new Firebase Cloud Messaging Client based on API key and
// with default endpoint and http client.
func NewClient(apiKey string, opts ...Option) (*Client, error) {
if apiKey == "" {
return nil, ErrInvalidAPIKey
}
c := &Client{
apiKey: apiKey,
endpoint: DefaultEndpoint,
client: &http.Client{},
}
for _, o := range opts {
if err := o(c); err != nil {
return nil, err
}
}
return c, nil
}
// Send sends a message to the FCM server without retrying in case of service
// unavailability. A non-nil error is returned if a non-recoverable error
// occurs (i.e. if the response status is not "200 OK").
func (c *Client) Send(msg *Message) (*Response, error) {
// validate
if err := msg.Validate(); err != nil {
return nil, err
}
// marshal message
data, err := json.Marshal(msg)
if err != nil {
return nil, err
}
return c.send(data)
}
// SendWithRetry sends a message to the FCM server with defined number of
// retrying in case of temporary error.
func (c *Client) SendWithRetry(msg *Message, retryAttempts int) (*Response, error) {
// validate
if err := msg.Validate(); err != nil {
return nil, err
}
// marshal message
data, err := json.Marshal(msg)
if err != nil {
return nil, err
}
resp := new(Response)
err = retry(func() error {
var err error
resp, err = c.send(data)
return err
}, retryAttempts)
if err != nil {
return nil, err
}
return resp, nil
}
// send sends a request.
func (c *Client) send(data []byte) (*Response, error) {
// create request
req, err := http.NewRequest("POST", c.endpoint, bytes.NewBuffer(data))
if err != nil {
return nil, err
}
// add headers
req.Header.Add("Authorization", fmt.Sprintf("key=%s", c.apiKey))
req.Header.Add("Content-Type", "application/json")
// execute request
resp, err := c.client.Do(req)
if err != nil {
return nil, connectionError(err.Error())
}
defer resp.Body.Close()
// check response status
if resp.StatusCode != http.StatusOK {
if resp.StatusCode >= http.StatusInternalServerError {
return nil, serverError(fmt.Sprintf("%d error: %s", resp.StatusCode, resp.Status))
}
return nil, fmt.Errorf("%d error: %s", resp.StatusCode, resp.Status)
}
// build return
response := new(Response)
if err := json.NewDecoder(resp.Body).Decode(response); err != nil {
return nil, err
}
return response, nil
}

View File

@ -1,79 +0,0 @@
package fcm
import (
"errors"
"strings"
)
var (
// ErrInvalidMessage occurs if push notification message is nil.
ErrInvalidMessage = errors.New("message is invalid")
// ErrInvalidTarget occurs if message topic is empty.
ErrInvalidTarget = errors.New("topic is invalid or registration ids are not set")
// ErrToManyRegIDs occurs when there are more than 1000 registration ids.
ErrToManyRegIDs = errors.New("too many registration ids")
// ErrInvalidTimeToLive occurs if TimeToLive is greater than 2419200 (4 weeks, in seconds).
ErrInvalidTimeToLive = errors.New("messages time-to-live is invalid")
)
// Notification specifies the predefined, user-visible key-value pairs of the
// notification payload.
type Notification struct {
Title string `json:"title,omitempty"`
Body string `json:"body,omitempty"`
ChannelID string `json:"android_channel_id,omitempty"`
Icon string `json:"icon,omitempty"`
Sound string `json:"sound,omitempty"`
Badge string `json:"badge,omitempty"`
Tag string `json:"tag,omitempty"`
Color string `json:"color,omitempty"`
ClickAction string `json:"click_action,omitempty"`
BodyLocKey string `json:"body_loc_key,omitempty"`
BodyLocArgs string `json:"body_loc_args,omitempty"`
TitleLocKey string `json:"title_loc_key,omitempty"`
TitleLocArgs string `json:"title_loc_args,omitempty"`
}
// Message represents list of targets, options, and payload for HTTP JSON
// messages.
type Message struct {
To string `json:"to,omitempty"`
RegistrationIDs []string `json:"registration_ids,omitempty"`
Condition string `json:"condition,omitempty"`
CollapseKey string `json:"collapse_key,omitempty"`
Priority string `json:"priority,omitempty"`
ContentAvailable bool `json:"content_available,omitempty"`
MutableContent bool `json:"mutable_content,omitempty"`
DelayWhileIdle bool `json:"delay_while_idle,omitempty"`
TimeToLive *uint `json:"time_to_live,omitempty"`
DeliveryReceiptRequested bool `json:"delivery_receipt_requested,omitempty"`
DryRun bool `json:"dry_run,omitempty"`
RestrictedPackageName string `json:"restricted_package_name,omitempty"`
Notification *Notification `json:"notification,omitempty"`
Data map[string]interface{} `json:"data,omitempty"`
}
// Validate returns an error if the message is not well-formed.
func (msg *Message) Validate() error {
if msg == nil {
return ErrInvalidMessage
}
// validate target identifier: `to` or `condition`, or `registration_ids`
opCnt := strings.Count(msg.Condition, "&&") + strings.Count(msg.Condition, "||")
if msg.To == "" && (msg.Condition == "" || opCnt > 2) && len(msg.RegistrationIDs) == 0 {
return ErrInvalidTarget
}
if len(msg.RegistrationIDs) > 1000 {
return ErrToManyRegIDs
}
if msg.TimeToLive != nil && *msg.TimeToLive > uint(2419200) {
return ErrInvalidTimeToLive
}
return nil
}

View File

@ -1,28 +0,0 @@
package fcm
import (
"errors"
"net/http"
)
// Option configures the Client with the defined option.
type Option func(*Client) error
// WithEndpoint returns Option to configure FCM Endpoint.
func WithEndpoint(endpoint string) Option {
return func(c *Client) error {
if endpoint == "" {
return errors.New("invalid endpoint")
}
c.endpoint = endpoint
return nil
}
}
// WithHTTPClient returns Option to configure HTTP Client.
func WithHTTPClient(httpClient *http.Client) Option {
return func(c *Client) error {
c.client = httpClient
return nil
}
}

View File

@ -1,207 +0,0 @@
package fcm
import (
"encoding/json"
"errors"
)
var (
// ErrMissingRegistration occurs if registration token is not set.
ErrMissingRegistration = errors.New("missing registration token")
// ErrInvalidRegistration occurs if registration token is invalid.
ErrInvalidRegistration = errors.New("invalid registration token")
// ErrNotRegistered occurs when application was deleted from device and
// token is not registered in FCM.
ErrNotRegistered = errors.New("unregistered device")
// ErrInvalidPackageName occurs if package name in message is invalid.
ErrInvalidPackageName = errors.New("invalid package name")
// ErrMismatchSenderID occurs when application has a new registration token.
ErrMismatchSenderID = errors.New("mismatched sender id")
// ErrMessageTooBig occurs when message is too big.
ErrMessageTooBig = errors.New("message is too big")
// ErrInvalidDataKey occurs if data key is invalid.
ErrInvalidDataKey = errors.New("invalid data key")
// ErrInvalidTTL occurs when message has invalid TTL.
ErrInvalidTTL = errors.New("invalid time to live")
// ErrUnavailable occurs when FCM service is unavailable. It makes sense
// to retry after this error.
ErrUnavailable = connectionError("timeout")
// ErrInternalServerError is internal FCM error. It makes sense to retry
// after this error.
ErrInternalServerError = serverError("internal server error")
// ErrDeviceMessageRateExceeded occurs when the client sent too many requests to
// the device.
ErrDeviceMessageRateExceeded = errors.New("device message rate exceeded")
// ErrTopicsMessageRateExceeded occurs when the client sent too many requests to
// the topics.
ErrTopicsMessageRateExceeded = errors.New("topics message rate exceeded")
// ErrInvalidParameters occurs when provided parameters do not have the right name and type
ErrInvalidParameters = errors.New("check that the provided parameters have the right name and type")
// ErrUnknown for unknown error type
ErrUnknown = errors.New("unknown error type")
)
var (
errMap = map[string]error{
"MissingRegistration": ErrMissingRegistration,
"InvalidRegistration": ErrInvalidRegistration,
"NotRegistered": ErrNotRegistered,
"InvalidPackageName": ErrInvalidPackageName,
"MismatchSenderId": ErrMismatchSenderID,
"MessageTooBig": ErrMessageTooBig,
"InvalidDataKey": ErrInvalidDataKey,
"InvalidTtl": ErrInvalidTTL,
"Unavailable": ErrUnavailable,
"InternalServerError": ErrInternalServerError,
"DeviceMessageRateExceeded": ErrDeviceMessageRateExceeded,
"TopicsMessageRateExceeded": ErrTopicsMessageRateExceeded,
"InvalidParameters": ErrInvalidParameters,
}
)
// connectionError represents connection errors such as timeout error, etc.
// Implements `net.Error` interface.
type connectionError string
func (err connectionError) Error() string {
return string(err)
}
func (err connectionError) Temporary() bool {
return true
}
func (err connectionError) Timeout() bool {
return true
}
// serverError represents internal server errors.
// Implements `net.Error` interface.
type serverError string
func (err serverError) Error() string {
return string(err)
}
func (serverError) Temporary() bool {
return true
}
func (serverError) Timeout() bool {
return false
}
// Response represents the FCM server's response to the application
// server's sent message.
type Response struct {
MulticastID int64 `json:"multicast_id"`
Success int `json:"success"`
Failure int `json:"failure"`
CanonicalIDs int `json:"canonical_ids"`
Results []Result `json:"results"`
// Device Group HTTP Response
FailedRegistrationIDs []string `json:"failed_registration_ids"`
// Topic HTTP response
MessageID int64 `json:"message_id"`
Error error `json:"error"`
}
// UnmarshalJSON implements json.Unmarshaler interface.
func (r *Response) UnmarshalJSON(data []byte) error {
var response struct {
MulticastID int64 `json:"multicast_id"`
Success int `json:"success"`
Failure int `json:"failure"`
CanonicalIDs int `json:"canonical_ids"`
Results []Result `json:"results"`
// Device Group HTTP Response
FailedRegistrationIDs []string `json:"failed_registration_ids"`
// Topic HTTP response
MessageID int64 `json:"message_id"`
Error string `json:"error"`
}
if err := json.Unmarshal(data, &response); err != nil {
return err
}
r.MulticastID = response.MulticastID
r.Success = response.Success
r.Failure = response.Failure
r.CanonicalIDs = response.CanonicalIDs
r.Results = response.Results
r.Success = response.Success
r.FailedRegistrationIDs = response.FailedRegistrationIDs
r.MessageID = response.MessageID
if response.Error != "" {
if val, ok := errMap[response.Error]; ok {
r.Error = val
} else {
r.Error = ErrUnknown
}
}
return nil
}
// Result represents the status of a processed message.
type Result struct {
MessageID string `json:"message_id"`
RegistrationID string `json:"registration_id"`
Error error `json:"error"`
}
// UnmarshalJSON implements json.Unmarshaler interface.
func (r *Result) UnmarshalJSON(data []byte) error {
var result struct {
MessageID string `json:"message_id"`
RegistrationID string `json:"registration_id"`
Error string `json:"error"`
}
if err := json.Unmarshal(data, &result); err != nil {
return err
}
r.MessageID = result.MessageID
r.RegistrationID = result.RegistrationID
if result.Error != "" {
if val, ok := errMap[result.Error]; ok {
r.Error = val
} else {
r.Error = ErrUnknown
}
}
return nil
}
// Unregistered checks if the device token is unregistered,
// according to response from FCM server. Useful to determine
// if app is uninstalled.
func (r Result) Unregistered() bool {
switch r.Error {
case ErrNotRegistered, ErrMismatchSenderID, ErrMissingRegistration, ErrInvalidRegistration:
return true
default:
return false
}
}

View File

@ -1,34 +0,0 @@
package fcm
import (
"net"
"time"
)
const (
minBackoff = 100 * time.Millisecond
maxBackoff = 1 * time.Minute
factor = 2.7
)
func retry(fn func() error, attempts int) error {
var attempt int
for {
err := fn()
if err == nil {
return nil
}
if tErr, ok := err.(net.Error); !ok || !tErr.Temporary() {
return err
}
attempt++
backoff := minBackoff * time.Duration(attempt*attempt)
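// backoff grows quadratically with the attempt number (100ms, 400ms, 900ms, ...)
// and the loop gives up once it exceeds maxBackoff or the attempt budget.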
if attempt > attempts || backoff > maxBackoff {
return err
}
time.Sleep(backoff)
}
}

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) [2016] [Asdine El Hrychy]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,623 +0,0 @@
# Storm
[![Join the chat at https://gitter.im/asdine/storm](https://badges.gitter.im/asdine/storm.svg)](https://gitter.im/asdine/storm?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://travis-ci.org/asdine/storm.svg)](https://travis-ci.org/asdine/storm)
[![GoDoc](https://godoc.org/github.com/asdine/storm?status.svg)](https://godoc.org/github.com/asdine/storm)
[![Go Report Card](https://goreportcard.com/badge/github.com/asdine/storm)](https://goreportcard.com/report/github.com/asdine/storm)
Storm is a simple and powerful ORM for [BoltDB](https://github.com/boltdb/bolt). The goal of this project is to provide a simple way to save any object in BoltDB and to easily retrieve it.
In addition to the examples below, see also the [examples in the GoDoc](https://godoc.org/github.com/asdine/storm#pkg-examples).
## Table of Contents
<!-- TOC depthFrom:2 depthTo:6 withLinks:1 updateOnSave:0 orderedList:0 -->
- [Getting Started](#getting-started)
- [Import Storm](#import-storm)
- [Open a database](#open-a-database)
- [Simple ORM](#simple-orm)
- [Declare your structures](#declare-your-structures)
- [Save your object](#save-your-object)
- [Auto Increment](#auto-increment)
- [Simple queries](#simple-queries)
- [Fetch one object](#fetch-one-object)
- [Fetch multiple objects](#fetch-multiple-objects)
- [Fetch all objects](#fetch-all-objects)
- [Fetch all objects sorted by index](#fetch-all-objects-sorted-by-index)
- [Fetch a range of objects](#fetch-a-range-of-objects)
- [Skip, Limit and Reverse](#skip-limit-and-reverse)
- [Delete an object](#delete-an-object)
- [Update an object](#update-an-object)
- [Initialize buckets and indexes before saving an object](#initialize-buckets-and-indexes-before-saving-an-object)
- [Drop a bucket](#drop-a-bucket)
- [Re-index a bucket](#re-index-a-bucket)
- [Advanced queries](#advanced-queries)
- [Transactions](#transactions)
- [Options](#options)
- [BoltOptions](#boltoptions)
- [MarshalUnmarshaler](#marshalunmarshaler)
- [Provided Codecs](#provided-codecs)
- [Use existing Bolt connection](#use-existing-bolt-connection)
- [Batch mode](#batch-mode)
- [Nodes and nested buckets](#nodes-and-nested-buckets)
- [Node options](#node-options)
- [Simple Key/Value store](#simple-keyvalue-store)
- [BoltDB](#boltdb)
- [Migrations](#migrations)
- [License](#license)
- [Credits](#credits)
<!-- /TOC -->
## Getting Started
```bash
go get -u github.com/asdine/storm
```
## Import Storm
```go
import "github.com/asdine/storm"
```
## Open a database
Quick way of opening a database
```go
db, err := storm.Open("my.db")
defer db.Close()
```
`Open` can receive multiple options to customize the way it behaves. See [Options](#options) below
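For instance, combining several of the options documented below (a sketch):

```go
db, err := storm.Open("my.db",
	storm.Batch(),
	storm.BoltOptions(0600, &bolt.Options{Timeout: 1 * time.Second}),
)
```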
## Simple ORM
### Declare your structures
```go
type User struct {
ID int // primary key
Group string `storm:"index"` // this field will be indexed
Email string `storm:"unique"` // this field will be indexed with a unique constraint
Name string // this field will not be indexed
Age int `storm:"index"`
}
```
The primary key can be of any type as long as it is not a zero value. Storm will search for the tag `id`; if not present, Storm will search for a field named `ID`.
```go
type User struct {
ThePrimaryKey string `storm:"id"`// primary key
Group string `storm:"index"` // this field will be indexed
Email string `storm:"unique"` // this field will be indexed with a unique constraint
Name string // this field will not be indexed
}
```
Storm handles tags in nested structures with the `inline` tag
```go
type Base struct {
Ident bson.ObjectId `storm:"id"`
}
type User struct {
Base `storm:"inline"`
Group string `storm:"index"`
Email string `storm:"unique"`
Name string
CreatedAt time.Time `storm:"index"`
}
```
### Save your object
```go
user := User{
ID: 10,
Group: "staff",
Email: "john@provider.com",
Name: "John",
Age: 21,
CreatedAt: time.Now(),
}
err := db.Save(&user)
// err == nil
user.ID++
err = db.Save(&user)
// err == storm.ErrAlreadyExists
```
That's it.
`Save` creates or updates all the required indexes and buckets, checks the unique constraints and saves the object to the store.
#### Auto Increment
Storm can auto increment integer values so you don't have to worry about setting them when saving your objects. The new value is automatically set on your field.
```go
type Product struct {
Pk int `storm:"id,increment"` // primary key with auto increment
Name string
IntegerField uint64 `storm:"increment"`
IndexedIntegerField uint32 `storm:"index,increment"`
UniqueIntegerField int16 `storm:"unique,increment=100"` // the starting value can be set
}
p := Product{Name: "Vaccum Cleaner"}
fmt.Println(p.Pk)
fmt.Println(p.IntegerField)
fmt.Println(p.IndexedIntegerField)
fmt.Println(p.UniqueIntegerField)
// 0
// 0
// 0
// 0
_ = db.Save(&p)
fmt.Println(p.Pk)
fmt.Println(p.IntegerField)
fmt.Println(p.IndexedIntegerField)
fmt.Println(p.UniqueIntegerField)
// 1
// 1
// 1
// 100
```
### Simple queries
Any object can be fetched, indexed or not. Storm uses indexes when available, otherwise it uses the [query system](#advanced-queries).
#### Fetch one object
```go
var user User
err := db.One("Email", "john@provider.com", &user)
// err == nil
err = db.One("Name", "John", &user)
// err == nil
err = db.One("Name", "Jack", &user)
// err == storm.ErrNotFound
```
#### Fetch multiple objects
```go
var users []User
err := db.Find("Group", "staff", &users)
```
#### Fetch all objects
```go
var users []User
err := db.All(&users)
```
#### Fetch all objects sorted by index
```go
var users []User
err := db.AllByIndex("CreatedAt", &users)
```
#### Fetch a range of objects
```go
var users []User
err := db.Range("Age", 10, 21, &users)
```
#### Skip, Limit and Reverse
```go
var users []User
err := db.Find("Group", "staff", &users, storm.Skip(10))
err = db.Find("Group", "staff", &users, storm.Limit(10))
err = db.Find("Group", "staff", &users, storm.Reverse())
err = db.Find("Group", "staff", &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
err = db.All(&users, storm.Limit(10), storm.Skip(10), storm.Reverse())
err = db.AllByIndex("CreatedAt", &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
err = db.Range("Age", 10, 21, &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
```
#### Delete an object
```go
err := db.DeleteStruct(&user)
```
#### Update an object
```go
// Update multiple fields
err := db.Update(&User{ID: 10, Name: "Jack", Age: 45})
// Update a single field
err := db.UpdateField(&User{ID: 10}, "Age", 0)
```
#### Initialize buckets and indexes before saving an object
```go
err := db.Init(&User{})
```
Useful when starting your application
#### Drop a bucket
Using the struct
```go
err := db.Drop(&User)
```
Using the bucket name
```go
err := db.Drop("User")
```
#### Re-index a bucket
```go
err := db.ReIndex(&User{})
```
Useful when the structure has changed
### Advanced queries
For more complex queries, you can use the `Select` method.
`Select` takes any number of [`Matcher`](https://godoc.org/github.com/asdine/storm/q#Matcher) from the [`q`](https://godoc.org/github.com/asdine/storm/q) package.
Here are some common Matchers:
```go
// Equality
q.Eq("Name", John)
// Strictly greater than
q.Gt("Age", 7)
// Lesser than or equal to
q.Lte("Age", 77)
// Regex with name that starts with the letter D
q.Re("Name", "^D")
// In the given slice of values
q.In("Group", []string{"Staff", "Admin"})
```
Matchers can also be combined with `And`, `Or` and `Not`:
```go
// Match if all match
q.And(
q.Gt("Age", 7),
q.Re("Name", "^D")
)
// Match if one matches
q.Or(
q.Re("Name", "^A"),
q.Not(
q.Re("Name", "^B")
),
q.Re("Name", "^C"),
q.In("Group", []string{"Staff", "Admin"}),
q.And(
q.StrictEq("Password", []byte(password)),
q.Eq("Registered", true)
)
)
```
You can find the complete list in the [documentation](https://godoc.org/github.com/asdine/storm/q#Matcher).
`Select` takes any number of matchers and wraps them into a `q.And()` so it's not necessary to specify it. It returns a [`Query`](https://godoc.org/github.com/asdine/storm#Query) type.
```go
query := db.Select(q.Gte("Age", 7), q.Lte("Age", 77))
```
The `Query` type contains methods to filter and order the records.
```go
// Limit
query = query.Limit(10)
// Skip
query = query.Skip(20)
// Calls can also be chained
query = query.Limit(10).Skip(20).OrderBy("Age").Reverse()
```
But also to specify how to fetch them.
```go
var users []User
err = query.Find(&users)
var user User
err = query.First(&user)
```
Examples with `Select`:
```go
// Find all users with an ID between 10 and 100
err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Find(&users)
// Nested matchers
err = db.Select(q.Or(
q.Gt("ID", 50),
q.Lt("Age", 21),
q.And(
q.Eq("Group", "admin"),
q.Gte("Age", 21),
),
)).Find(&users)
query := db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age")
// Find multiple records
err = query.Find(&users)
// or
err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age").Find(&users)
// Find first record
err = query.First(&user)
// or
err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age").First(&user)
// Delete all matching records
err = query.Delete(new(User))
// Fetching records one by one (useful when the bucket contains a lot of records)
query = db.Select(q.Gte("ID", 10),q.Lte("ID", 100)).OrderBy("Age")
err = query.Each(new(User), func(record interface{}) error {
u := record.(*User)
...
return nil
})
```
See the [documentation](https://godoc.org/github.com/asdine/storm#Query) for a complete list of methods.
### Transactions
```go
tx, err := db.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
accountA.Amount -= 100
accountB.Amount += 100
err = tx.Save(accountA)
if err != nil {
return err
}
err = tx.Save(accountB)
if err != nil {
return err
}
return tx.Commit()
```
### Options
Storm options are functions that can be passed when constructing your Storm instance. You can pass any number of options.
#### BoltOptions
By default, Storm opens a database with the mode `0600` and a timeout of one second.
You can change this behavior by using `BoltOptions`
```go
db, err := storm.Open("my.db", storm.BoltOptions(0600, &bolt.Options{Timeout: 1 * time.Second}))
```
#### MarshalUnmarshaler
To store the data in BoltDB, Storm marshals it in JSON by default. If you wish to change this behavior you can pass a codec that implements [`codec.MarshalUnmarshaler`](https://godoc.org/github.com/asdine/storm/codec#MarshalUnmarshaler) via the [`storm.Codec`](https://godoc.org/github.com/asdine/storm#Codec) option:
```go
db := storm.Open("my.db", storm.Codec(myCodec))
```
##### Provided Codecs
You can easily implement your own `MarshalUnmarshaler`, but Storm comes with built-in support for [JSON](https://godoc.org/github.com/asdine/storm/codec/json) (default), [GOB](https://godoc.org/github.com/asdine/storm/codec/gob), [Sereal](https://godoc.org/github.com/asdine/storm/codec/sereal) and [Protocol Buffers](https://godoc.org/github.com/asdine/storm/codec/protobuf)
These can be used by importing the relevant package and using that codec to configure Storm. The example below shows all four (without proper error handling):
```go
import (
"github.com/asdine/storm"
"github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/codec/json"
"github.com/asdine/storm/codec/sereal"
"github.com/asdine/storm/codec/protobuf"
)
var gobDb, _ = storm.Open("gob.db", storm.Codec(gob.Codec))
var jsonDb, _ = storm.Open("json.db", storm.Codec(json.Codec))
var serealDb, _ = storm.Open("sereal.db", storm.Codec(sereal.Codec))
var protobufDb, _ = storm.Open("protobuf.db", storm.Codec(protobuf.Codec))
```
#### Use existing Bolt connection
You can use an existing connection and pass it to Storm
```go
bDB, _ := bolt.Open(filepath.Join(dir, "bolt.db"), 0600, &bolt.Options{Timeout: 10 * time.Second})
db := storm.Open("my.db", storm.UseDB(bDB))
```
#### Batch mode
Batch mode can be enabled to speed up concurrent writes (see [Batch read-write transactions](https://github.com/boltdb/bolt#batch-read-write-transactions))
```go
db := storm.Open("my.db", storm.Batch())
```
## Nodes and nested buckets
Storm takes advantage of BoltDB's nested buckets feature by using `storm.Node`.
A `storm.Node` is the underlying object used by `storm.DB` to manipulate a bucket.
To create a nested bucket and use the same API as `storm.DB`, you can use the `DB.From` method.
```go
repo := db.From("repo")
err := repo.Save(&Issue{
Title: "I want more features",
Author: user.ID,
})
err = repo.Save(newRelease("0.10"))
var issues []Issue
err = repo.Find("Author", user.ID, &issues)
var release Release
err = repo.One("Tag", "0.10", &release)
```
You can also chain the nodes to create a hierarchy
```go
chars := db.From("characters")
heroes := chars.From("heroes")
enemies := chars.From("enemies")
items := db.From("items")
potions := items.From("consumables").From("medicine").From("potions")
```
You can even pass the entire hierarchy as arguments to `From`:
```go
privateNotes := db.From("notes", "private")
workNotes := db.From("notes", "work")
```
### Node options
A Node can also be configured. Activating an option on a Node creates a copy, so a Node is always thread-safe.
```go
n := db.From("my-node")
```
Give a bolt.Tx transaction to the Node
```go
n = n.WithTransaction(tx)
```
Enable batch mode
```go
n = n.WithBatch(true)
```
Use a Codec
```go
n = n.WithCodec(gob.Codec)
```
## Simple Key/Value store
Storm can be used as a simple, robust, key/value store that can store anything.
The key and the value can be of any type as long as the key is not a zero value.
Saving data:
```go
db.Set("logs", time.Now(), "I'm eating my breakfast man")
db.Set("sessions", bson.NewObjectId(), &someUser)
db.Set("weird storage", "754-3010", map[string]interface{}{
"hair": "blonde",
"likes": []string{"cheese", "star wars"},
})
```
Fetching data:
```go
user := User{}
db.Get("sessions", someObjectId, &user)
var details map[string]interface{}
db.Get("weird storage", "754-3010", &details)
db.Get("sessions", someObjectId, &details)
```
Deleting data:
```go
db.Delete("sessions", someObjectId)
db.Delete("weird storage", "754-3010")
```
## BoltDB
BoltDB is still easily accessible and can be used as usual
```go
db.Bolt.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte("my bucket"))
val := bucket.Get([]byte("any id"))
fmt.Println(string(val))
return nil
})
```
A transaction can also be passed to Storm
```go
db.Bolt.Update(func(tx *bolt.Tx) error {
...
dbx := db.WithTransaction(tx)
err = dbx.Save(&user)
...
return nil
})
```
## Migrations
You can use the migration tool to migrate databases that use an older version of Storm.
See this [README](https://github.com/asdine/storm-migrator) for more information.
## License
MIT
## Credits
- [Asdine El Hrychy](https://github.com/asdine)
- [Bjørn Erik Pedersen](https://github.com/bep)

View File

@ -1,47 +0,0 @@
package storm
import "github.com/boltdb/bolt"
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist.
func (n *node) CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error) {
var b *bolt.Bucket
var err error
bucketNames := append(n.rootBucket, bucket)
for _, bucketName := range bucketNames {
if b != nil {
if b, err = b.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
return nil, err
}
} else {
if b, err = tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
return nil, err
}
}
}
return b, nil
}
// GetBucket returns the given bucket below the current node.
func (n *node) GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket {
var b *bolt.Bucket
bucketNames := append(n.rootBucket, children...)
for _, bucketName := range bucketNames {
if b != nil {
if b = b.Bucket([]byte(bucketName)); b == nil {
return nil
}
} else {
if b = tx.Bucket([]byte(bucketName)); b == nil {
return nil
}
}
}
return b
}

View File

@ -1,11 +0,0 @@
// Package codec contains sub-packages with different codecs that can be used
// to encode and decode entities in Storm.
package codec
// MarshalUnmarshaler represents a codec used to marshal and unmarshal entities.
type MarshalUnmarshaler interface {
Marshal(v interface{}) ([]byte, error)
Unmarshal(b []byte, v interface{}) error
// name of this codec
Name() string
}

View File

@ -1,25 +0,0 @@
// Package json contains a codec to encode and decode entities in JSON format
package json
import (
"encoding/json"
)
const name = "json"
// Codec that encodes to and decodes from JSON.
var Codec = new(jsonCodec)
type jsonCodec int
func (j jsonCodec) Marshal(v interface{}) ([]byte, error) {
return json.Marshal(v)
}
func (j jsonCodec) Unmarshal(b []byte, v interface{}) error {
return json.Unmarshal(b, v)
}
func (j jsonCodec) Name() string {
return name
}

View File

@ -1,54 +0,0 @@
package storm
import "errors"
// Errors
var (
// ErrNoID is returned when no ID field or id tag is found in the struct.
ErrNoID = errors.New("missing struct tag id or ID field")
// ErrZeroID is returned when the ID field is a zero value.
ErrZeroID = errors.New("id field must not be a zero value")
// ErrBadType is returned when a method receives an unexpected value type.
ErrBadType = errors.New("provided data must be a struct or a pointer to struct")
// ErrAlreadyExists is returned when trying to set an existing value on a field that has a unique index.
ErrAlreadyExists = errors.New("already exists")
// ErrNilParam is returned when the specified param is expected to be not nil.
ErrNilParam = errors.New("param must not be nil")
// ErrUnknownTag is returned when an unexpected tag is specified.
ErrUnknownTag = errors.New("unknown tag")
// ErrIdxNotFound is returned when the specified index is not found.
ErrIdxNotFound = errors.New("index not found")
// ErrSlicePtrNeeded is returned when an unexpected value is given, instead of a pointer to slice.
ErrSlicePtrNeeded = errors.New("provided target must be a pointer to slice")
// ErrStructPtrNeeded is returned when an unexpected value is given, instead of a pointer to struct.
ErrStructPtrNeeded = errors.New("provided target must be a pointer to struct")
// ErrPtrNeeded is returned when an unexpected value is given, instead of a pointer.
ErrPtrNeeded = errors.New("provided target must be a pointer to a valid variable")
// ErrNoName is returned when the specified struct has no name.
ErrNoName = errors.New("provided target must have a name")
// ErrNotFound is returned when the specified record is not saved in the bucket.
ErrNotFound = errors.New("not found")
// ErrNotInTransaction is returned when trying to rollback or commit when not in transaction.
ErrNotInTransaction = errors.New("not in transaction")
// ErrUnAddressable is returned when a struct or an exported field of a struct is unaddressable
ErrUnAddressable = errors.New("unaddressable value")
// ErrIncompatibleValue is returned when trying to set a value with a different type than the chosen field
ErrIncompatibleValue = errors.New("incompatible value")
// ErrDifferentCodec is returned when using a codec different than the first codec used with the bucket.
ErrDifferentCodec = errors.New("the selected codec is incompatible with this bucket")
)

View File

@ -1,223 +0,0 @@
package storm
import (
"fmt"
"reflect"
"strconv"
"strings"
"github.com/asdine/storm/index"
"github.com/boltdb/bolt"
)
// Storm tags
const (
tagID = "id"
tagIdx = "index"
tagUniqueIdx = "unique"
tagInline = "inline"
tagIncrement = "increment"
indexPrefix = "__storm_index_"
)
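// For reference (illustrative, mirroring the README): a struct using these
// tags might look like
//
//	type User struct {
//		ID    int    `storm:"id,increment"`
//		Email string `storm:"unique"`
//		Group string `storm:"index"`
//	}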
type fieldConfig struct {
Name string
Index string
IsZero bool
IsID bool
Increment bool
IncrementStart int64
IsInteger bool
Value *reflect.Value
}
// structConfig is a structure gathering all the relevant information about a model
type structConfig struct {
Name string
Fields map[string]*fieldConfig
ID *fieldConfig
}
func extract(s *reflect.Value, mi ...*structConfig) (*structConfig, error) {
if s.Kind() == reflect.Ptr {
e := s.Elem()
s = &e
}
if s.Kind() != reflect.Struct {
return nil, ErrBadType
}
typ := s.Type()
var child bool
var m *structConfig
if len(mi) > 0 {
m = mi[0]
child = true
} else {
m = &structConfig{}
m.Fields = make(map[string]*fieldConfig)
}
if m.Name == "" {
m.Name = typ.Name()
}
numFields := s.NumField()
for i := 0; i < numFields; i++ {
field := typ.Field(i)
value := s.Field(i)
if field.PkgPath != "" {
continue
}
err := extractField(&value, &field, m, child)
if err != nil {
return nil, err
}
}
if child {
return m, nil
}
if m.ID == nil {
return nil, ErrNoID
}
if m.Name == "" {
return nil, ErrNoName
}
return m, nil
}
func extractField(value *reflect.Value, field *reflect.StructField, m *structConfig, isChild bool) error {
var f *fieldConfig
var err error
tag := field.Tag.Get("storm")
if tag != "" {
f = &fieldConfig{
Name: field.Name,
IsZero: isZero(value),
IsInteger: isInteger(value),
Value: value,
IncrementStart: 1,
}
tags := strings.Split(tag, ",")
for _, tag := range tags {
switch tag {
case "id":
f.IsID = true
case tagUniqueIdx, tagIdx:
f.Index = tag
case tagInline:
if value.Kind() == reflect.Ptr {
e := value.Elem()
value = &e
}
if value.Kind() == reflect.Struct {
a := value.Addr()
_, err := extract(&a, m)
if err != nil {
return err
}
}
// we don't need to save this field
return nil
default:
if strings.HasPrefix(tag, tagIncrement) {
f.Increment = true
parts := strings.Split(tag, "=")
if parts[0] != tagIncrement {
return ErrUnknownTag
}
if len(parts) > 1 {
f.IncrementStart, err = strconv.ParseInt(parts[1], 0, 64)
if err != nil {
return err
}
}
} else {
return ErrUnknownTag
}
}
}
if _, ok := m.Fields[f.Name]; !ok || !isChild {
m.Fields[f.Name] = f
}
}
if m.ID == nil && f != nil && f.IsID {
m.ID = f
}
// the field is named ID and no ID field has been detected before
if m.ID == nil && field.Name == "ID" {
if f == nil {
f = &fieldConfig{
Name: field.Name,
IsZero: isZero(value),
IsInteger: isInteger(value),
IsID: true,
Value: value,
IncrementStart: 1,
}
m.Fields[field.Name] = f
}
m.ID = f
}
return nil
}
func extractSingleField(ref *reflect.Value, fieldName string) (*structConfig, error) {
var cfg structConfig
cfg.Fields = make(map[string]*fieldConfig)
f, ok := ref.Type().FieldByName(fieldName)
if !ok || f.PkgPath != "" {
return nil, fmt.Errorf("field %s not found", fieldName)
}
v := ref.FieldByName(fieldName)
err := extractField(&v, &f, &cfg, false)
if err != nil {
return nil, err
}
return &cfg, nil
}
func getIndex(bucket *bolt.Bucket, idxKind string, fieldName string) (index.Index, error) {
var idx index.Index
var err error
switch idxKind {
case tagUniqueIdx:
idx, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
case tagIdx:
idx, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
default:
err = ErrIdxNotFound
}
return idx, err
}
func isZero(v *reflect.Value) bool {
zero := reflect.Zero(v.Type()).Interface()
current := v.Interface()
return reflect.DeepEqual(current, zero)
}
func isInteger(v *reflect.Value) bool {
if v == nil {
return false
}
kind := v.Kind()
return kind >= reflect.Int && kind <= reflect.Uint64
}

View File

@ -1,416 +0,0 @@
package storm
import (
"reflect"
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
)
// A Finder can fetch types from BoltDB
type Finder interface {
// One returns one record by the specified index
One(fieldName string, value interface{}, to interface{}) error
// Find returns one or more records by the specified index
Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error
// AllByIndex gets all the records of a bucket that are indexed in the specified index
AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error
// All gets all the records of a bucket.
// If there are no records it returns no error and the 'to' parameter is set to an empty slice.
All(to interface{}, options ...func(*index.Options)) error
// Select a list of records that match a list of matchers. Doesn't use indexes.
Select(matchers ...q.Matcher) Query
// Range returns one or more records by the specified index within the specified range
Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error
// Count counts all the records of a bucket
Count(data interface{}) (int, error)
}
// One returns one record by the specified index
func (n *node) One(fieldName string, value interface{}, to interface{}) error {
sink, err := newFirstSink(n, to)
if err != nil {
return err
}
bucketName := sink.bucketName()
if bucketName == "" {
return ErrNoName
}
if fieldName == "" {
return ErrNotFound
}
ref := reflect.Indirect(sink.ref)
cfg, err := extractSingleField(&ref, fieldName)
if err != nil {
return err
}
field, ok := cfg.Fields[fieldName]
if !ok || (!field.IsID && field.Index == "") {
query := newQuery(n, q.StrictEq(fieldName, value))
if n.tx != nil {
err = query.query(n.tx, sink)
} else {
err = n.s.Bolt.View(func(tx *bolt.Tx) error {
return query.query(tx, sink)
})
}
if err != nil {
return err
}
return sink.flush()
}
val, err := toBytes(value, n.s.codec)
if err != nil {
return err
}
return n.readTx(func(tx *bolt.Tx) error {
return n.one(tx, bucketName, fieldName, cfg, to, val, field.IsID)
})
}
func (n *node) one(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, to interface{}, val []byte, skipIndex bool) error {
bucket := n.GetBucket(tx, bucketName)
if bucket == nil {
return ErrNotFound
}
var id []byte
if !skipIndex {
idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
if err != nil {
if err == index.ErrNotFound {
return ErrNotFound
}
return err
}
id = idx.Get(val)
} else {
id = val
}
if id == nil {
return ErrNotFound
}
raw := bucket.Get(id)
if raw == nil {
return ErrNotFound
}
return n.s.codec.Unmarshal(raw, to)
}
// Find returns one or more records by the specified index
func (n *node) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
sink, err := newListSink(n, to)
if err != nil {
return err
}
bucketName := sink.bucketName()
if bucketName == "" {
return ErrNoName
}
ref := reflect.Indirect(reflect.New(sink.elemType))
cfg, err := extractSingleField(&ref, fieldName)
if err != nil {
return err
}
opts := index.NewOptions()
for _, fn := range options {
fn(opts)
}
field, ok := cfg.Fields[fieldName]
if !ok || (!field.IsID && (field.Index == "" || value == nil)) {
sink.limit = opts.Limit
sink.skip = opts.Skip
query := newQuery(n, q.Eq(fieldName, value))
if opts.Reverse {
query.Reverse()
}
err = n.readTx(func(tx *bolt.Tx) error {
return query.query(tx, sink)
})
if err != nil {
return err
}
return sink.flush()
}
val, err := toBytes(value, n.s.codec)
if err != nil {
return err
}
return n.readTx(func(tx *bolt.Tx) error {
return n.find(tx, bucketName, fieldName, cfg, sink, val, opts)
})
}
func (n *node) find(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, val []byte, opts *index.Options) error {
bucket := n.GetBucket(tx, bucketName)
if bucket == nil {
return ErrNotFound
}
sorter := newSorter(n)
idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
if err != nil {
return err
}
list, err := idx.All(val, opts)
if err != nil {
if err == index.ErrNotFound {
return ErrNotFound
}
return err
}
sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
for i := range list {
raw := bucket.Get(list[i])
if raw == nil {
return ErrNotFound
}
_, err = sorter.filter(sink, nil, bucket, list[i], raw)
if err != nil {
return err
}
}
return sink.flush()
}
// AllByIndex gets all the records of a bucket that are indexed in the specified index
func (n *node) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
if fieldName == "" {
return n.All(to, options...)
}
ref := reflect.ValueOf(to)
if ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Slice {
return ErrSlicePtrNeeded
}
typ := reflect.Indirect(ref).Type().Elem()
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
newElem := reflect.New(typ)
cfg, err := extract(&newElem)
if err != nil {
return err
}
if cfg.ID.Name == fieldName {
return n.All(to, options...)
}
opts := index.NewOptions()
for _, fn := range options {
fn(opts)
}
return n.readTx(func(tx *bolt.Tx) error {
return n.allByIndex(tx, fieldName, cfg, &ref, opts)
})
}
func (n *node) allByIndex(tx *bolt.Tx, fieldName string, cfg *structConfig, ref *reflect.Value, opts *index.Options) error {
bucket := n.GetBucket(tx, cfg.Name)
if bucket == nil {
return ErrNotFound
}
fieldCfg, ok := cfg.Fields[fieldName]
if !ok {
return ErrNotFound
}
idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
if err != nil {
return err
}
list, err := idx.AllRecords(opts)
if err != nil {
if err == index.ErrNotFound {
return ErrNotFound
}
return err
}
results := reflect.MakeSlice(reflect.Indirect(*ref).Type(), len(list), len(list))
for i := range list {
raw := bucket.Get(list[i])
if raw == nil {
return ErrNotFound
}
err = n.s.codec.Unmarshal(raw, results.Index(i).Addr().Interface())
if err != nil {
return err
}
}
reflect.Indirect(*ref).Set(results)
return nil
}
// All gets all the records of a bucket.
// If there are no records it returns no error and the 'to' parameter is set to an empty slice.
func (n *node) All(to interface{}, options ...func(*index.Options)) error {
opts := index.NewOptions()
for _, fn := range options {
fn(opts)
}
query := newQuery(n, nil).Limit(opts.Limit).Skip(opts.Skip)
if opts.Reverse {
query.Reverse()
}
err := query.Find(to)
if err != nil && err != ErrNotFound {
return err
}
if err == ErrNotFound {
ref := reflect.ValueOf(to)
results := reflect.MakeSlice(reflect.Indirect(ref).Type(), 0, 0)
reflect.Indirect(ref).Set(results)
}
return nil
}
// Range returns one or more records by the specified index within the specified range
func (n *node) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
sink, err := newListSink(n, to)
if err != nil {
return err
}
bucketName := sink.bucketName()
if bucketName == "" {
return ErrNoName
}
ref := reflect.Indirect(reflect.New(sink.elemType))
cfg, err := extractSingleField(&ref, fieldName)
if err != nil {
return err
}
opts := index.NewOptions()
for _, fn := range options {
fn(opts)
}
field, ok := cfg.Fields[fieldName]
if !ok || (!field.IsID && field.Index == "") {
sink.limit = opts.Limit
sink.skip = opts.Skip
query := newQuery(n, q.And(q.Gte(fieldName, min), q.Lte(fieldName, max)))
if opts.Reverse {
query.Reverse()
}
err = n.readTx(func(tx *bolt.Tx) error {
return query.query(tx, sink)
})
if err != nil {
return err
}
return sink.flush()
}
mn, err := toBytes(min, n.s.codec)
if err != nil {
return err
}
mx, err := toBytes(max, n.s.codec)
if err != nil {
return err
}
return n.readTx(func(tx *bolt.Tx) error {
return n.rnge(tx, bucketName, fieldName, cfg, sink, mn, mx, opts)
})
}
func (n *node) rnge(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, min, max []byte, opts *index.Options) error {
bucket := n.GetBucket(tx, bucketName)
if bucket == nil {
reflect.Indirect(sink.ref).SetLen(0)
return nil
}
sorter := newSorter(n)
idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
if err != nil {
return err
}
list, err := idx.Range(min, max, opts)
if err != nil {
return err
}
sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
for i := range list {
raw := bucket.Get(list[i])
if raw == nil {
return ErrNotFound
}
_, err = sorter.filter(sink, nil, bucket, list[i], raw)
if err != nil {
return err
}
}
return sink.flush()
}
// Count counts all the records of a bucket
func (n *node) Count(data interface{}) (int, error) {
return n.Select().Count(data)
}

@@ -1,14 +0,0 @@
package index
import "errors"
var (
// ErrNotFound is returned when the specified record is not saved in the bucket.
ErrNotFound = errors.New("not found")
// ErrAlreadyExists is returned when trying to set an existing value on a field that has a unique index.
ErrAlreadyExists = errors.New("already exists")
// ErrNilParam is returned when a parameter that must not be nil is nil.
ErrNilParam = errors.New("param must not be nil")
)

@@ -1,13 +0,0 @@
// Package index contains Index engines used to store values and their corresponding IDs
package index
// Index interface
type Index interface {
Add(value []byte, targetID []byte) error
Remove(value []byte) error
RemoveID(id []byte) error
Get(value []byte) []byte
All(value []byte, opts *Options) ([][]byte, error)
AllRecords(opts *Options) ([][]byte, error)
Range(min []byte, max []byte, opts *Options) ([][]byte, error)
}

@@ -1,245 +0,0 @@
package index
import (
"bytes"
"github.com/asdine/storm/internal"
"github.com/boltdb/bolt"
)
// NewListIndex loads a ListIndex
func NewListIndex(parent *bolt.Bucket, indexName []byte) (*ListIndex, error) {
var err error
b := parent.Bucket(indexName)
if b == nil {
if !parent.Writable() {
return nil, ErrNotFound
}
b, err = parent.CreateBucket(indexName)
if err != nil {
return nil, err
}
}
ids, err := NewUniqueIndex(b, []byte("storm__ids"))
if err != nil {
return nil, err
}
return &ListIndex{
IndexBucket: b,
Parent: parent,
IDs: ids,
}, nil
}
// ListIndex is an index that references values and the corresponding IDs.
type ListIndex struct {
Parent *bolt.Bucket
IndexBucket *bolt.Bucket
IDs *UniqueIndex
}
// Add a value to the list index
func (idx *ListIndex) Add(newValue []byte, targetID []byte) error {
if len(newValue) == 0 || len(targetID) == 0 {
return ErrNilParam
}
key := idx.IDs.Get(targetID)
if key != nil {
err := idx.IndexBucket.Delete(key)
if err != nil {
return err
}
err = idx.IDs.Remove(targetID)
if err != nil {
return err
}
key = key[:0]
}
key = append(key, newValue...)
key = append(key, '_')
key = append(key, '_')
key = append(key, targetID...)
err := idx.IDs.Add(targetID, key)
if err != nil {
return err
}
return idx.IndexBucket.Put(key, targetID)
}
// Remove a value from the list index
func (idx *ListIndex) Remove(value []byte) error {
var err error
var keys [][]byte
c := idx.IndexBucket.Cursor()
prefix := generatePrefix(value)
for k, _ := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, _ = c.Next() {
keys = append(keys, k)
}
for _, k := range keys {
err = idx.IndexBucket.Delete(k)
if err != nil {
return err
}
}
return idx.IDs.RemoveID(value)
}
// RemoveID removes an ID from the list index
func (idx *ListIndex) RemoveID(targetID []byte) error {
value := idx.IDs.Get(targetID)
if value == nil {
return nil
}
err := idx.IndexBucket.Delete(value)
if err != nil {
return err
}
return idx.IDs.Remove(targetID)
}
// Get the first ID corresponding to the given value
func (idx *ListIndex) Get(value []byte) []byte {
c := idx.IndexBucket.Cursor()
prefix := generatePrefix(value)
for k, id := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, id = c.Next() {
return id
}
return nil
}
// All the IDs corresponding to the given value
func (idx *ListIndex) All(value []byte, opts *Options) ([][]byte, error) {
var list [][]byte
c := idx.IndexBucket.Cursor()
cur := internal.Cursor{C: c, Reverse: opts != nil && opts.Reverse}
prefix := generatePrefix(value)
k, id := c.Seek(prefix)
if cur.Reverse {
var count int
for ; k != nil && bytes.HasPrefix(k, prefix); k, _ = c.Next() {
count++
}
k, id = c.Prev()
list = make([][]byte, 0, count)
}
for ; bytes.HasPrefix(k, prefix); k, id = cur.Next() {
if opts != nil && opts.Skip > 0 {
opts.Skip--
continue
}
if opts != nil && opts.Limit == 0 {
break
}
if opts != nil && opts.Limit > 0 {
opts.Limit--
}
list = append(list, id)
}
return list, nil
}
// AllRecords returns all the IDs of this index
func (idx *ListIndex) AllRecords(opts *Options) ([][]byte, error) {
var list [][]byte
c := internal.Cursor{C: idx.IndexBucket.Cursor(), Reverse: opts != nil && opts.Reverse}
for k, id := c.First(); k != nil; k, id = c.Next() {
if id == nil || bytes.Equal(k, []byte("storm__ids")) {
continue
}
if opts != nil && opts.Skip > 0 {
opts.Skip--
continue
}
if opts != nil && opts.Limit == 0 {
break
}
if opts != nil && opts.Limit > 0 {
opts.Limit--
}
list = append(list, id)
}
return list, nil
}
// Range returns the ids corresponding to the given range of values
func (idx *ListIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
var list [][]byte
c := internal.RangeCursor{
C: idx.IndexBucket.Cursor(),
Reverse: opts != nil && opts.Reverse,
Min: min,
Max: max,
CompareFn: func(val, limit []byte) int {
pos := bytes.LastIndex(val, []byte("__"))
return bytes.Compare(val[:pos], limit)
},
}
for k, id := c.First(); c.Continue(k); k, id = c.Next() {
if id == nil || bytes.Equal(k, []byte("storm__ids")) {
continue
}
if opts != nil && opts.Skip > 0 {
opts.Skip--
continue
}
if opts != nil && opts.Limit == 0 {
break
}
if opts != nil && opts.Limit > 0 {
opts.Limit--
}
list = append(list, id)
}
return list, nil
}
func generatePrefix(value []byte) []byte {
prefix := make([]byte, len(value)+2)
copy(prefix, value)
prefix[len(value)] = '_'
prefix[len(value)+1] = '_'
return prefix
}

@@ -1,15 +0,0 @@
package index
// NewOptions creates initialized Options
func NewOptions() *Options {
return &Options{
Limit: -1,
}
}
// Options are used to customize queries
type Options struct {
Limit int
Skip int
Reverse bool
}

@@ -1,154 +0,0 @@
package index
import (
"bytes"
"github.com/asdine/storm/internal"
"github.com/boltdb/bolt"
)
// NewUniqueIndex loads a UniqueIndex
func NewUniqueIndex(parent *bolt.Bucket, indexName []byte) (*UniqueIndex, error) {
var err error
b := parent.Bucket(indexName)
if b == nil {
if !parent.Writable() {
return nil, ErrNotFound
}
b, err = parent.CreateBucket(indexName)
if err != nil {
return nil, err
}
}
return &UniqueIndex{
IndexBucket: b,
Parent: parent,
}, nil
}
// UniqueIndex is an index that references unique values and the corresponding ID.
type UniqueIndex struct {
Parent *bolt.Bucket
IndexBucket *bolt.Bucket
}
// Add a value to the unique index
func (idx *UniqueIndex) Add(value []byte, targetID []byte) error {
if len(value) == 0 || len(targetID) == 0 {
return ErrNilParam
}
exists := idx.IndexBucket.Get(value)
if exists != nil {
if bytes.Equal(exists, targetID) {
return nil
}
return ErrAlreadyExists
}
return idx.IndexBucket.Put(value, targetID)
}
// Remove a value from the unique index
func (idx *UniqueIndex) Remove(value []byte) error {
return idx.IndexBucket.Delete(value)
}
// RemoveID removes an ID from the unique index
func (idx *UniqueIndex) RemoveID(id []byte) error {
c := idx.IndexBucket.Cursor()
for val, ident := c.First(); val != nil; val, ident = c.Next() {
if bytes.Equal(ident, id) {
return idx.Remove(val)
}
}
return nil
}
// Get the id corresponding to the given value
func (idx *UniqueIndex) Get(value []byte) []byte {
return idx.IndexBucket.Get(value)
}
// All returns all the ids corresponding to the given value
func (idx *UniqueIndex) All(value []byte, opts *Options) ([][]byte, error) {
id := idx.IndexBucket.Get(value)
if id != nil {
return [][]byte{id}, nil
}
return nil, nil
}
// AllRecords returns all the IDs of this index
func (idx *UniqueIndex) AllRecords(opts *Options) ([][]byte, error) {
var list [][]byte
c := internal.Cursor{C: idx.IndexBucket.Cursor(), Reverse: opts != nil && opts.Reverse}
for val, ident := c.First(); val != nil; val, ident = c.Next() {
if opts != nil && opts.Skip > 0 {
opts.Skip--
continue
}
if opts != nil && opts.Limit == 0 {
break
}
if opts != nil && opts.Limit > 0 {
opts.Limit--
}
list = append(list, ident)
}
return list, nil
}
// Range returns the ids corresponding to the given range of values
func (idx *UniqueIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
var list [][]byte
c := internal.RangeCursor{
C: idx.IndexBucket.Cursor(),
Reverse: opts != nil && opts.Reverse,
Min: min,
Max: max,
CompareFn: func(val, limit []byte) int {
return bytes.Compare(val, limit)
},
}
for val, ident := c.First(); val != nil && c.Continue(val); val, ident = c.Next() {
if opts != nil && opts.Skip > 0 {
opts.Skip--
continue
}
if opts != nil && opts.Limit == 0 {
break
}
if opts != nil && opts.Limit > 0 {
opts.Limit--
}
list = append(list, ident)
}
return list, nil
}
// first returns the first ID of this index
func (idx *UniqueIndex) first() []byte {
c := idx.IndexBucket.Cursor()
for val, ident := c.First(); val != nil; val, ident = c.Next() {
return ident
}
return nil
}

@@ -1,63 +0,0 @@
package internal
import "github.com/boltdb/bolt"
// Cursor that can be reversed
type Cursor struct {
C *bolt.Cursor
Reverse bool
}
// First element
func (c *Cursor) First() ([]byte, []byte) {
if c.Reverse {
return c.C.Last()
}
return c.C.First()
}
// Next element
func (c *Cursor) Next() ([]byte, []byte) {
if c.Reverse {
return c.C.Prev()
}
return c.C.Next()
}
// RangeCursor that can be reversed
type RangeCursor struct {
C *bolt.Cursor
Reverse bool
Min []byte
Max []byte
CompareFn func([]byte, []byte) int
}
// First element
func (c *RangeCursor) First() ([]byte, []byte) {
if c.Reverse {
return c.C.Seek(c.Max)
}
return c.C.Seek(c.Min)
}
// Next element
func (c *RangeCursor) Next() ([]byte, []byte) {
if c.Reverse {
return c.C.Prev()
}
return c.C.Next()
}
// Continue reports whether the loop should keep going
func (c *RangeCursor) Continue(val []byte) bool {
if c.Reverse {
return val != nil && c.CompareFn(val, c.Min) >= 0
}
return val != nil && c.CompareFn(val, c.Max) <= 0
}

vendor/github.com/asdine/storm/kv.go
@@ -1,145 +0,0 @@
package storm
import (
"reflect"
"github.com/boltdb/bolt"
)
// KeyValueStore can store and fetch values by key
type KeyValueStore interface {
// Get a value from a bucket
Get(bucketName string, key interface{}, to interface{}) error
// Set a key/value pair into a bucket
Set(bucketName string, key interface{}, value interface{}) error
// Delete deletes a key from a bucket
Delete(bucketName string, key interface{}) error
// GetBytes gets a raw value from a bucket.
GetBytes(bucketName string, key interface{}) ([]byte, error)
// SetBytes sets a raw value into a bucket.
SetBytes(bucketName string, key interface{}, value []byte) error
}
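A short sketch of the key/value API (bucket and key names are illustrative):

func kvExamples(n storm.Node) error {
	// Set marshals the value with the node codec before storing it
	if err := n.Set("settings", "theme", "dark"); err != nil {
		return err
	}
	var theme string
	if err := n.Get("settings", "theme", &theme); err != nil {
		return err
	}
	// SetBytes and GetBytes bypass the codec and work on raw bytes
	if err := n.SetBytes("blobs", "avatar", []byte{0xff, 0xd8}); err != nil {
		return err
	}
	return n.Delete("settings", "theme")
}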
// GetBytes gets a raw value from a bucket.
func (n *node) GetBytes(bucketName string, key interface{}) ([]byte, error) {
id, err := toBytes(key, n.s.codec)
if err != nil {
return nil, err
}
var val []byte
return val, n.readTx(func(tx *bolt.Tx) error {
raw, err := n.getBytes(tx, bucketName, id)
if err != nil {
return err
}
val = make([]byte, len(raw))
copy(val, raw)
return nil
})
}
// getBytes gets a raw value from a bucket within the given transaction.
func (n *node) getBytes(tx *bolt.Tx, bucketName string, id []byte) ([]byte, error) {
bucket := n.GetBucket(tx, bucketName)
if bucket == nil {
return nil, ErrNotFound
}
raw := bucket.Get(id)
if raw == nil {
return nil, ErrNotFound
}
return raw, nil
}
// SetBytes sets a raw value into a bucket.
func (n *node) SetBytes(bucketName string, key interface{}, value []byte) error {
if key == nil {
return ErrNilParam
}
id, err := toBytes(key, n.s.codec)
if err != nil {
return err
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.setBytes(tx, bucketName, id, value)
})
}
func (n *node) setBytes(tx *bolt.Tx, bucketName string, id, data []byte) error {
bucket, err := n.CreateBucketIfNotExists(tx, bucketName)
if err != nil {
return err
}
// save node configuration in the bucket
_, err = newMeta(bucket, n)
if err != nil {
return err
}
return bucket.Put(id, data)
}
// Get a value from a bucket
func (n *node) Get(bucketName string, key interface{}, to interface{}) error {
ref := reflect.ValueOf(to)
if !ref.IsValid() || ref.Kind() != reflect.Ptr {
return ErrPtrNeeded
}
id, err := toBytes(key, n.s.codec)
if err != nil {
return err
}
return n.readTx(func(tx *bolt.Tx) error {
raw, err := n.getBytes(tx, bucketName, id)
if err != nil {
return err
}
return n.s.codec.Unmarshal(raw, to)
})
}
// Set a key/value pair into a bucket
func (n *node) Set(bucketName string, key interface{}, value interface{}) error {
var data []byte
var err error
if value != nil {
data, err = n.s.codec.Marshal(value)
if err != nil {
return err
}
}
return n.SetBytes(bucketName, key, data)
}
// Delete deletes a key from a bucket
func (n *node) Delete(bucketName string, key interface{}) error {
id, err := toBytes(key, n.s.codec)
if err != nil {
return err
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.delete(tx, bucketName, id)
})
}
func (n *node) delete(tx *bolt.Tx, bucketName string, id []byte) error {
bucket := n.GetBucket(tx, bucketName)
if bucket == nil {
return ErrNotFound
}
return bucket.Delete(id)
}

@@ -1,69 +0,0 @@
package storm
import (
"reflect"
"github.com/boltdb/bolt"
)
const (
metaCodec = "codec"
)
func newMeta(b *bolt.Bucket, n Node) (*meta, error) {
m := b.Bucket([]byte(metadataBucket))
if m != nil {
name := m.Get([]byte(metaCodec))
if string(name) != n.Codec().Name() {
return nil, ErrDifferentCodec
}
return &meta{
node: n,
bucket: m,
}, nil
}
m, err := b.CreateBucket([]byte(metadataBucket))
if err != nil {
return nil, err
}
if err := m.Put([]byte(metaCodec), []byte(n.Codec().Name())); err != nil {
return nil, err
}
return &meta{
node: n,
bucket: m,
}, nil
}
type meta struct {
node Node
bucket *bolt.Bucket
}
func (m *meta) increment(field *fieldConfig) error {
var err error
counter := field.IncrementStart
raw := m.bucket.Get([]byte(field.Name + "counter"))
if raw != nil {
counter, err = numberfromb(raw)
if err != nil {
return err
}
counter++
}
raw, err = numbertob(counter)
if err != nil {
return err
}
err = m.bucket.Put([]byte(field.Name+"counter"), raw)
if err != nil {
return err
}
field.Value.Set(reflect.ValueOf(counter).Convert(field.Value.Type()))
field.IsZero = false
return nil
}

@@ -1,125 +0,0 @@
package storm
import (
"github.com/asdine/storm/codec"
"github.com/boltdb/bolt"
)
// A Node in Storm represents the API to a BoltDB bucket.
type Node interface {
Tx
TypeStore
KeyValueStore
BucketScanner
// From returns a new Storm node with a new bucket root below the current.
// All DB operations on the new node will be executed relative to this bucket.
From(addend ...string) Node
// Bucket returns the bucket name as a slice from the root.
// In the normal, simple case this will be empty.
Bucket() []string
// GetBucket returns the given bucket below the current node.
GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist.
CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error)
// WithTransaction returns a new Storm node that will use the given transaction.
WithTransaction(tx *bolt.Tx) Node
// Begin starts a new transaction.
Begin(writable bool) (Node, error)
// Codec used by this instance of Storm
Codec() codec.MarshalUnmarshaler
// WithCodec returns a new Storm node that will use the given Codec.
WithCodec(codec codec.MarshalUnmarshaler) Node
// WithBatch returns a new Storm Node with the batch mode enabled.
WithBatch(enabled bool) Node
}
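A sketch of how nodes nest buckets and share transactions, reusing the hypothetical User type; Commit and Rollback come from the embedded Tx interface (bucket names are illustrative):

func nodeExamples(db *storm.DB) error {
	// every operation on this node runs relative to the tenants/acme bucket
	acme := db.From("tenants", "acme")
	if err := acme.Save(&User{Email: "jane@example.com"}); err != nil {
		return err
	}
	// Begin binds a new node to an explicit read-write transaction
	tx, err := acme.Begin(true)
	if err != nil {
		return err
	}
	defer tx.Rollback()
	if err := tx.Save(&User{Email: "john@example.com"}); err != nil {
		return err
	}
	return tx.Commit()
}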
// node is the implementation of the Node interface. It represents the API to a BoltDB bucket.
type node struct {
s *DB
// The root bucket. In the normal, simple case this will be empty.
rootBucket []string
// Transaction object. Nil if not in transaction
tx *bolt.Tx
// Codec of this node
codec codec.MarshalUnmarshaler
// Enable batch mode for read-write transaction, instead of update mode
batchMode bool
}
// From returns a new Storm Node with a new bucket root below the current.
// All DB operations on the new node will be executed relative to this bucket.
func (n node) From(addend ...string) Node {
n.rootBucket = append(n.rootBucket, addend...)
return &n
}
// WithTransaction returns a new Storm Node that will use the given transaction.
func (n node) WithTransaction(tx *bolt.Tx) Node {
n.tx = tx
return &n
}
// WithCodec returns a new Storm Node that will use the given Codec.
func (n node) WithCodec(codec codec.MarshalUnmarshaler) Node {
n.codec = codec
return &n
}
// WithBatch returns a new Storm Node with the batch mode enabled.
func (n node) WithBatch(enabled bool) Node {
n.batchMode = enabled
return &n
}
// Bucket returns the bucket name as a slice from the root.
// In the normal, simple case this will be empty.
func (n *node) Bucket() []string {
return n.rootBucket
}
// Codec returns the EncodeDecoder used by this instance of Storm
func (n *node) Codec() codec.MarshalUnmarshaler {
return n.codec
}
// Detects if already in a transaction, otherwise runs a read-write transaction.
// Uses batch mode if enabled.
func (n *node) readWriteTx(fn func(tx *bolt.Tx) error) error {
if n.tx != nil {
return fn(n.tx)
}
if n.batchMode {
return n.s.Bolt.Batch(func(tx *bolt.Tx) error {
return fn(tx)
})
}
return n.s.Bolt.Update(func(tx *bolt.Tx) error {
return fn(tx)
})
}
// Detects if already in a transaction, otherwise runs a read-only transaction.
func (n *node) readTx(fn func(tx *bolt.Tx) error) error {
if n.tx != nil {
return fn(n.tx)
}
return n.s.Bolt.View(func(tx *bolt.Tx) error {
return fn(tx)
})
}

@@ -1,82 +0,0 @@
package storm
import (
"os"
"github.com/asdine/storm/codec"
"github.com/asdine/storm/index"
"github.com/boltdb/bolt"
)
// BoltOptions is used to pass options to BoltDB.
func BoltOptions(mode os.FileMode, options *bolt.Options) func(*DB) error {
return func(d *DB) error {
d.boltMode = mode
d.boltOptions = options
return nil
}
}
// Codec is used to set a custom encoder and decoder. The default is JSON.
func Codec(c codec.MarshalUnmarshaler) func(*DB) error {
return func(d *DB) error {
d.codec = c
return nil
}
}
// Batch enables the use of batch instead of update for read-write transactions.
func Batch() func(*DB) error {
return func(d *DB) error {
d.batchMode = true
return nil
}
}
// AutoIncrement is used to enable automatic incrementation of empty integer ids.
// Deprecated: Set the increment tag to the id field instead.
func AutoIncrement() func(*DB) error {
return func(d *DB) error {
d.autoIncrement = true
return nil
}
}
// Root is used to set the root bucket. See also the From method.
func Root(root ...string) func(*DB) error {
return func(d *DB) error {
d.rootBucket = root
return nil
}
}
// UseDB allows Storm to use an existing open bolt.DB.
// Warning: storm.DB.Close() will close the bolt.DB instance.
func UseDB(b *bolt.DB) func(*DB) error {
return func(d *DB) error {
d.Path = b.Path()
d.Bolt = b
return nil
}
}
// Limit sets the maximum number of records to return
func Limit(limit int) func(*index.Options) {
return func(opts *index.Options) {
opts.Limit = limit
}
}
// Skip sets the number of records to skip
func Skip(offset int) func(*index.Options) {
return func(opts *index.Options) {
opts.Skip = offset
}
}
// Reverse will return the results in descending order
func Reverse() func(*index.Options) {
return func(opts *index.Options) {
opts.Reverse = true
}
}
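These options compose at the call site; a brief sketch, again with the hypothetical User type:

func pagingExamples(n storm.Node) error {
	var users []User
	// third page of ten records, in descending order
	return n.All(&users, storm.Limit(10), storm.Skip(20), storm.Reverse())
}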

@@ -1,78 +0,0 @@
package q
import (
"go/constant"
"go/token"
"reflect"
"strconv"
)
func compare(a, b interface{}, tok token.Token) bool {
vala := reflect.ValueOf(a)
valb := reflect.ValueOf(b)
ak := vala.Kind()
bk := valb.Kind()
switch {
// comparing nil values
case (ak == reflect.Ptr || ak == reflect.Slice || ak == reflect.Interface || ak == reflect.Invalid) &&
(bk == reflect.Ptr || bk == reflect.Slice || bk == reflect.Interface || bk == reflect.Invalid) &&
(!vala.IsValid() || vala.IsNil()) && (!valb.IsValid() || valb.IsNil()):
return true
case ak >= reflect.Int && ak <= reflect.Int64:
if bk >= reflect.Int && bk <= reflect.Int64 {
return constant.Compare(constant.MakeInt64(vala.Int()), tok, constant.MakeInt64(valb.Int()))
}
if bk == reflect.Float32 || bk == reflect.Float64 {
return constant.Compare(constant.MakeFloat64(float64(vala.Int())), tok, constant.MakeFloat64(valb.Float()))
}
if bk == reflect.String {
bla, err := strconv.ParseFloat(valb.String(), 64)
if err != nil {
return false
}
return constant.Compare(constant.MakeFloat64(float64(vala.Int())), tok, constant.MakeFloat64(bla))
}
case ak == reflect.Float32 || ak == reflect.Float64:
if bk == reflect.Float32 || bk == reflect.Float64 {
return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(valb.Float()))
}
if bk >= reflect.Int && bk <= reflect.Int64 {
return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(float64(valb.Int())))
}
if bk == reflect.String {
bla, err := strconv.ParseFloat(valb.String(), 64)
if err != nil {
return false
}
return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(bla))
}
case ak == reflect.String:
if bk == reflect.String {
return constant.Compare(constant.MakeString(vala.String()), tok, constant.MakeString(valb.String()))
}
}
if reflect.TypeOf(a).String() == "time.Time" && reflect.TypeOf(b).String() == "time.Time" {
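// Encode the ordering as integers: x is pinned to 1 and y becomes 1 when the
// times are equal, 2 when a is before b, and stays 0 when a is after b, so
// comparing x and y with tok mirrors comparing the two times with tok.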
var x, y int64
x = 1
if vala.MethodByName("Equal").Call([]reflect.Value{valb})[0].Bool() {
y = 1
} else if vala.MethodByName("Before").Call([]reflect.Value{valb})[0].Bool() {
y = 2
}
return constant.Compare(constant.MakeInt64(x), tok, constant.MakeInt64(y))
}
if tok == token.EQL {
return reflect.DeepEqual(a, b)
}
return false
}

@@ -1,32 +0,0 @@
package q
import (
"reflect"
)
type fieldMatcherDelegate struct {
FieldMatcher
Field string
}
// NewFieldMatcher creates a Matcher for a given field.
func NewFieldMatcher(field string, fm FieldMatcher) Matcher {
return fieldMatcherDelegate{Field: field, FieldMatcher: fm}
}
// FieldMatcher can be used in NewFieldMatcher as a simple way to create the
// most common Matcher: A Matcher that evaluates one field's value.
// For more complex scenarios, implement the Matcher interface directly.
type FieldMatcher interface {
MatchField(v interface{}) (bool, error)
}
func (r fieldMatcherDelegate) Match(i interface{}) (bool, error) {
v := reflect.Indirect(reflect.ValueOf(i))
return r.MatchValue(&v)
}
func (r fieldMatcherDelegate) MatchValue(v *reflect.Value) (bool, error) {
field := v.FieldByName(r.Field).Interface()
return r.MatchField(field)
}

@@ -1,51 +0,0 @@
package q
import (
"fmt"
"regexp"
"sync"
)
// Re creates a regexp matcher. It checks if the given field matches the given regexp.
// Note that this only supports fields of type string or []byte.
func Re(field string, re string) Matcher {
regexpCache.RLock()
if r, ok := regexpCache.m[re]; ok {
regexpCache.RUnlock()
return NewFieldMatcher(field, &regexpMatcher{r: r})
}
regexpCache.RUnlock()
regexpCache.Lock()
r, err := regexp.Compile(re)
if err == nil {
regexpCache.m[re] = r
}
regexpCache.Unlock()
return NewFieldMatcher(field, &regexpMatcher{r: r, err: err})
}
var regexpCache = struct {
sync.RWMutex
m map[string]*regexp.Regexp
}{m: make(map[string]*regexp.Regexp)}
type regexpMatcher struct {
r *regexp.Regexp
err error
}
func (r *regexpMatcher) MatchField(v interface{}) (bool, error) {
if r.err != nil {
return false, r.err
}
switch fieldValue := v.(type) {
case string:
return r.r.MatchString(fieldValue), nil
case []byte:
return r.r.Match(fieldValue), nil
default:
return false, fmt.Errorf("only string and []byte are supported for the regexp matcher, got %T", fieldValue)
}
}

@@ -1,222 +0,0 @@
// Package q contains a list of Matchers used to compare struct fields with values
package q
import (
"go/token"
"reflect"
)
// A Matcher is used to test against a record to see if it matches.
type Matcher interface {
// Match is used to test the criteria against a structure.
Match(interface{}) (bool, error)
}
// A ValueMatcher is used to test against a reflect.Value.
type ValueMatcher interface {
// MatchValue tests if the given reflect.Value matches.
// It is useful when the reflect.Value of an object already exists.
MatchValue(*reflect.Value) (bool, error)
}
type cmp struct {
value interface{}
token token.Token
}
func (c *cmp) MatchField(v interface{}) (bool, error) {
return compare(v, c.value, c.token), nil
}
type trueMatcher struct{}
func (*trueMatcher) Match(i interface{}) (bool, error) {
return true, nil
}
func (*trueMatcher) MatchValue(v *reflect.Value) (bool, error) {
return true, nil
}
type or struct {
children []Matcher
}
func (c *or) Match(i interface{}) (bool, error) {
v := reflect.Indirect(reflect.ValueOf(i))
return c.MatchValue(&v)
}
func (c *or) MatchValue(v *reflect.Value) (bool, error) {
for _, matcher := range c.children {
if vm, ok := matcher.(ValueMatcher); ok {
ok, err := vm.MatchValue(v)
if err != nil {
return false, err
}
if ok {
return true, nil
}
continue
}
ok, err := matcher.Match(v.Interface())
if err != nil {
return false, err
}
if ok {
return true, nil
}
}
return false, nil
}
type and struct {
children []Matcher
}
func (c *and) Match(i interface{}) (bool, error) {
v := reflect.Indirect(reflect.ValueOf(i))
return c.MatchValue(&v)
}
func (c *and) MatchValue(v *reflect.Value) (bool, error) {
for _, matcher := range c.children {
if vm, ok := matcher.(ValueMatcher); ok {
ok, err := vm.MatchValue(v)
if err != nil {
return false, err
}
if !ok {
return false, nil
}
continue
}
ok, err := matcher.Match(v.Interface())
if err != nil {
return false, err
}
if !ok {
return false, nil
}
}
return true, nil
}
type strictEq struct {
field string
value interface{}
}
func (s *strictEq) MatchField(v interface{}) (bool, error) {
return reflect.DeepEqual(v, s.value), nil
}
type in struct {
list interface{}
}
func (i *in) MatchField(v interface{}) (bool, error) {
ref := reflect.ValueOf(i.list)
if ref.Kind() != reflect.Slice {
return false, nil
}
c := cmp{
token: token.EQL,
}
for i := 0; i < ref.Len(); i++ {
c.value = ref.Index(i).Interface()
ok, err := c.MatchField(v)
if err != nil {
return false, err
}
if ok {
return true, nil
}
}
return false, nil
}
type not struct {
children []Matcher
}
func (n *not) Match(i interface{}) (bool, error) {
v := reflect.Indirect(reflect.ValueOf(i))
return n.MatchValue(&v)
}
func (n *not) MatchValue(v *reflect.Value) (bool, error) {
var err error
for _, matcher := range n.children {
vm, ok := matcher.(ValueMatcher)
if ok {
ok, err = vm.MatchValue(v)
} else {
ok, err = matcher.Match(v.Interface())
}
if err != nil {
return false, err
}
if ok {
return false, nil
}
}
return true, nil
}
// Eq matcher, checks if the given field is equal to the given value
func Eq(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &cmp{value: v, token: token.EQL})
}
// StrictEq matcher, checks if the given field is deeply equal to the given value
func StrictEq(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &strictEq{value: v})
}
// Gt matcher, checks if the given field is greater than the given value
func Gt(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &cmp{value: v, token: token.GTR})
}
// Gte matcher, checks if the given field is greater than or equal to the given value
func Gte(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &cmp{value: v, token: token.GEQ})
}
// Lt matcher, checks if the given field is less than the given value
func Lt(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &cmp{value: v, token: token.LSS})
}
// Lte matcher, checks if the given field is less than or equal to the given value
func Lte(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &cmp{value: v, token: token.LEQ})
}
// In matcher, checks if the given field matches one of the values in the given slice.
// v must be a slice.
func In(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &in{list: v})
}
// True matcher, always returns true
func True() Matcher { return &trueMatcher{} }
// Or matcher, checks if at least one of the given matchers matches the record
func Or(matchers ...Matcher) Matcher { return &or{children: matchers} }
// And matcher, checks if all of the given matchers matches the record
func And(matchers ...Matcher) Matcher { return &and{children: matchers} }
// Not matcher, checks if all of the given matchers return false
func Not(matchers ...Matcher) Matcher { return &not{children: matchers} }
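A sketch composing these matchers, together with Re from the regexp matcher above; Select combines the given matchers with And, and the field names refer to the hypothetical User type (assumes the github.com/asdine/storm/q import):

func matcherExamples(n storm.Node) error {
	var users []User
	// adults at example.com, excluding the banned group
	return n.Select(
		q.Gte("Age", 18),
		q.Re("Email", `@example\.com$`),
		q.Not(q.Eq("Group", "banned")),
	).Find(&users)
}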

@@ -1,231 +0,0 @@
package storm
import (
"github.com/asdine/storm/internal"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
)
// Select a list of records that match a list of matchers. Doesn't use indexes.
func (n *node) Select(matchers ...q.Matcher) Query {
tree := q.And(matchers...)
return newQuery(n, tree)
}
// Query is the low-level query engine used by Storm. It allows searches to be run through an entire bucket.
type Query interface {
// Skip matching records by the given number
Skip(int) Query
// Limit the results by the given number
Limit(int) Query
// Order by the given field.
OrderBy(string) Query
// Reverse the order of the results
Reverse() Query
// Bucket specifies the bucket name
Bucket(string) Query
// Find a list of matching records
Find(interface{}) error
// First gets the first matching record
First(interface{}) error
// Delete all matching records
Delete(interface{}) error
// Count all the matching records
Count(interface{}) (int, error)
// Raw returns all the matching records without decoding them
Raw() ([][]byte, error)
// Execute the given function for each raw element
RawEach(func([]byte, []byte) error) error
// Execute the given function for each element
Each(interface{}, func(interface{}) error) error
}
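The interface is chainable; a short sketch, assuming the q package import and the User type from earlier:

func queryExamples(n storm.Node) (int, error) {
	var oldest User
	// highest Age first, then take the first match
	if err := n.Select(q.Gt("Age", 30)).OrderBy("Age").Reverse().First(&oldest); err != nil {
		return 0, err
	}
	// Count runs the same match pipeline but only tallies the matching records
	return n.Select(q.Gt("Age", 30)).Count(&User{})
}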
func newQuery(n *node, tree q.Matcher) *query {
return &query{
skip: 0,
limit: -1,
node: n,
tree: tree,
sorter: newSorter(n),
}
}
type query struct {
limit int
skip int
reverse bool
tree q.Matcher
node *node
bucket string
sorter *sorter
}
func (q *query) Skip(nb int) Query {
q.skip = nb
return q
}
func (q *query) Limit(nb int) Query {
q.limit = nb
return q
}
func (q *query) OrderBy(field string) Query {
q.sorter.orderBy = field
return q
}
func (q *query) Reverse() Query {
q.reverse = true
q.sorter.reverse = true
return q
}
func (q *query) Bucket(bucketName string) Query {
q.bucket = bucketName
return q
}
func (q *query) Find(to interface{}) error {
sink, err := newListSink(q.node, to)
if err != nil {
return err
}
sink.limit = q.limit
sink.skip = q.skip
return q.runQuery(sink)
}
func (q *query) First(to interface{}) error {
sink, err := newFirstSink(q.node, to)
if err != nil {
return err
}
sink.skip = q.skip
return q.runQuery(sink)
}
func (q *query) Delete(kind interface{}) error {
sink, err := newDeleteSink(q.node, kind)
if err != nil {
return err
}
sink.limit = q.limit
sink.skip = q.skip
return q.runQuery(sink)
}
func (q *query) Count(kind interface{}) (int, error) {
sink, err := newCountSink(q.node, kind)
if err != nil {
return 0, err
}
sink.limit = q.limit
sink.skip = q.skip
err = q.runQuery(sink)
if err != nil {
return 0, err
}
return sink.counter, nil
}
func (q *query) Raw() ([][]byte, error) {
sink := newRawSink()
sink.limit = q.limit
sink.skip = q.skip
err := q.runQuery(sink)
if err != nil {
return nil, err
}
return sink.results, nil
}
func (q *query) RawEach(fn func([]byte, []byte) error) error {
sink := newRawSink()
sink.limit = q.limit
sink.skip = q.skip
sink.execFn = fn
return q.runQuery(sink)
}
func (q *query) Each(kind interface{}, fn func(interface{}) error) error {
sink, err := newEachSink(kind)
if err != nil {
return err
}
sink.limit = q.limit
sink.skip = q.skip
sink.execFn = fn
return q.runQuery(sink)
}
func (q *query) runQuery(sink sink) error {
var err error
if q.node.tx != nil {
err = q.query(q.node.tx, sink)
} else {
err = q.node.s.Bolt.Update(func(tx *bolt.Tx) error {
return q.query(tx, sink)
})
}
return err
}
func (q *query) query(tx *bolt.Tx, sink sink) error {
bucketName := q.bucket
if bucketName == "" {
bucketName = sink.bucketName()
}
bucket := q.node.GetBucket(tx, bucketName)
if q.limit == 0 {
return q.sorter.flush(sink)
}
if bucket != nil {
c := internal.Cursor{C: bucket.Cursor(), Reverse: q.reverse}
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
continue
}
stop, err := q.sorter.filter(sink, q.tree, bucket, k, v)
if err != nil {
return err
}
if stop {
break
}
}
}
return q.sorter.flush(sink)
}

@@ -1,99 +0,0 @@
package storm
import (
"bytes"
"github.com/boltdb/bolt"
)
// A BucketScanner scans a Node for a list of buckets
type BucketScanner interface {
// PrefixScan scans the root buckets for keys matching the given prefix.
PrefixScan(prefix string) []Node
// RangeScan scans the buckets in this node for keys within the given range.
RangeScan(min, max string) []Node
}
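A sketch of scanning bucket names, for example when buckets are keyed by sortable date strings (names illustrative):

func scanExamples(db *storm.DB) []storm.Node {
	// buckets named logs-2019-01-01 through logs-2019-01-31
	nodes := db.RangeScan("logs-2019-01-01", "logs-2019-01-31")
	// every bucket whose name starts with logs-2019
	return append(nodes, db.PrefixScan("logs-2019")...)
}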
// PrefixScan scans the buckets in this node for keys matching the given prefix.
func (n *node) PrefixScan(prefix string) []Node {
if n.tx != nil {
return n.prefixScan(n.tx, prefix)
}
var nodes []Node
n.readTx(func(tx *bolt.Tx) error {
nodes = n.prefixScan(tx, prefix)
return nil
})
return nodes
}
func (n *node) prefixScan(tx *bolt.Tx, prefix string) []Node {
var (
prefixBytes = []byte(prefix)
nodes []Node
c = n.cursor(tx)
)
for k, v := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, v = c.Next() {
if v != nil {
continue
}
nodes = append(nodes, n.From(string(k)))
}
return nodes
}
// RangeScan scans the buckets in this node over a range such as a sortable time range.
func (n *node) RangeScan(min, max string) []Node {
if n.tx != nil {
return n.rangeScan(n.tx, min, max)
}
var nodes []Node
n.readTx(func(tx *bolt.Tx) error {
nodes = n.rangeScan(tx, min, max)
return nil
})
return nodes
}
func (n *node) rangeScan(tx *bolt.Tx, min, max string) []Node {
var (
minBytes = []byte(min)
maxBytes = []byte(max)
nodes []Node
c = n.cursor(tx)
)
for k, v := c.Seek(minBytes); k != nil && bytes.Compare(k, maxBytes) <= 0; k, v = c.Next() {
if v != nil {
continue
}
nodes = append(nodes, n.From(string(k)))
}
return nodes
}
func (n *node) cursor(tx *bolt.Tx) *bolt.Cursor {
var c *bolt.Cursor
if len(n.rootBucket) > 0 {
c = n.GetBucket(tx).Cursor()
} else {
c = tx.Cursor()
}
return c
}

@@ -1,480 +0,0 @@
package storm
import (
"reflect"
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
rbt "github.com/emirpasic/gods/trees/redblacktree"
)
type item struct {
value *reflect.Value
bucket *bolt.Bucket
k []byte
v []byte
}
func newSorter(node Node) *sorter {
return &sorter{
node: node,
rbTree: rbt.NewWithStringComparator(),
}
}
type sorter struct {
node Node
rbTree *rbt.Tree
orderBy string
reverse bool
}
func (s *sorter) filter(snk sink, tree q.Matcher, bucket *bolt.Bucket, k, v []byte) (bool, error) {
rsnk, ok := snk.(reflectSink)
if !ok {
return snk.add(&item{
bucket: bucket,
k: k,
v: v,
})
}
newElem := rsnk.elem()
err := s.node.Codec().Unmarshal(v, newElem.Interface())
if err != nil {
return false, err
}
ok = tree == nil
if !ok {
ok, err = tree.Match(newElem.Interface())
if err != nil {
return false, err
}
}
if ok {
it := item{
bucket: bucket,
value: &newElem,
k: k,
v: v,
}
if s.orderBy != "" {
elm := reflect.Indirect(newElem).FieldByName(s.orderBy)
if !elm.IsValid() {
return false, ErrNotFound
}
raw, err := toBytes(elm.Interface(), s.node.Codec())
if err != nil {
return false, err
}
s.rbTree.Put(string(raw), &it)
return false, nil
}
return snk.add(&it)
}
return false, nil
}
func (s *sorter) flush(snk sink) error {
if s.orderBy == "" {
return snk.flush()
}
s.orderBy = ""
var err error
var stop bool
it := s.rbTree.Iterator()
if s.reverse {
it.End()
} else {
it.Begin()
}
for (s.reverse && it.Prev()) || (!s.reverse && it.Next()) {
item := it.Value().(*item)
stop, err = snk.add(item)
if err != nil {
return err
}
if stop {
break
}
}
return snk.flush()
}
type sink interface {
bucketName() string
flush() error
add(*item) (bool, error)
}
type reflectSink interface {
elem() reflect.Value
}
func newListSink(node Node, to interface{}) (*listSink, error) {
ref := reflect.ValueOf(to)
if ref.Kind() != reflect.Ptr || reflect.Indirect(ref).Kind() != reflect.Slice {
return nil, ErrSlicePtrNeeded
}
sliceType := reflect.Indirect(ref).Type()
elemType := sliceType.Elem()
if elemType.Kind() == reflect.Ptr {
elemType = elemType.Elem()
}
if elemType.Name() == "" {
return nil, ErrNoName
}
return &listSink{
node: node,
ref: ref,
isPtr: sliceType.Elem().Kind() == reflect.Ptr,
elemType: elemType,
name: elemType.Name(),
limit: -1,
}, nil
}
type listSink struct {
node Node
ref reflect.Value
results reflect.Value
elemType reflect.Type
name string
isPtr bool
skip int
limit int
idx int
}
func (l *listSink) elem() reflect.Value {
if l.results.IsValid() && l.idx < l.results.Len() {
return l.results.Index(l.idx).Addr()
}
return reflect.New(l.elemType)
}
func (l *listSink) bucketName() string {
return l.name
}
func (l *listSink) add(i *item) (bool, error) {
if l.limit == 0 {
return true, nil
}
if l.skip > 0 {
l.skip--
return false, nil
}
if !l.results.IsValid() {
l.results = reflect.MakeSlice(reflect.Indirect(l.ref).Type(), 0, 0)
}
if l.limit > 0 {
l.limit--
}
if l.idx == l.results.Len() {
if l.isPtr {
l.results = reflect.Append(l.results, *i.value)
} else {
l.results = reflect.Append(l.results, reflect.Indirect(*i.value))
}
}
l.idx++
return l.limit == 0, nil
}
func (l *listSink) flush() error {
if l.results.IsValid() && l.results.Len() > 0 {
reflect.Indirect(l.ref).Set(l.results)
return nil
}
return ErrNotFound
}
func newFirstSink(node Node, to interface{}) (*firstSink, error) {
ref := reflect.ValueOf(to)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return nil, ErrStructPtrNeeded
}
return &firstSink{
node: node,
ref: ref,
}, nil
}
type firstSink struct {
node Node
ref reflect.Value
skip int
found bool
}
func (f *firstSink) elem() reflect.Value {
return reflect.New(reflect.Indirect(f.ref).Type())
}
func (f *firstSink) bucketName() string {
return reflect.Indirect(f.ref).Type().Name()
}
func (f *firstSink) add(i *item) (bool, error) {
if f.skip > 0 {
f.skip--
return false, nil
}
reflect.Indirect(f.ref).Set(i.value.Elem())
f.found = true
return true, nil
}
func (f *firstSink) flush() error {
if !f.found {
return ErrNotFound
}
return nil
}
func newDeleteSink(node Node, kind interface{}) (*deleteSink, error) {
ref := reflect.ValueOf(kind)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return nil, ErrStructPtrNeeded
}
return &deleteSink{
node: node,
ref: ref,
}, nil
}
type deleteSink struct {
node Node
ref reflect.Value
skip int
limit int
removed int
}
func (d *deleteSink) elem() reflect.Value {
return reflect.New(reflect.Indirect(d.ref).Type())
}
func (d *deleteSink) bucketName() string {
return reflect.Indirect(d.ref).Type().Name()
}
func (d *deleteSink) add(i *item) (bool, error) {
if d.skip > 0 {
d.skip--
return false, nil
}
if d.limit > 0 {
d.limit--
}
info, err := extract(&d.ref)
if err != nil {
return false, err
}
for fieldName, fieldCfg := range info.Fields {
if fieldCfg.Index == "" {
continue
}
idx, err := getIndex(i.bucket, fieldCfg.Index, fieldName)
if err != nil {
return false, err
}
err = idx.RemoveID(i.k)
if err != nil {
if err == index.ErrNotFound {
return false, ErrNotFound
}
return false, err
}
}
d.removed++
return d.limit == 0, i.bucket.Delete(i.k)
}
func (d *deleteSink) flush() error {
if d.removed == 0 {
return ErrNotFound
}
return nil
}
func newCountSink(node Node, kind interface{}) (*countSink, error) {
ref := reflect.ValueOf(kind)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return nil, ErrStructPtrNeeded
}
return &countSink{
node: node,
ref: ref,
}, nil
}
type countSink struct {
node Node
ref reflect.Value
skip int
limit int
counter int
}
func (c *countSink) elem() reflect.Value {
return reflect.New(reflect.Indirect(c.ref).Type())
}
func (c *countSink) bucketName() string {
return reflect.Indirect(c.ref).Type().Name()
}
func (c *countSink) add(i *item) (bool, error) {
if c.skip > 0 {
c.skip--
return false, nil
}
if c.limit > 0 {
c.limit--
}
c.counter++
return c.limit == 0, nil
}
func (c *countSink) flush() error {
return nil
}
func newRawSink() *rawSink {
return &rawSink{
limit: -1,
}
}
type rawSink struct {
results [][]byte
skip int
limit int
execFn func([]byte, []byte) error
}
func (r *rawSink) add(i *item) (bool, error) {
if r.limit == 0 {
return true, nil
}
if r.skip > 0 {
r.skip--
return false, nil
}
if r.limit > 0 {
r.limit--
}
if r.execFn != nil {
err := r.execFn(i.k, i.v)
if err != nil {
return false, err
}
} else {
r.results = append(r.results, i.v)
}
return r.limit == 0, nil
}
func (r *rawSink) bucketName() string {
return ""
}
func (r *rawSink) flush() error {
return nil
}
func newEachSink(to interface{}) (*eachSink, error) {
ref := reflect.ValueOf(to)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return nil, ErrStructPtrNeeded
}
return &eachSink{
ref: ref,
}, nil
}
type eachSink struct {
skip int
limit int
ref reflect.Value
execFn func(interface{}) error
}
func (e *eachSink) elem() reflect.Value {
return reflect.New(reflect.Indirect(e.ref).Type())
}
func (e *eachSink) bucketName() string {
return reflect.Indirect(e.ref).Type().Name()
}
func (e *eachSink) add(i *item) (bool, error) {
if e.limit == 0 {
return true, nil
}
if e.skip > 0 {
e.skip--
return false, nil
}
if e.limit > 0 {
e.limit--
}
err := e.execFn(i.value.Interface())
if err != nil {
return false, err
}
return e.limit == 0, nil
}
func (e *eachSink) flush() error {
return nil
}

@@ -1,431 +0,0 @@
package storm
import (
"bytes"
"reflect"
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
)
// TypeStore stores user defined types in BoltDB
type TypeStore interface {
Finder
// Init creates the indexes and buckets for a given structure
Init(data interface{}) error
// ReIndex rebuilds all the indexes of a bucket
ReIndex(data interface{}) error
// Save a structure
Save(data interface{}) error
// Update a structure
Update(data interface{}) error
// UpdateField updates a single field
UpdateField(data interface{}, fieldName string, value interface{}) error
// Drop a bucket
Drop(data interface{}) error
// DeleteStruct deletes a structure from the associated bucket
DeleteStruct(data interface{}) error
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
Remove(data interface{}) error
}
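A sketch of the typical lifecycle, using the hypothetical User type:

func typeStoreExamples(n storm.Node) error {
	// Init is optional: it pre-creates the bucket and its indexes
	if err := n.Init(&User{}); err != nil {
		return err
	}
	u := User{Email: "jane@example.com", Group: "admin", Age: 30}
	// the increment tag fills the zero ID on save
	if err := n.Save(&u); err != nil {
		return err
	}
	return n.DeleteStruct(&u)
}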
// Init creates the indexes and buckets for a given structure
func (n *node) Init(data interface{}) error {
v := reflect.ValueOf(data)
cfg, err := extract(&v)
if err != nil {
return err
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.init(tx, cfg)
})
}
func (n *node) init(tx *bolt.Tx, cfg *structConfig) error {
bucket, err := n.CreateBucketIfNotExists(tx, cfg.Name)
if err != nil {
return err
}
// save node configuration in the bucket
_, err = newMeta(bucket, n)
if err != nil {
return err
}
for fieldName, fieldCfg := range cfg.Fields {
if fieldCfg.Index == "" {
continue
}
switch fieldCfg.Index {
case tagUniqueIdx:
_, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
case tagIdx:
_, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
default:
err = ErrIdxNotFound
}
if err != nil {
return err
}
}
return nil
}
func (n *node) ReIndex(data interface{}) error {
ref := reflect.ValueOf(data)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return ErrStructPtrNeeded
}
cfg, err := extract(&ref)
if err != nil {
return err
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.reIndex(tx, data, cfg)
})
}
func (n *node) reIndex(tx *bolt.Tx, data interface{}, cfg *structConfig) error {
root := n.WithTransaction(tx)
nodes := root.From(cfg.Name).PrefixScan(indexPrefix)
bucket := root.GetBucket(tx, cfg.Name)
if bucket == nil {
return ErrNotFound
}
for _, node := range nodes {
buckets := node.Bucket()
name := buckets[len(buckets)-1]
err := bucket.DeleteBucket([]byte(name))
if err != nil {
return err
}
}
total, err := root.Count(data)
if err != nil {
return err
}
for i := 0; i < total; i++ {
err = root.Select(q.True()).Skip(i).First(data)
if err != nil {
return err
}
err = root.Update(data)
if err != nil {
return err
}
}
return nil
}
// Save a structure
func (n *node) Save(data interface{}) error {
ref := reflect.ValueOf(data)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return ErrStructPtrNeeded
}
cfg, err := extract(&ref)
if err != nil {
return err
}
if cfg.ID.IsZero {
if !cfg.ID.IsInteger || (!n.s.autoIncrement && !cfg.ID.Increment) {
return ErrZeroID
}
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.save(tx, cfg, data, true)
})
}
func (n *node) save(tx *bolt.Tx, cfg *structConfig, data interface{}, edit bool) error {
bucket, err := n.CreateBucketIfNotExists(tx, cfg.Name)
if err != nil {
return err
}
// save node configuration in the bucket
meta, err := newMeta(bucket, n)
if err != nil {
return err
}
if cfg.ID.IsZero {
err = meta.increment(cfg.ID)
if err != nil {
return err
}
}
id, err := toBytes(cfg.ID.Value.Interface(), n.s.codec)
if err != nil {
return err
}
for fieldName, fieldCfg := range cfg.Fields {
if edit && !fieldCfg.IsID && fieldCfg.Increment && fieldCfg.IsInteger && fieldCfg.IsZero {
err = meta.increment(fieldCfg)
if err != nil {
return err
}
}
if fieldCfg.Index == "" {
continue
}
idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
if err != nil {
return err
}
if fieldCfg.IsZero {
err = idx.RemoveID(id)
if err != nil {
return err
}
continue
}
value, err := toBytes(fieldCfg.Value.Interface(), n.s.codec)
if err != nil {
return err
}
var found bool
idsSaved, err := idx.All(value, nil)
if err != nil {
return err
}
for _, idSaved := range idsSaved {
if bytes.Equal(idSaved, id) {
found = true
break
}
}
if found {
continue
}
err = idx.RemoveID(id)
if err != nil {
return err
}
err = idx.Add(value, id)
if err != nil {
if err == index.ErrAlreadyExists {
return ErrAlreadyExists
}
return err
}
}
raw, err := n.s.codec.Marshal(data)
if err != nil {
return err
}
return bucket.Put(id, raw)
}
// Update a structure
func (n *node) Update(data interface{}) error {
return n.update(data, func(ref *reflect.Value, current *reflect.Value, cfg *structConfig) error {
numfield := ref.NumField()
for i := 0; i < numfield; i++ {
f := ref.Field(i)
if ref.Type().Field(i).PkgPath != "" {
continue
}
zero := reflect.Zero(f.Type()).Interface()
actual := f.Interface()
if !reflect.DeepEqual(actual, zero) {
cf := current.Field(i)
cf.Set(f)
idxInfo, ok := cfg.Fields[ref.Type().Field(i).Name]
if ok {
idxInfo.Value = &cf
}
}
}
return nil
})
}
// UpdateField updates a single field
func (n *node) UpdateField(data interface{}, fieldName string, value interface{}) error {
return n.update(data, func(ref *reflect.Value, current *reflect.Value, cfg *structConfig) error {
f := current.FieldByName(fieldName)
if !f.IsValid() {
return ErrNotFound
}
tf, _ := current.Type().FieldByName(fieldName)
if tf.PkgPath != "" {
return ErrNotFound
}
v := reflect.ValueOf(value)
if v.Kind() != f.Kind() {
return ErrIncompatibleValue
}
f.Set(v)
idxInfo, ok := cfg.Fields[fieldName]
if ok {
idxInfo.Value = &f
idxInfo.IsZero = isZero(idxInfo.Value)
}
return nil
})
}
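Note the asymmetry the two callbacks above encode: Update copies only non-zero fields onto the stored record, so a field cannot be reset to its zero value through it; UpdateField exists for exactly that case. A sketch:

func updateExamples(n storm.Node) error {
	// only Age is non-zero here, so only Age is written
	if err := n.Update(&User{ID: 1, Age: 31}); err != nil {
		return err
	}
	// resetting a field to its zero value requires UpdateField
	return n.UpdateField(&User{ID: 1}, "Age", 0)
}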
func (n *node) update(data interface{}, fn func(*reflect.Value, *reflect.Value, *structConfig) error) error {
ref := reflect.ValueOf(data)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return ErrStructPtrNeeded
}
cfg, err := extract(&ref)
if err != nil {
return err
}
if cfg.ID.IsZero {
return ErrNoID
}
current := reflect.New(reflect.Indirect(ref).Type())
return n.readWriteTx(func(tx *bolt.Tx) error {
err = n.WithTransaction(tx).One(cfg.ID.Name, cfg.ID.Value.Interface(), current.Interface())
if err != nil {
return err
}
ref = ref.Elem()
cref := current.Elem()
err = fn(&ref, &cref, cfg)
if err != nil {
return err
}
return n.save(tx, cfg, current.Interface(), false)
})
}
// Drop a bucket
func (n *node) Drop(data interface{}) error {
var bucketName string
v := reflect.ValueOf(data)
if v.Kind() != reflect.String {
info, err := extract(&v)
if err != nil {
return err
}
bucketName = info.Name
} else {
bucketName = v.Interface().(string)
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.drop(tx, bucketName)
})
}
func (n *node) drop(tx *bolt.Tx, bucketName string) error {
bucket := n.GetBucket(tx)
if bucket == nil {
return tx.DeleteBucket([]byte(bucketName))
}
return bucket.DeleteBucket([]byte(bucketName))
}
// DeleteStruct deletes a structure from the associated bucket
func (n *node) DeleteStruct(data interface{}) error {
ref := reflect.ValueOf(data)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return ErrStructPtrNeeded
}
cfg, err := extract(&ref)
if err != nil {
return err
}
id, err := toBytes(cfg.ID.Value.Interface(), n.s.codec)
if err != nil {
return err
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.deleteStruct(tx, cfg, id)
})
}
func (n *node) deleteStruct(tx *bolt.Tx, cfg *structConfig, id []byte) error {
bucket := n.GetBucket(tx, cfg.Name)
if bucket == nil {
return ErrNotFound
}
for fieldName, fieldCfg := range cfg.Fields {
if fieldCfg.Index == "" {
continue
}
idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
if err != nil {
return err
}
err = idx.RemoveID(id)
if err != nil {
if err == index.ErrNotFound {
return ErrNotFound
}
return err
}
}
raw := bucket.Get(id)
if raw == nil {
return ErrNotFound
}
return bucket.Delete(id)
}
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
func (n *node) Remove(data interface{}) error {
return n.DeleteStruct(data)
}

@@ -1,330 +0,0 @@
package storm
import (
"bytes"
"encoding/binary"
"os"
"time"
"github.com/asdine/storm/codec"
"github.com/asdine/storm/codec/json"
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
)
const (
dbinfo = "__storm_db"
metadataBucket = "__storm_metadata"
)
// defaultCodec is the codec used when none is specified. Defaults to JSON.
var defaultCodec = json.Codec
// Open opens a database at the given path with optional Storm options.
func Open(path string, stormOptions ...func(*DB) error) (*DB, error) {
var err error
s := &DB{
Path: path,
codec: defaultCodec,
}
for _, option := range stormOptions {
if err = option(s); err != nil {
return nil, err
}
}
if s.boltMode == 0 {
s.boltMode = 0600
}
if s.boltOptions == nil {
s.boltOptions = &bolt.Options{Timeout: 1 * time.Second}
}
s.root = &node{s: s, rootBucket: s.rootBucket, codec: s.codec, batchMode: s.batchMode}
// skip if UseDB option is used
if s.Bolt == nil {
s.Bolt, err = bolt.Open(path, s.boltMode, s.boltOptions)
if err != nil {
return nil, err
}
err = s.checkVersion()
if err != nil {
return nil, err
}
}
return s, nil
}
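Because an option is just a `func(*DB) error` run before the Bolt file is opened, callers can tune the low-level settings. A sketch (the `withTimeout` helper is hypothetical, written as if inside package storm so it can reach the unexported fields):

```go
// withTimeout shortens the Bolt file-lock timeout from the 1s default.
func withTimeout(d time.Duration) func(*DB) error {
	return func(s *DB) error {
		s.boltOptions = &bolt.Options{Timeout: d}
		return nil
	}
}

func openExample() (*DB, error) {
	return Open("my.db", withTimeout(500*time.Millisecond))
}
```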
// DB is the wrapper around BoltDB. It contains an instance of BoltDB and uses it to perform all the
// needed operations
type DB struct {
// Path of the database file
Path string
// Handles encoding and decoding of objects
codec codec.MarshalUnmarshaler
// Bolt is still easily accessible
Bolt *bolt.DB
// Bolt file mode
boltMode os.FileMode
// Bolt options
boltOptions *bolt.Options
// Enable auto increment on empty integer fields
autoIncrement bool
// The root node that points to the root bucket.
root *node
// The root bucket name
rootBucket []string
// Enable batch mode for read-write transaction, instead of update mode
batchMode bool
}
// From returns a new Storm node with a new bucket root.
// All DB operations on the new node will be executed relative to the given
// bucket.
func (s *DB) From(root ...string) Node {
newNode := *s.root
newNode.rootBucket = root
return &newNode
}
// WithTransaction returns a new Storm node that will use the given transaction.
func (s *DB) WithTransaction(tx *bolt.Tx) Node {
return s.root.WithTransaction(tx)
}
// Bucket returns the root bucket name as a slice.
// In the normal, simple case this will be empty.
func (s *DB) Bucket() []string {
return s.root.Bucket()
}
// Close the database
func (s *DB) Close() error {
return s.Bolt.Close()
}
// Codec returns the EncodeDecoder used by this instance of Storm
func (s *DB) Codec() codec.MarshalUnmarshaler {
return s.codec
}
// WithCodec returns a new Storm node that will use the given codec.
func (s *DB) WithCodec(codec codec.MarshalUnmarshaler) Node {
n := s.From().(*node)
n.codec = codec
return n
}
// WithBatch returns a new Storm node with the batch mode enabled.
func (s *DB) WithBatch(enabled bool) Node {
n := s.From().(*node)
n.batchMode = enabled
return n
}
// Get a value from a bucket
func (s *DB) Get(bucketName string, key interface{}, to interface{}) error {
return s.root.Get(bucketName, key, to)
}
// Set a key/value pair into a bucket
func (s *DB) Set(bucketName string, key interface{}, value interface{}) error {
return s.root.Set(bucketName, key, value)
}
// Delete deletes a key from a bucket
func (s *DB) Delete(bucketName string, key interface{}) error {
return s.root.Delete(bucketName, key)
}
// GetBytes gets a raw value from a bucket.
func (s *DB) GetBytes(bucketName string, key interface{}) ([]byte, error) {
return s.root.GetBytes(bucketName, key)
}
// SetBytes sets a raw value into a bucket.
func (s *DB) SetBytes(bucketName string, key interface{}, value []byte) error {
return s.root.SetBytes(bucketName, key, value)
}
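A round-trip sketch of the key/value helpers; values pass through the node's codec (JSON by default), so any marshalable type works:

```go
func kvExample(db *DB) error {
	if err := db.Set("sessions", "user-42", map[string]string{"state": "active"}); err != nil {
		return err
	}
	var out map[string]string
	return db.Get("sessions", "user-42", &out) // out now holds {"state": "active"}
}
```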
// Save a structure
func (s *DB) Save(data interface{}) error {
return s.root.Save(data)
}
// PrefixScan scans the root buckets for keys matching the given prefix.
func (s *DB) PrefixScan(prefix string) []Node {
return s.root.PrefixScan(prefix)
}
// RangeScan scans the root buckets over a range such as a sortable time range.
func (s *DB) RangeScan(min, max string) []Node {
return s.root.RangeScan(min, max)
}
// Select a list of records that match a list of matchers. Doesn't use indexes.
func (s *DB) Select(matchers ...q.Matcher) Query {
return s.root.Select(matchers...)
}
// Range returns one or more records by the specified index within the specified range
func (s *DB) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
return s.root.Range(fieldName, min, max, to, options...)
}
// AllByIndex gets all the records of a bucket that are indexed in the specified index
func (s *DB) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
return s.root.AllByIndex(fieldName, to, options...)
}
// All gets all the records of a bucket
func (s *DB) All(to interface{}, options ...func(*index.Options)) error {
return s.root.All(to, options...)
}
// Count counts all the records of a bucket
func (s *DB) Count(data interface{}) (int, error) {
return s.root.Count(data)
}
// DeleteStruct deletes a structure from the associated bucket
func (s *DB) DeleteStruct(data interface{}) error {
return s.root.DeleteStruct(data)
}
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
func (s *DB) Remove(data interface{}) error {
return s.root.DeleteStruct(data)
}
// Drop a bucket
func (s *DB) Drop(data interface{}) error {
return s.root.Drop(data)
}
// Find returns one or more records by the specified index
func (s *DB) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
return s.root.Find(fieldName, value, to, options...)
}
// Init creates the indexes and buckets for a given structure
func (s *DB) Init(data interface{}) error {
return s.root.Init(data)
}
// ReIndex rebuilds all the indexes of a bucket
func (s *DB) ReIndex(data interface{}) error {
return s.root.ReIndex(data)
}
// One returns one record by the specified index
func (s *DB) One(fieldName string, value interface{}, to interface{}) error {
return s.root.One(fieldName, value, to)
}
// Begin starts a new transaction.
func (s *DB) Begin(writable bool) (Node, error) {
return s.root.Begin(writable)
}
// Rollback closes the transaction and ignores all previous updates.
func (s *DB) Rollback() error {
return s.root.Rollback()
}
// Commit writes all changes to disk.
func (s *DB) Commit() error {
return s.root.Commit()
}
// Update a structure
func (s *DB) Update(data interface{}) error {
return s.root.Update(data)
}
// UpdateField updates a single field
func (s *DB) UpdateField(data interface{}, fieldName string, value interface{}) error {
return s.root.UpdateField(data, fieldName, value)
}
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist.
func (s *DB) CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error) {
return s.root.CreateBucketIfNotExists(tx, bucket)
}
// GetBucket returns the given bucket below the current node.
func (s *DB) GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket {
return s.root.GetBucket(tx, children...)
}
func (s *DB) checkVersion() error {
var v string
err := s.Get(dbinfo, "version", &v)
if err != nil && err != ErrNotFound {
return err
}
// for now, we only set the current version if it doesn't exist, or if it is v0.5.0 or v0.6.0
if v == "" || v == "0.5.0" || v == "0.6.0" {
return s.Set(dbinfo, "version", Version)
}
return nil
}
// toBytes turns an interface into a slice of bytes
func toBytes(key interface{}, codec codec.MarshalUnmarshaler) ([]byte, error) {
if key == nil {
return nil, nil
}
switch t := key.(type) {
case []byte:
return t, nil
case string:
return []byte(t), nil
case int:
return numbertob(int64(t))
case uint:
return numbertob(uint64(t))
case int8, int16, int32, int64, uint8, uint16, uint32, uint64:
return numbertob(t)
default:
return codec.Marshal(key)
}
}
func numbertob(v interface{}) ([]byte, error) {
var buf bytes.Buffer
err := binary.Write(&buf, binary.BigEndian, v)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func numberfromb(raw []byte) (int64, error) {
r := bytes.NewReader(raw)
var to int64
err := binary.Read(r, binary.BigEndian, &to)
if err != nil {
return 0, err
}
return to, nil
}
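Big-endian matters here because Bolt compares keys bytewise: encoding numbers most-significant byte first keeps byte order aligned with numeric order for non-negative values (two's-complement negatives are the exception). A quick check:

```go
func orderCheck() int {
	a, _ := numbertob(int64(1))
	b, _ := numbertob(int64(256))
	return bytes.Compare(a, b) // -1: the key for 1 sorts before the key for 256
}
```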

View File

@ -1,52 +0,0 @@
package storm
import "github.com/boltdb/bolt"
// Tx is a transaction
type Tx interface {
// Commit writes all changes to disk.
Commit() error
// Rollback closes the transaction and ignores all previous updates.
Rollback() error
}
// Begin starts a new transaction.
func (n node) Begin(writable bool) (Node, error) {
var err error
n.tx, err = n.s.Bolt.Begin(writable)
if err != nil {
return nil, err
}
return &n, nil
}
// Rollback closes the transaction and ignores all previous updates.
func (n *node) Rollback() error {
if n.tx == nil {
return ErrNotInTransaction
}
err := n.tx.Rollback()
if err == bolt.ErrTxClosed {
return ErrNotInTransaction
}
return err
}
// Commit writes all changes to disk.
func (n *node) Commit() error {
if n.tx == nil {
return ErrNotInTransaction
}
err := n.tx.Commit()
if err == bolt.ErrTxClosed {
return ErrNotInTransaction
}
return err
}
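A sketch of manual transaction control built from these pieces (assuming the Node interface also exposes Save, as the DB wrappers above suggest, and reusing the hypothetical `User` type):

```go
func txExample(db *DB) error {
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	// After a successful Commit this returns ErrNotInTransaction,
	// which is safe to ignore in a defer.
	defer tx.Rollback()

	if err := tx.Save(&User{ID: 8, Email: "tx@example.com"}); err != nil {
		return err
	}
	return tx.Commit()
}
```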

View File

@ -1,4 +0,0 @@
package storm
// Version of Storm
const Version = "0.8.0-DEV"

View File

@ -1,203 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,15 +0,0 @@
MIT No Attribution
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,7 +0,0 @@
Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Lambda functions are made available under a modified MIT license.
See LICENSE-LAMBDACODE for details.
The remainder of the project is made available under the terms of the
Apache License, version 2.0. See LICENSE for details.

View File

@ -1,21 +0,0 @@
# Overview
This package provides input types for Lambda functions that process AWS events.
# Samples
* [API Gateway](README_ApiGatewayEvent.md)
* [Cognito Events](README_Cognito.md)
* [Config Events](README_Config.md)
* [DynamoDB Events](README_DynamoDB.md)
* [Kinesis Events](README_Kinesis.md)
* [Kinesis Firehose Events](README_KinesisFirehose.md)
* [S3 Events](README_S3.md)
* [SNS Events](README_SNS.md)

View File

@ -1,36 +0,0 @@
# Overview
API Gateway events consist of a request that was routed to a Lambda function by API Gateway. When this happens, API Gateway expects the result of the function to be the response that API Gateway should respond with.
# Sample Function
The following is a sample Lambda function that receives Amazon API Gateway event record data as an input, writes some of the record data to CloudWatch Logs, and responds with a 200 status and the same body as the request. (Note that by default anything written to Console will be logged as CloudWatch Logs events.)
```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
	"github.com/aws/aws-lambda-go/lambda"
)

func handleRequest(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	fmt.Printf("Processing request data for request %s.\n", request.RequestContext.RequestID)
	fmt.Printf("Body size = %d.\n", len(request.Body))

	fmt.Println("Headers:")
	for key, value := range request.Headers {
		fmt.Printf(" %s: %s\n", key, value)
	}

	return events.APIGatewayProxyResponse{Body: request.Body, StatusCode: 200}, nil
}

func main() {
	lambda.Start(handleRequest)
}
```

View File

@ -1,22 +0,0 @@
# Sample Function
The following is a sample Lambda function that receives Amazon Cognito event record data as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.)
```go
import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
)

func handleRequest(ctx context.Context, cognitoEvent events.CognitoEvent) {
	for datasetName, datasetRecord := range cognitoEvent.DatasetRecords {
		fmt.Printf("[%s -- %s] %s -> %s -> %s \n",
			cognitoEvent.EventType,
			datasetName,
			datasetRecord.OldValue,
			datasetRecord.Op,
			datasetRecord.NewValue)
	}
}
```

View File

@ -1,18 +0,0 @@
# Sample Function
The following is a sample Lambda function that receives Amazon Config event record data as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.)
```go
import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
)

func handleRequest(ctx context.Context, configEvent events.ConfigEvent) {
	fmt.Printf("AWS Config rule: %s\n", configEvent.ConfigRuleName)
	fmt.Printf("Invoking event JSON: %s\n", configEvent.InvokingEvent)
	fmt.Printf("Event version: %s\n", configEvent.Version)
}
```

View File

@ -1,79 +0,0 @@
# Sample Function
The following is a sample Lambda function that receives DynamoDB event data as input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs.)
```go
import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
)

func handleRequest(ctx context.Context, e events.DynamoDBEvent) {
	for _, record := range e.Records {
		fmt.Printf("Processing request data for event ID %s, type %s.\n", record.EventID, record.EventName)

		// Print new values for attributes of type String
		for name, value := range record.Change.NewImage {
			if value.DataType() == events.DataTypeString {
				fmt.Printf("Attribute name: %s, value: %s\n", name, value.String())
			}
		}
	}
}
```
# Reading attribute values
Stream notifications are delivered to the Lambda handler whenever data in the DynamoDB table is modified.
Depending on the Stream settings, a StreamRecord may contain the following data:
* Keys: key attributes of the modified item.
* NewImage: the entire item, as it appears after it was modified.
* OldImage: the entire item, as it appeared before it was modified.
The values for the attributes can be accessed using the AttributeValue type. For each type
supported natively by DynamoDB, there is a corresponding accessor method:
DynamoDB type | AttributeValue accessor method | Return type | DataType constant
---------------|--------------------------------|---------------------------|------------------
B (Binary) | Binary() | []byte | DataTypeBinary
BOOL (Boolean) | Boolean() | bool | DataTypeBoolean
BS (Binary Set)| BinarySet() | [][]byte | DataTypeBinarySet
L (List) | List() | []AttributeValue | DataTypeList
M (Map) | Map() | map[string]AttributeValue | DataTypeMap
N (Number) | Number() / Integer() / Float() | string / int64 / float64 | DataTypeNumber
NS (Number Set)| NumberSet() | []string | DataTypeNumberSet
NULL (Null) | IsNull() | bool | DataTypeNull
S (String) | String() | string | DataTypeString
SS (String Set)| StringSet() | []string | DataTypeStringSet
Calling the accessor method for the incorrect type will result in a panic. If the type needs to
be discovered at runtime, the DataType() method can be used to determine the correct accessor.
More information about DynamoDB data types can be seen [in this documentation](http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeValue.html).
The following example reads the values of the attributes name and age, whose types are known to be String and Number:
```go
import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
)

func handleRequest(ctx context.Context, e events.DynamoDBEvent) {
	for _, record := range e.Records {
		fmt.Printf("Processing request data for event ID %s, type %s.\n", record.EventID, record.EventName)

		// Print new values for attributes name and age
		name := record.Change.NewImage["name"].String()
		age, _ := record.Change.NewImage["age"].Integer()
		fmt.Printf("Name: %s, age: %d\n", name, age)
	}
}
```

View File

@ -1,21 +0,0 @@
# Sample Function
The following is a sample Lambda function that receives Amazon Kinesis event record data as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.)
```go
import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
)

func handler(ctx context.Context, kinesisEvent events.KinesisEvent) {
	for _, record := range kinesisEvent.Records {
		kinesisRecord := record.Kinesis
		dataBytes := kinesisRecord.Data
		dataText := string(dataBytes)
		fmt.Printf("%s Data = %s \n", record.EventName, dataText)
	}
}
```

View File

@ -1,36 +0,0 @@
# Sample Function
The following is a sample Lambda function that transforms Kinesis Firehose records by doing a ToUpper on the data.
```go
import (
	"fmt"
	"strings"

	"github.com/aws/aws-lambda-go/events"
)

func handleRequest(evnt events.KinesisFirehoseEvent) events.KinesisFirehoseResponse {
	fmt.Printf("InvocationID: %s\n", evnt.InvocationID)
	fmt.Printf("DeliveryStreamArn: %s\n", evnt.DeliveryStreamArn)
	fmt.Printf("Region: %s\n", evnt.Region)

	var response events.KinesisFirehoseResponse

	for _, record := range evnt.Records {
		fmt.Printf("RecordID: %s\n", record.RecordID)
		fmt.Printf("ApproximateArrivalTimestamp: %s\n", record.ApproximateArrivalTimestamp)

		// Transform data: ToUpper the data
		var transformedRecord events.KinesisFirehoseResponseRecord
		transformedRecord.RecordID = record.RecordID
		transformedRecord.Result = events.KinesisFirehoseTransformedStateOk
		transformedRecord.Data = []byte(strings.ToUpper(string(record.Data)))

		response.Records = append(response.Records, transformedRecord)
	}
	return response
}
```

View File

@ -1,18 +0,0 @@
# Sample Function
The following is a sample Lambda function that receives Amazon S3 event record data as an input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.)
```go
import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
)

func handler(ctx context.Context, s3Event events.S3Event) {
	for _, record := range s3Event.Records {
		s3 := record.S3
		fmt.Printf("[%s - %s] Bucket = %s, Key = %s \n", record.EventSource, record.EventTime, s3.Bucket.Name, s3.Object.Key)
	}
}
```

View File

@ -1,21 +0,0 @@
# Sample Function
The following is a sample Lambda function that receives Amazon SNS event record data as input and writes some of the record data to CloudWatch Logs. (Note that by default anything written to Console will be logged as CloudWatch Logs events.)
```go
import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
)

func handler(ctx context.Context, snsEvent events.SNSEvent) {
	for _, record := range snsEvent.Records {
		snsRecord := record.SNS
		fmt.Printf("[%s %s] Message = %s \n", record.EventSource, snsRecord.Timestamp, snsRecord.Message)
	}
}
```

View File

@ -1,63 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package events
// APIGatewayProxyRequest contains data coming from the API Gateway proxy
type APIGatewayProxyRequest struct {
Resource string `json:"resource"` // The resource path defined in API Gateway
Path string `json:"path"` // The url path for the caller
HTTPMethod string `json:"httpMethod"`
Headers map[string]string `json:"headers"`
QueryStringParameters map[string]string `json:"queryStringParameters"`
PathParameters map[string]string `json:"pathParameters"`
StageVariables map[string]string `json:"stageVariables"`
RequestContext APIGatewayProxyRequestContext `json:"requestContext"`
Body string `json:"body"`
IsBase64Encoded bool `json:"isBase64Encoded,omitempty"`
}
// APIGatewayProxyResponse configures the response to be returned by API Gateway for the request
type APIGatewayProxyResponse struct {
StatusCode int `json:"statusCode"`
Headers map[string]string `json:"headers"`
Body string `json:"body"`
IsBase64Encoded bool `json:"isBase64Encoded,omitempty"`
}
// APIGatewayProxyRequestContext contains the information to identify the AWS account and resources invoking the
// Lambda function. It also includes Cognito identity information for the caller.
type APIGatewayProxyRequestContext struct {
AccountID string `json:"accountId"`
ResourceID string `json:"resourceId"`
Stage string `json:"stage"`
RequestID string `json:"requestId"`
Identity APIGatewayRequestIdentity `json:"identity"`
ResourcePath string `json:"resourcePath"`
Authorizer map[string]interface{} `json:"authorizer"`
HTTPMethod string `json:"httpMethod"`
APIID string `json:"apiId"` // The API Gateway rest API Id
}
// APIGatewayRequestIdentity contains identity information for the request caller.
type APIGatewayRequestIdentity struct {
CognitoIdentityPoolID string `json:"cognitoIdentityPoolId"`
AccountID string `json:"accountId"`
CognitoIdentityID string `json:"cognitoIdentityId"`
Caller string `json:"caller"`
APIKey string `json:"apiKey"`
SourceIP string `json:"sourceIp"`
CognitoAuthenticationType string `json:"cognitoAuthenticationType"`
CognitoAuthenticationProvider string `json:"cognitoAuthenticationProvider"`
UserArn string `json:"userArn"`
UserAgent string `json:"userAgent"`
User string `json:"user"`
}
// APIGatewayCustomAuthorizerContext represents the expected format of an API Gateway custom authorizer response.
// Deprecated. Code should be updated to use the Authorizer map from APIGatewayProxyRequestContext. Ex: Authorizer["principalId"]
type APIGatewayCustomAuthorizerContext struct {
PrincipalID *string `json:"principalId"`
StringKey *string `json:"stringKey,omitempty"`
NumKey *int `json:"numKey,omitempty"`
BoolKey *bool `json:"boolKey,omitempty"`
}
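A sketch of the replacement pattern the deprecation note points at, reading a value out of the Authorizer map on the request context (the "principalId" key is whatever your custom authorizer emitted; the conventional name is shown here):

```go
func principalID(req APIGatewayProxyRequest) (string, bool) {
	v, ok := req.RequestContext.Authorizer["principalId"].(string)
	return v, ok
}
```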

View File

@ -1,457 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package events
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strconv"
)
// DynamoDBAttributeValue provides convenient access for a value stored in DynamoDB.
// For more information, please see http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeValue.html
type DynamoDBAttributeValue struct {
value anyValue
dataType DynamoDBDataType
}
// Binary provides access to an attribute of type Binary.
// Method panics if the attribute is not of type Binary.
func (av DynamoDBAttributeValue) Binary() []byte {
av.ensureType(DataTypeBinary)
return av.value.([]byte)
}
// Boolean provides access to an attribute of type Boolean.
// Method panics if the attribute is not of type Boolean.
func (av DynamoDBAttributeValue) Boolean() bool {
av.ensureType(DataTypeBoolean)
return av.value.(bool)
}
// BinarySet provides access to an attribute of type Binary Set.
// Method panics if the attribute is not of type BinarySet.
func (av DynamoDBAttributeValue) BinarySet() [][]byte {
av.ensureType(DataTypeBinarySet)
return av.value.([][]byte)
}
// List provides access to an attribute of type List. Each element
// of the list is a DynamoDBAttributeValue itself.
// Method panics if the attribute is not of type List.
func (av DynamoDBAttributeValue) List() []DynamoDBAttributeValue {
av.ensureType(DataTypeList)
return av.value.([]DynamoDBAttributeValue)
}
// Map provides access to an attribute of type Map. The keys are strings
// and the values are DynamoDBAttributeValue instances.
// Method panics if the attribute is not of type Map.
func (av DynamoDBAttributeValue) Map() map[string]DynamoDBAttributeValue {
av.ensureType(DataTypeMap)
return av.value.(map[string]DynamoDBAttributeValue)
}
// Number provides access to an attribute of type Number.
// DynamoDB sends the values as strings. For convenience please see also
// the methods Integer() and Float().
// Method panics if the attribute is not of type Number.
func (av DynamoDBAttributeValue) Number() string {
av.ensureType(DataTypeNumber)
return av.value.(string)
}
// Integer provides access to an attribute of type Number.
// DynamoDB sends the values as strings. For convenience this method
// provides conversion to int. If the value cannot be represented by
// a signed integer, err.Err = ErrRange and the returned value is the maximum magnitude integer
// of an int64 of the appropriate sign.
// Method panics if the attribute is not of type Number.
func (av DynamoDBAttributeValue) Integer() (int64, error) {
s, err := strconv.ParseFloat(av.Number(), 64)
return int64(s), err
}
// Float provides access to an attribute of type Number.
// DynamoDB sends the values as strings. For convenience this method
// provides conversion to float64.
// The returned value is the nearest floating point number rounded using IEEE754 unbiased rounding.
// If the number is more than 1/2 ULP away from the largest floating point number of the given size,
// the value returned is ±Inf, err.Err = ErrRange.
// Method panics if the attribute is not of type Number.
func (av DynamoDBAttributeValue) Float() (float64, error) {
s, err := strconv.ParseFloat(av.Number(), 64)
return s, err
}
// NumberSet provides access to an attribute of type Number Set.
// DynamoDB sends the numbers as strings.
// Method panics if the attribute is not of type Number Set.
func (av DynamoDBAttributeValue) NumberSet() []string {
av.ensureType(DataTypeNumberSet)
return av.value.([]string)
}
// String provides access to an attribute of type String.
// Method panics if the attribute is not of type String.
func (av DynamoDBAttributeValue) String() string {
av.ensureType(DataTypeString)
return av.value.(string)
}
// StringSet provides access to an attribute of type String Set.
// Method panics if the attribute is not of type String Set.
func (av DynamoDBAttributeValue) StringSet() []string {
av.ensureType(DataTypeStringSet)
return av.value.([]string)
}
// IsNull returns true if the attribute is of type Null.
func (av DynamoDBAttributeValue) IsNull() bool {
return av.value == nil
}
// DataType provides access to the DynamoDB type of the attribute
func (av DynamoDBAttributeValue) DataType() DynamoDBDataType {
return av.dataType
}
// NewStringAttribute creates a DynamoDBAttributeValue containing a String
func NewStringAttribute(value string) DynamoDBAttributeValue {
var av DynamoDBAttributeValue
av.value = value
av.dataType = DataTypeString
return av
}
// DynamoDBDataType specifies the type supported natively by DynamoDB for an attribute
type DynamoDBDataType int
const (
DataTypeBinary DynamoDBDataType = iota
DataTypeBoolean
DataTypeBinarySet
DataTypeList
DataTypeMap
DataTypeNumber
DataTypeNumberSet
DataTypeNull
DataTypeString
DataTypeStringSet
)
type anyValue interface{}
// UnsupportedDynamoDBTypeError is the error returned when trying to unmarshal a DynamoDB Attribute type not recognized by this library
type UnsupportedDynamoDBTypeError struct {
Type string
}
func (e UnsupportedDynamoDBTypeError) Error() string {
return fmt.Sprintf("unsupported DynamoDB attribute type, %v", e.Type)
}
// IncompatibleDynamoDBTypeError is the error passed in a panic when calling an accessor for an incompatible type
type IncompatibleDynamoDBTypeError struct {
Requested DynamoDBDataType
Actual DynamoDBDataType
}
func (e IncompatibleDynamoDBTypeError) Error() string {
return fmt.Sprintf("accessor called for incompatible type, requested type %v but actual type was %v", e.Requested, e.Actual)
}
func (av *DynamoDBAttributeValue) ensureType(expectedType DynamoDBDataType) {
if av.dataType != expectedType {
panic(IncompatibleDynamoDBTypeError{Requested: expectedType, Actual: av.dataType})
}
}
// MarshalJSON implements custom marshaling to be used by the standard json/encoding package
func (av DynamoDBAttributeValue) MarshalJSON() ([]byte, error) {
var buff bytes.Buffer
var err error
var b []byte
switch av.dataType {
case DataTypeBinary:
buff.WriteString(`{ "B":`)
b, err = json.Marshal(av.value.([]byte))
buff.Write(b)
case DataTypeBoolean:
buff.WriteString(`{ "BOOL":`)
b, err = json.Marshal(av.value.(bool))
buff.Write(b)
case DataTypeBinarySet:
buff.WriteString(`{ "BS":`)
b, err = json.Marshal(av.value.([][]byte))
buff.Write(b)
case DataTypeList:
buff.WriteString(`{ "L":`)
b, err = json.Marshal(av.value.([]DynamoDBAttributeValue))
buff.Write(b)
case DataTypeMap:
buff.WriteString(`{ "M":`)
b, err = json.Marshal(av.value.(map[string]DynamoDBAttributeValue))
buff.Write(b)
case DataTypeNumber:
buff.WriteString(`{ "N":`)
b, err = json.Marshal(av.value.(string))
buff.Write(b)
case DataTypeNumberSet:
buff.WriteString(`{ "NS":`)
b, err = json.Marshal(av.value.([]string))
buff.Write(b)
case DataTypeNull:
buff.WriteString(`{ "NULL": true `)
case DataTypeString:
buff.WriteString(`{ "S":`)
b, err = json.Marshal(av.value.(string))
buff.Write(b)
case DataTypeStringSet:
buff.WriteString(`{ "SS":`)
b, err = json.Marshal(av.value.([]string))
buff.Write(b)
}
buff.WriteString(`}`)
return buff.Bytes(), err
}
func unmarshalNull(target *DynamoDBAttributeValue) error {
target.value = nil
target.dataType = DataTypeNull
return nil
}
func unmarshalString(target *DynamoDBAttributeValue, value interface{}) error {
var ok bool
target.value, ok = value.(string)
target.dataType = DataTypeString
if !ok {
return errors.New("DynamoDBAttributeValue: S type should contain a string")
}
return nil
}
func unmarshalBinary(target *DynamoDBAttributeValue, value interface{}) error {
stringValue, ok := value.(string)
if !ok {
return errors.New("DynamoDBAttributeValue: B type should contain a base64 string")
}
binaryValue, err := base64.StdEncoding.DecodeString(stringValue)
if err != nil {
return err
}
target.value = binaryValue
target.dataType = DataTypeBinary
return nil
}
func unmarshalBoolean(target *DynamoDBAttributeValue, value interface{}) error {
booleanValue, ok := value.(bool)
if !ok {
return errors.New("DynamoDBAttributeValue: BOOL type should contain a boolean")
}
target.value = booleanValue
target.dataType = DataTypeBoolean
return nil
}
func unmarshalBinarySet(target *DynamoDBAttributeValue, value interface{}) error {
list, ok := value.([]interface{})
if !ok {
return errors.New("DynamoDBAttributeValue: BS type should contain a list of base64 strings")
}
binarySet := make([][]byte, len(list))
for index, element := range list {
var err error
elementString := element.(string)
binarySet[index], err = base64.StdEncoding.DecodeString(elementString)
if err != nil {
return err
}
}
target.value = binarySet
target.dataType = DataTypeBinarySet
return nil
}
func unmarshalList(target *DynamoDBAttributeValue, value interface{}) error {
list, ok := value.([]interface{})
if !ok {
return errors.New("DynamoDBAttributeValue: L type should contain a list")
}
DynamoDBAttributeValues := make([]DynamoDBAttributeValue, len(list))
for index, element := range list {
elementMap, ok := element.(map[string]interface{})
if !ok {
return errors.New("DynamoDBAttributeValue: element of a list is not an DynamoDBAttributeValue")
}
var elementDynamoDBAttributeValue DynamoDBAttributeValue
err := unmarshalDynamoDBAttributeValueMap(&elementDynamoDBAttributeValue, elementMap)
if err != nil {
return errors.New("DynamoDBAttributeValue: unmarshal of child DynamoDBAttributeValue failed")
}
DynamoDBAttributeValues[index] = elementDynamoDBAttributeValue
}
target.value = DynamoDBAttributeValues
target.dataType = DataTypeList
return nil
}
func unmarshalMap(target *DynamoDBAttributeValue, value interface{}) error {
m, ok := value.(map[string]interface{})
if !ok {
return errors.New("DynamoDBAttributeValue: M type should contain a map")
}
DynamoDBAttributeValues := make(map[string]DynamoDBAttributeValue)
for k, v := range m {
elementMap, ok := v.(map[string]interface{})
if !ok {
return errors.New("DynamoDBAttributeValue: element of a map is not an DynamoDBAttributeValue")
}
var elementDynamoDBAttributeValue DynamoDBAttributeValue
err := unmarshalDynamoDBAttributeValueMap(&elementDynamoDBAttributeValue, elementMap)
if err != nil {
return errors.New("DynamoDBAttributeValue: unmarshal of child DynamoDBAttributeValue failed")
}
DynamoDBAttributeValues[k] = elementDynamoDBAttributeValue
}
target.value = DynamoDBAttributeValues
target.dataType = DataTypeMap
return nil
}
func unmarshalNumber(target *DynamoDBAttributeValue, value interface{}) error {
var ok bool
target.value, ok = value.(string)
target.dataType = DataTypeNumber
if !ok {
return errors.New("DynamoDBAttributeValue: N type should contain a string")
}
return nil
}
func unmarshalNumberSet(target *DynamoDBAttributeValue, value interface{}) error {
list, ok := value.([]interface{})
if !ok {
return errors.New("DynamoDBAttributeValue: NS type should contain a list of strings")
}
numberSet := make([]string, len(list))
for index, element := range list {
numberSet[index], ok = element.(string)
if !ok {
return errors.New("DynamoDBAttributeValue: NS type should contain a list of strings")
}
}
target.value = numberSet
target.dataType = DataTypeNumberSet
return nil
}
func unmarshalStringSet(target *DynamoDBAttributeValue, value interface{}) error {
list, ok := value.([]interface{})
if !ok {
return errors.New("DynamoDBAttributeValue: SS type should contain a list of strings")
}
stringSet := make([]string, len(list))
for index, element := range list {
stringSet[index], ok = element.(string)
if !ok {
return errors.New("DynamoDBAttributeValue: SS type should contain a list of strings")
}
}
target.value = stringSet
target.dataType = DataTypeStringSet
return nil
}
func unmarshalDynamoDBAttributeValue(target *DynamoDBAttributeValue, typeLabel string, jsonValue interface{}) error {
switch typeLabel {
case "NULL":
return unmarshalNull(target)
case "B":
return unmarshalBinary(target, jsonValue)
case "BOOL":
return unmarshalBoolean(target, jsonValue)
case "BS":
return unmarshalBinarySet(target, jsonValue)
case "L":
return unmarshalList(target, jsonValue)
case "M":
return unmarshalMap(target, jsonValue)
case "N":
return unmarshalNumber(target, jsonValue)
case "NS":
return unmarshalNumberSet(target, jsonValue)
case "S":
return unmarshalString(target, jsonValue)
case "SS":
return unmarshalStringSet(target, jsonValue)
default:
target.value = nil
target.dataType = DataTypeNull
return UnsupportedDynamoDBTypeError{typeLabel}
}
}
// UnmarshalJSON unmarshals a JSON description of this DynamoDBAttributeValue
func (av *DynamoDBAttributeValue) UnmarshalJSON(b []byte) error {
var m map[string]interface{}
err := json.Unmarshal(b, &m)
if err != nil {
return err
}
return unmarshalDynamoDBAttributeValueMap(av, m)
}
func unmarshalDynamoDBAttributeValueMap(target *DynamoDBAttributeValue, m map[string]interface{}) error {
if m == nil {
return errors.New("DynamoDBAttributeValue: does not contain a map")
}
if len(m) != 1 {
return errors.New("DynamoDBAttributeValue: map must contain a single type")
}
for k, v := range m {
return unmarshalDynamoDBAttributeValue(target, k, v)
}
return nil
}
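A round-trip sketch tying the unmarshalers to the typed accessors above:

```go
func numberExample() (float64, error) {
	var av DynamoDBAttributeValue
	// {"N": "3.14"} selects unmarshalNumber, so DataType() is DataTypeNumber.
	if err := json.Unmarshal([]byte(`{"N": "3.14"}`), &av); err != nil {
		return 0, err
	}
	return av.Float()
}
```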

View File

@ -1,55 +0,0 @@
package events
import (
"bytes"
"compress/gzip"
"encoding/base64"
"encoding/json"
)
// CloudwatchLogsEvent represents raw data from a cloudwatch logs event
type CloudwatchLogsEvent struct {
AWSLogs CloudwatchLogsRawData `json:"awslogs"`
}
// CloudwatchLogsRawData contains gzipped base64 json representing the bulk
// of a cloudwatch logs event
type CloudwatchLogsRawData struct {
Data string `json:"data"`
}
// Parse decodes the base64, gzipped payload into a usable CloudwatchLogsData value
func (c CloudwatchLogsRawData) Parse() (d CloudwatchLogsData, err error) {
data, err := base64.StdEncoding.DecodeString(c.Data)
if err != nil {
return
}
zr, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return
}
buf := &bytes.Buffer{}
if _, err = buf.ReadFrom(zr); err != nil {
return
}
err = json.Unmarshal(buf.Bytes(), &d)
return
}
// CloudwatchLogsData is an unmarshal'd, ungzip'd, cloudwatch logs event
type CloudwatchLogsData struct {
Owner string `json:"owner"`
LogGroup string `json:"logGroup"`
LogStream string `json:"logStream"`
SubscriptionFilters []string `json:"subscriptionFilters"`
MessageType string `json:"messageType"`
LogEvents []CloudwatchLogsLogEvent `json:"logEvents"`
}
// CloudwatchLogsLogEvent represents a log entry from cloudwatch logs
type CloudwatchLogsLogEvent struct {
ID string `json:"id"`
Timestamp int64 `json:"timestamp"`
Message string `json:"message"`
}
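A minimal consumption sketch: decode the payload once via Parse, then walk the events.

```go
func countErrors(ev CloudwatchLogsEvent) (int, error) {
	data, err := ev.AWSLogs.Parse()
	if err != nil {
		return 0, err
	}
	count := 0
	for _, e := range data.LogEvents {
		if bytes.Contains([]byte(e.Message), []byte("ERROR")) {
			count++
		}
	}
	return count, nil
}
```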

View File

@ -1,21 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package events
// CognitoEvent contains data from an event sent from AWS Cognito
type CognitoEvent struct {
DatasetName string `json:"datasetName"`
DatasetRecords map[string]CognitoDatasetRecord `json:"datasetRecords"`
EventType string `json:"eventType"`
IdentityID string `json:"identityId"`
IdentityPoolID string `json:"identityPoolId"`
Region string `json:"region"`
Version int `json:"version"`
}
// CognitoDatasetRecord represents a record from an AWS Cognito event
type CognitoDatasetRecord struct {
NewValue string `json:"newValue"`
OldValue string `json:"oldValue"`
Op string `json:"op"`
}

View File

@ -1,17 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package events
// ConfigEvent contains data from an event sent from AWS Config
type ConfigEvent struct {
AccountID string `json:"accountId"` // The ID of the AWS account that owns the rule
ConfigRuleArn string `json:"configRuleArn"` // The ARN that AWS Config assigned to the rule
ConfigRuleID string `json:"configRuleId"`
ConfigRuleName string `json:"configRuleName"` // The name that you assigned to the rule that caused AWS Config to publish the event
EventLeftScope bool `json:"eventLeftScope"` // A boolean value that indicates whether the AWS resource to be evaluated has been removed from the rule's scope
ExecutionRoleArn string `json:"executionRoleArn"`
InvokingEvent string `json:"invokingEvent"` // If the event is published in response to a resource configuration change, this value contains a JSON configuration item
ResultToken string `json:"resultToken"` // A token that the function must pass to AWS Config with the PutEvaluations call
RuleParameters string `json:"ruleParameters"` // Key/value pairs that the function processes as part of its evaluation logic
Version string `json:"version"`
}

View File

@ -1,117 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package events
// The DynamoDBEvent stream event handed to Lambda
// http://docs.aws.amazon.com/lambda/latest/dg/eventsources.html#eventsources-ddb-update
type DynamoDBEvent struct {
Records []DynamoDBEventRecord `json:"Records"`
}
// DynamoDBEventRecord stores information about each record of a DynamoDb stream event
type DynamoDBEventRecord struct {
// The region in which the GetRecords request was received.
AWSRegion string `json:"awsRegion"`
// The main body of the stream record, containing all of the DynamoDB-specific
// fields.
Change DynamoDBStreamRecord `json:"dynamodb"`
// A globally unique identifier for the event that was recorded in this stream
// record.
EventID string `json:"eventID"`
// The type of data modification that was performed on the DynamoDB table:
//
// * INSERT - a new item was added to the table.
//
// * MODIFY - one or more of an existing item's attributes were modified.
//
// * REMOVE - the item was deleted from the table
EventName string `json:"eventName"`
// The AWS service from which the stream record originated. For DynamoDB Streams,
// this is aws:dynamodb.
EventSource string `json:"eventSource"`
// The version number of the stream record format. This number is updated whenever
// the structure of Record is modified.
//
// Client applications must not assume that eventVersion will remain at a particular
// value, as this number is subject to change at any time. In general, eventVersion
// will only increase as the low-level DynamoDB Streams API evolves.
EventVersion string `json:"eventVersion"`
// The event source ARN of DynamoDB
EventSourceArn string `json:"eventSourceARN"`
}
// A description of a single data modification that was performed on an item
// in a DynamoDB table.
type DynamoDBStreamRecord struct {
// The approximate date and time when the stream record was created, in UNIX
// epoch time (http://www.epochconverter.com/) format.
ApproximateCreationDateTime SecondsEpochTime `json:"ApproximateCreationDateTime,omitempty"`
// The primary key attribute(s) for the DynamoDB item that was modified.
Keys map[string]DynamoDBAttributeValue `json:"Keys,omitempty"`
// The item in the DynamoDB table as it appeared after it was modified.
NewImage map[string]DynamoDBAttributeValue `json:"NewImage,omitempty"`
// The item in the DynamoDB table as it appeared before it was modified.
OldImage map[string]DynamoDBAttributeValue `json:"OldImage,omitempty"`
// The sequence number of the stream record.
SequenceNumber string `json:"SequenceNumber"`
// The size of the stream record, in bytes.
SizeBytes int64 `json:"SizeBytes"`
// The type of data from the modified DynamoDB item that was captured in this
// stream record.
StreamViewType string `json:"StreamViewType"`
}
type DynamoDBKeyType string
const (
DynamoDBKeyTypeHash DynamoDBKeyType = "HASH"
DynamoDBKeyTypeRange DynamoDBKeyType = "RANGE"
)
type DynamoDBOperationType string
const (
DynamoDBOperationTypeInsert DynamoDBOperationType = "INSERT"
DynamoDBOperationTypeModify DynamoDBOperationType = "MODIFY"
DynamoDBOperationTypeRemove DynamoDBOperationType = "REMOVE"
)
type DynamoDBSharedIteratorType string
const (
DynamoDBShardIteratorTypeTrimHorizon DynamoDBSharedIteratorType = "TRIM_HORIZON"
DynamoDBShardIteratorTypeLatest DynamoDBSharedIteratorType = "LATEST"
DynamoDBShardIteratorTypeAtSequenceNumber DynamoDBSharedIteratorType = "AT_SEQUENCE_NUMBER"
DynamoDBShardIteratorTypeAfterSequenceNumber DynamoDBSharedIteratorType = "AFTER_SEQUENCE_NUMBER"
)
type DynamoDBStreamStatus string
const (
DynamoDBStreamStatusEnabling DynamoDBStreamStatus = "ENABLING"
DynamoDBStreamStatusEnabled DynamoDBStreamStatus = "ENABLED"
DynamoDBStreamStatusDisabling DynamoDBStreamStatus = "DISABLING"
DynamoDBStreamStatusDisabled DynamoDBStreamStatus = "DISABLED"
)
type DynamoDBStreamViewType string
const (
DynamoDBStreamViewTypeNewImage DynamoDBStreamViewType = "NEW_IMAGE" // the entire item, as it appeared after it was modified.
DynamoDBStreamViewTypeOldImage DynamoDBStreamViewType = "OLD_IMAGE" // the entire item, as it appeared before it was modified.
DynamoDBStreamViewTypeNewAndOldImages DynamoDBStreamViewType = "NEW_AND_OLD_IMAGES" // both the new and the old item images of the item.
DynamoDBStreamViewTypeKeysOnly DynamoDBStreamViewType = "KEYS_ONLY" // only the key attributes of the modified item.
)

View File

@ -1,59 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package events
import (
"encoding/json"
"time"
)
// SecondsEpochTime serializes a time.Time in JSON as a UNIX epoch time in seconds
type SecondsEpochTime struct {
time.Time
}
// MilliSecondsEpochTime serializes a time.Time in JSON as a UNIX epoch time in milliseconds.
type MilliSecondsEpochTime struct {
time.Time
}
const secondsToNanoSecondsFactor = 1000000000
const milliSecondsToNanoSecondsFactor = 1000000
func (e SecondsEpochTime) MarshalJSON() ([]byte, error) {
// UnixNano() returns the epoch in nanoseconds
unixTime := float64(e.UnixNano()) / float64(secondsToNanoSecondsFactor)
return json.Marshal(unixTime)
}
func (e *SecondsEpochTime) UnmarshalJSON(b []byte) error {
var epoch float64
err := json.Unmarshal(b, &epoch)
if err != nil {
return err
}
epochSec := int64(epoch)
epochNano := int64((epoch - float64(epochSec)) * float64(secondsToNanoSecondsFactor))
// time.Unix(sec, nsec) expects the epoch integral seconds in the first parameter
// and remaining nanoseconds in the second parameter
*e = SecondsEpochTime{time.Unix(epochSec, epochNano)}
return nil
}
func (e MilliSecondsEpochTime) MarshalJSON() ([]byte, error) {
// UnixNano() returns the epoch in nanoseconds
unixTimeMs := e.UnixNano() / milliSecondsToNanoSecondsFactor
return json.Marshal(unixTimeMs)
}
func (e *MilliSecondsEpochTime) UnmarshalJSON(b []byte) error {
var epoch int64
err := json.Unmarshal(b, &epoch)
if err != nil {
return err
}
*e = MilliSecondsEpochTime{time.Unix(epoch/1000, (epoch%1000)*1000000)}
return nil
}
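A sketch of the JSON forms the two wrappers accept and emit:

```go
func epochExample() ([]byte, error) {
	var s SecondsEpochTime
	// fractional seconds survive as nanoseconds
	if err := json.Unmarshal([]byte(`1480642020.5`), &s); err != nil {
		return nil, err
	}
	// the same instant re-emitted in milliseconds: 1480642020500
	return json.Marshal(MilliSecondsEpochTime{s.Time})
}
```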

View File

@ -1,34 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package events
// KinesisFirehoseEvent represents the input event from Amazon Kinesis Firehose. It is used as the input parameter.
type KinesisFirehoseEvent struct {
InvocationID string `json:"invocationId"`
DeliveryStreamArn string `json:"deliveryStreamArn"`
Region string `json:"region"`
Records []KinesisFirehoseEventRecord `json:"records"`
}
type KinesisFirehoseEventRecord struct {
RecordID string `json:"recordId"`
ApproximateArrivalTimestamp MilliSecondsEpochTime `json:"approximateArrivalTimestamp"`
Data []byte `json:"data"`
}
// Constants used for describing the transformation result
const (
KinesisFirehoseTransformedStateOk = "TRANSFORMED_STATE_OK"
KinesisFirehoseTransformedStateDropped = "TRANSFORMED_STATE_DROPPED"
KinesisFirehoseTransformedStateProcessingFailed = "TRANSFORMED_STATE_PROCESSINGFAILED"
)
type KinesisFirehoseResponse struct {
Records []KinesisFirehoseResponseRecord `json:"records"`
}
type KinesisFirehoseResponseRecord struct {
RecordID string `json:"recordId"`
Result string `json:"result"` // The status of the transformation. May be KinesisFirehoseTransformedStateOk, KinesisFirehoseTransformedStateDropped or KinesisFirehoseTransformedStateProcessingFailed
Data []byte `json:"data"`
}

View File

@ -1,27 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package events
type KinesisEvent struct {
Records []KinesisEventRecord `json:"Records"`
}
type KinesisEventRecord struct {
AwsRegion string `json:"awsRegion"`
EventID string `json:"eventID"`
EventName string `json:"eventName"`
EventSource string `json:"eventSource"`
EventSourceArn string `json:"eventSourceARN"`
EventVersion string `json:"eventVersion"`
InvokeIdentityArn string `json:"invokeIdentityArn"`
Kinesis KinesisRecord `json:"kinesis"`
}
type KinesisRecord struct {
ApproximateArrivalTimestamp SecondsEpochTime `json:"approximateArrivalTimestamp"`
Data []byte `json:"data"`
EncryptionType string `json:"encryptionType,omitempty"`
PartitionKey string `json:"partitionKey"`
SequenceNumber string `json:"sequenceNumber"`
KinesisSchemaVersion string `json:"kinesisSchemaVersion"`
}

View File

@ -1,53 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package events
import (
"time"
)
type S3Event struct {
Records []S3EventRecord `json:"Records"`
}
type S3EventRecord struct {
EventVersion string `json:"eventVersion"`
EventSource string `json:"eventSource"`
AWSRegion string `json:"awsRegion"`
EventTime time.Time `json:"eventTime"`
EventName string `json:"eventName"`
PrincipalID S3UserIdentity `json:"userIdentity"`
RequestParameters S3RequestParameters `json:"requestParameters"`
ResponseElements map[string]string `json:"responseElements"`
S3 S3Entity `json:"s3"`
}
type S3UserIdentity struct {
PrincipalID string `json:"principalId"`
}
type S3RequestParameters struct {
SourceIPAddress string `json:"sourceIPAddress"`
}
type S3Entity struct {
SchemaVersion string `json:"s3SchemaVersion"`
ConfigurationID string `json:"configurationId"`
Bucket S3Bucket `json:"bucket"`
Object S3Object `json:"object"`
}
type S3Bucket struct {
Name string `json:"name"`
OwnerIdentity S3UserIdentity `json:"ownerIdentity"`
Arn string `json:"arn"`
}
type S3Object struct {
Key string `json:"key"`
Size int64 `json:"size"`
URLDecodedKey string `json:"urlDecodedKey"`
VersionID string `json:"versionId"`
ETag string `json:"eTag"`
Sequencer string `json:"sequencer"`
}
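
A minimal consumer sketch for these S3 types, again assuming the `lambda.Start` entry point from this package; the log format is illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
	"github.com/aws/aws-lambda-go/lambda"
)

func handler(ctx context.Context, e events.S3Event) error {
	for _, rec := range e.Records {
		fmt.Printf("%s on s3://%s/%s (%d bytes)\n",
			rec.EventName, rec.S3.Bucket.Name, rec.S3.Object.Key, rec.S3.Object.Size)
	}
	return nil
}

func main() { lambda.Start(handler) }
```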

View File

@ -1,32 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package events
import (
"time"
)
type SNSEvent struct {
Records []SNSEventRecord `json:"Records"`
}
type SNSEventRecord struct {
EventVersion string `json:"EventVersion"`
EventSubscriptionArn string `json:"EventSubscriptionArn"`
EventSource string `json:"EventSource"`
SNS SNSEntity `json:"Sns"`
}
type SNSEntity struct {
Signature string `json:"Signature"`
MessageID string `json:"MessageId"`
Type string `json:"Type"`
TopicArn string `json:"TopicArn"`
MessageAttributes map[string]interface{} `json:"MessageAttributes"`
SignatureVersion string `json:"SignatureVersion"`
Timestamp time.Time `json:"Timestamp"`
SigningCertURL string `json:"SigningCertUrl"`
Message string `json:"Message"`
UnsubscribeURL string `json:"UnsubscribeUrl"`
Subject string `json:"Subject"`
}
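
`SNS.Message` is a plain string that frequently carries an application-defined JSON payload; in the sketch below the `alarm` type is hypothetical and stands in for whatever the publisher sends:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/aws/aws-lambda-go/events"
	"github.com/aws/aws-lambda-go/lambda"
)

// alarm is a hypothetical payload carried inside SNS.Message.
type alarm struct {
	Name  string `json:"name"`
	State string `json:"state"`
}

func handler(ctx context.Context, e events.SNSEvent) error {
	for _, rec := range e.Records {
		var a alarm
		if err := json.Unmarshal([]byte(rec.SNS.Message), &a); err != nil {
			return err
		}
		fmt.Printf("%s: %s is %s\n", rec.SNS.Subject, a.Name, a.State)
	}
	return nil
}

func main() { lambda.Start(handler) }
```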

View File

@ -1,52 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package lambda
import (
"fmt"
"log"
"net"
"net/rpc"
"os"
)
// Start takes a handler, and talks to an internal Lambda endpoint to pass Invoke requests to the handler. If a
// handler does not match one of the supported types, the lambda package will respond to new invokes served by the
// internal endpoint with an appropriate error message. Start blocks, and does not return after being called.
//
// Rules:
// * handler must be a function
// * handler may take between zero and two arguments.
// * If there are two arguments, the first argument must implement "context.Context".
// * handler may return between zero and two values.
// * If there are two return values, the second value must implement "error".
// * If there is one return value it must implement "error".
//
// func ()
// func () error
// func (TIn) error
// func () (TOut, error)
// func (TIn) (TOut, error)
// func (context.Context) error
// func (context.Context, TIn) error
// func (context.Context) (TOut, error)
// func (context.Context, TIn) (TOut, error)
//
// Where '''TIn''' and '''TOut''' are types compatible with the ''encoding/json'' standard library.
// See https://golang.org/pkg/encoding/json/#Unmarshal for how deserialization behaves
func Start(handler interface{}) {
port := os.Getenv("_LAMBDA_SERVER_PORT")
lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%s", port))
if err != nil {
log.Fatal(err)
}
wrappedHandler := newHandler(handler)
function := new(Function)
function.handler = wrappedHandler
err = rpc.Register(function)
if err != nil {
log.Fatal("failed to register handler function")
}
rpc.Accept(lis)
log.Fatal("accept should not have returned")
}
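
One of the accepted signatures from the list above, as a runnable sketch; the `input` type and greeting payload are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/lambda"
)

type input struct {
	Name string `json:"name"`
}

// hello matches the "func (context.Context, TIn) (TOut, error)" shape
// documented on Start.
func hello(ctx context.Context, in input) (string, error) {
	return fmt.Sprintf("Hello, %s!", in.Name), nil
}

func main() { lambda.Start(hello) }
```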

View File

@ -1,83 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package lambda
import (
"context"
"encoding/json"
"reflect"
"time"
"github.com/aws/aws-lambda-go/lambda/messages"
"github.com/aws/aws-lambda-go/lambdacontext"
)
type Function struct {
handler lambdaHandler
}
func (fn *Function) Ping(req *messages.PingRequest, response *messages.PingResponse) error {
*response = messages.PingResponse{}
return nil
}
func (fn *Function) Invoke(req *messages.InvokeRequest, response *messages.InvokeResponse) error {
defer func() {
if err := recover(); err != nil {
panicInfo := getPanicInfo(err)
response.Error = &messages.InvokeResponse_Error{
Message: panicInfo.Message,
Type: getErrorType(err),
StackTrace: panicInfo.StackTrace,
ShouldExit: true,
}
}
}()
deadline := time.Unix(req.Deadline.Seconds, req.Deadline.Nanos).UTC()
invokeContext, cancel := context.WithDeadline(context.Background(), deadline)
defer cancel()
lc := &lambdacontext.LambdaContext{
AwsRequestID: req.RequestId,
InvokedFunctionArn: req.InvokedFunctionArn,
}
if len(req.ClientContext) > 0 {
if err := json.Unmarshal(req.ClientContext, &lc.ClientContext); err != nil {
response.Error = lambdaErrorResponse(err)
return nil
}
}
invokeContext = lambdacontext.NewContext(invokeContext, lc)
invokeContext = context.WithValue(invokeContext, "x-amzn-trace-id", req.XAmznTraceId)
payload, err := fn.handler.Invoke(invokeContext, req.Payload)
if err != nil {
response.Error = lambdaErrorResponse(err)
return nil
}
response.Payload = payload
return nil
}
func getErrorType(err interface{}) string {
errorType := reflect.TypeOf(err)
if errorType.Kind() == reflect.Ptr {
return errorType.Elem().Name()
}
return errorType.Name()
}
func lambdaErrorResponse(invokeError error) *messages.InvokeResponse_Error {
var errorName string
if errorType := reflect.TypeOf(invokeError); errorType.Kind() == reflect.Ptr {
errorName = errorType.Elem().Name()
} else {
errorName = errorType.Name()
}
return &messages.InvokeResponse_Error{
Message: invokeError.Error(),
Type: errorName,
}
}

View File

@ -1,123 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package lambda
import (
"context"
"encoding/json"
"fmt"
"reflect"
)
// lambdaHandler is the generic function type
type lambdaHandler func(context.Context, []byte) (interface{}, error)
// Invoke calls the handler, and serializes the response.
// If the underlying handler returned an error, or an error occurs during serialization, error is returned.
func (handler lambdaHandler) Invoke(ctx context.Context, payload []byte) ([]byte, error) {
response, err := handler(ctx, payload)
if err != nil {
return nil, err
}
responseBytes, err := json.Marshal(response)
if err != nil {
return nil, err
}
return responseBytes, nil
}
func errorHandler(e error) lambdaHandler {
return func(ctx context.Context, event []byte) (interface{}, error) {
return nil, e
}
}
func validateArguments(handler reflect.Type) (bool, error) {
handlerTakesContext := false
if handler.NumIn() > 2 {
return false, fmt.Errorf("handlers may not take more than two arguments, but handler takes %d", handler.NumIn())
} else if handler.NumIn() > 0 {
contextType := reflect.TypeOf((*context.Context)(nil)).Elem()
argumentType := handler.In(0)
handlerTakesContext = argumentType.Implements(contextType)
if handler.NumIn() > 1 && !handlerTakesContext {
return false, fmt.Errorf("handler takes two arguments, but the first is not Context. got %s", argumentType.Kind())
}
}
return handlerTakesContext, nil
}
func validateReturns(handler reflect.Type) error {
errorType := reflect.TypeOf((*error)(nil)).Elem()
if handler.NumOut() > 2 {
return fmt.Errorf("handler may not return more than two values")
} else if handler.NumOut() > 1 {
if !handler.Out(1).Implements(errorType) {
return fmt.Errorf("handler returns two values, but the second does not implement error")
}
} else if handler.NumOut() == 1 {
if !handler.Out(0).Implements(errorType) {
return fmt.Errorf("handler returns a single value, but it does not implement error")
}
}
return nil
}
// newHandler creates the base lambda handler, which will do basic payload unmarshaling before deferring to handlerSymbol.
// If handlerSymbol is not a valid handler, the returned function will be a handler that just reports the validation error.
func newHandler(handlerSymbol interface{}) lambdaHandler {
if handlerSymbol == nil {
return errorHandler(fmt.Errorf("handler is nil"))
}
handler := reflect.ValueOf(handlerSymbol)
handlerType := reflect.TypeOf(handlerSymbol)
if handlerType.Kind() != reflect.Func {
return errorHandler(fmt.Errorf("handler kind %s is not %s", handlerType.Kind(), reflect.Func))
}
takesContext, err := validateArguments(handlerType)
if err != nil {
return errorHandler(err)
}
if err := validateReturns(handlerType); err != nil {
return errorHandler(err)
}
return func(ctx context.Context, payload []byte) (interface{}, error) {
// construct arguments
var args []reflect.Value
if takesContext {
args = append(args, reflect.ValueOf(ctx))
}
if (handlerType.NumIn() == 1 && !takesContext) || handlerType.NumIn() == 2 {
eventType := handlerType.In(handlerType.NumIn() - 1)
event := reflect.New(eventType)
if err := json.Unmarshal(payload, event.Interface()); err != nil {
return nil, err
}
args = append(args, event.Elem())
}
response := handler.Call(args)
// convert return values into (interface{}, error)
var err error
if len(response) > 0 {
if errVal, ok := response[len(response)-1].Interface().(error); ok {
err = errVal
}
}
var val interface{}
if len(response) > 1 {
val = response[0].Interface()
}
return val, err
}
}

View File

@ -1,43 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package messages
type PingRequest struct {
}
type PingResponse struct {
}
type InvokeRequest_Timestamp struct {
Seconds int64
Nanos int64
}
type InvokeRequest struct {
Payload []byte
RequestId string
XAmznTraceId string
Deadline InvokeRequest_Timestamp
InvokedFunctionArn string
CognitoIdentityId string
CognitoIdentityPoolId string
ClientContext []byte
}
type InvokeResponse struct {
Payload []byte
Error *InvokeResponse_Error
}
type InvokeResponse_Error struct {
Message string
Type string
StackTrace []*InvokeResponse_Error_StackFrame
ShouldExit bool
}
type InvokeResponse_Error_StackFrame struct {
Path string `json:"path"`
Line int32 `json:"line"`
Label string `json:"label"`
}

View File

@ -1,99 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
package lambda
import (
"fmt"
"runtime"
"strings"
"github.com/aws/aws-lambda-go/lambda/messages"
)
type panicInfo struct {
Message string // Value passed to panic call, converted to string
StackTrace []*messages.InvokeResponse_Error_StackFrame // Stack trace of the panic
}
func getPanicInfo(value interface{}) panicInfo {
message := getPanicMessage(value)
stack := getPanicStack()
return panicInfo{Message: message, StackTrace: stack}
}
func getPanicMessage(value interface{}) string {
return fmt.Sprintf("%v", value)
}
var defaultErrorFrameCount = 32
func getPanicStack() []*messages.InvokeResponse_Error_StackFrame {
s := make([]uintptr, defaultErrorFrameCount)
const framesToHide = 3 // this (getPanicStack) -> getPanicInfo -> handler defer func
n := runtime.Callers(framesToHide, s)
if n == 0 {
return make([]*messages.InvokeResponse_Error_StackFrame, 0)
}
s = s[:n]
return convertStack(s)
}
func convertStack(s []uintptr) []*messages.InvokeResponse_Error_StackFrame {
var converted []*messages.InvokeResponse_Error_StackFrame
frames := runtime.CallersFrames(s)
for {
frame, more := frames.Next()
formattedFrame := formatFrame(frame)
converted = append(converted, formattedFrame)
if !more {
break
}
}
return converted
}
func formatFrame(inputFrame runtime.Frame) *messages.InvokeResponse_Error_StackFrame {
path := inputFrame.File
line := int32(inputFrame.Line)
label := inputFrame.Function
// Strip GOPATH from path by counting the number of separators in label & path
//
// For example given this:
// GOPATH = /home/user
// path = /home/user/src/pkg/sub/file.go
// label = pkg/sub.Type.Method
//
// We want to set:
// path = pkg/sub/file.go
// label = Type.Method
i := len(path)
for n, g := 0, strings.Count(label, "/")+2; n < g; n++ {
i = strings.LastIndex(path[:i], "/")
if i == -1 {
// Something went wrong and path has fewer separators than we expected
// Abort and leave i as -1 to counteract the +1 below
break
}
}
path = path[i+1:] // Trim the initial /
// Strip the path from the function name as it's already in the path
label = label[strings.LastIndex(label, "/")+1:]
// Likewise strip the package name
label = label[strings.Index(label, ".")+1:]
return &messages.InvokeResponse_Error_StackFrame{
Path: path,
Line: line,
Label: label,
}
}

View File

@ -1,89 +0,0 @@
// Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Helpers for accessing context information from an Invoke request. Context information
// is stored in a https://golang.org/pkg/context/#Context. The functions FromContext and NewContext
// are used for retrieving and inserting an instance of LambdaContext.
package lambdacontext
import (
"context"
"os"
"strconv"
)
// LogGroupName is the name of the log group that contains the log streams of the current Lambda Function
var LogGroupName string
// LogStreamName name of the log stream that the current Lambda Function's logs will be sent to
var LogStreamName string
// FunctionName the name of the current Lambda Function
var FunctionName string
// MemoryLimitInMB is the configured memory limit for the current instance of the Lambda Function
var MemoryLimitInMB int
// FunctionVersion is the published version of the current instance of the Lambda Function
var FunctionVersion string
func init() {
LogGroupName = os.Getenv("AWS_LAMBDA_LOG_GROUP_NAME")
LogStreamName = os.Getenv("AWS_LAMBDA_LOG_STREAM_NAME")
FunctionName = os.Getenv("AWS_LAMBDA_FUNCTION_NAME")
if limit, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err != nil {
MemoryLimitInMB = 0
} else {
MemoryLimitInMB = limit
}
FunctionVersion = os.Getenv("AWS_LAMBDA_FUNCTION_VERSION")
}
// ClientApplication is metadata about the calling application.
type ClientApplication struct {
InstallationID string `json:"installation_id"`
AppTitle string `json:"app_title"`
AppVersionCode string `json:"app_version_code"`
AppPackageName string `json:"app_package_name"`
}
// ClientContext is information about the client application passed by the calling application.
type ClientContext struct {
Client ClientApplication
Env map[string]string `json:"env"`
Custom map[string]string `json:"custom"`
}
// CognitoIdentity is the cognito identity used by the calling application.
type CognitoIdentity struct {
CognitoIdentityID string
CognitoIdentityPoolID string
}
// LambdaContext is the set of metadata that is passed for every Invoke.
type LambdaContext struct {
AwsRequestID string
InvokedFunctionArn string
Identity CognitoIdentity
ClientContext ClientContext
}
// An unexported type to be used as the key for types in this package.
// This prevents collisions with keys defined in other packages.
type key struct{}
// The key for a LambdaContext in Contexts.
// Users of this package must use lambdacontext.NewContext and lambdacontext.FromContext
// instead of using this key directly.
var contextKey = &key{}
// NewContext returns a new Context that carries value lc.
func NewContext(parent context.Context, lc *LambdaContext) context.Context {
return context.WithValue(parent, contextKey, lc)
}
// FromContext returns the LambdaContext value stored in ctx, if any.
func FromContext(ctx context.Context) (*LambdaContext, bool) {
lc, ok := ctx.Value(contextKey).(*LambdaContext)
return lc, ok
}
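
A sketch of retrieving this metadata inside a handler, assuming the `lambda` and `lambdacontext` packages from this repository; the log lines are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-lambda-go/lambda"
	"github.com/aws/aws-lambda-go/lambdacontext"
)

func handler(ctx context.Context) error {
	// Per-invoke metadata comes from the context.
	if lc, ok := lambdacontext.FromContext(ctx); ok {
		fmt.Println("request:", lc.AwsRequestID, "arn:", lc.InvokedFunctionArn)
	}
	// Static configuration comes from the package-level variables.
	fmt.Println("memory limit (MB):", lambdacontext.MemoryLimitInMB)
	return nil
}

func main() { lambda.Start(handler) }
```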

View File

@ -1,20 +0,0 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

File diff suppressed because it is too large

View File

@ -1,292 +0,0 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
Value float64 `json:",string"`
Width float64 `json:",string"`
Delta float64 `json:",string"`
}
// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample
func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * r
}
return newStream(ƒ)
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * (s.n - r)
}
return newStream(ƒ)
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for quantile, epsilon := range targets {
if quantile*s.n <= r {
f = (2 * epsilon * r) / quantile
} else {
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
}
if f < m {
m = f
}
}
return m
}
return newStream(ƒ)
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
*stream
b Samples
sorted bool
}
func newStream(ƒ invariant) *Stream {
x := &stream{ƒ: ƒ}
return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
s.insert(Sample{Value: v, Width: 1})
}
func (s *Stream) insert(sample Sample) {
s.b = append(s.b, sample)
s.sorted = false
if len(s.b) == cap(s.b) {
s.flush()
}
}
// Query returns the computed value for the qth quantile. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
if !s.flushed() {
// Fast path when there hasn't been enough data for a flush;
// this also yields better accuracy for small sets of data.
l := len(s.b)
if l == 0 {
return 0
}
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}
s.maybeSort()
return s.b[i].Value
}
s.flush()
return s.stream.query(q)
}
// Merge merges samples into the underlying stream's samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
sort.Sort(samples)
s.stream.merge(samples)
}
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
s.stream.reset()
s.b = s.b[:0]
}
// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
if !s.flushed() {
return s.b
}
s.flush()
return s.stream.samples()
}
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
return len(s.b) + s.stream.count()
}
func (s *Stream) flush() {
s.maybeSort()
s.stream.merge(s.b)
s.b = s.b[:0]
}
func (s *Stream) maybeSort() {
if !s.sorted {
s.sorted = true
sort.Sort(s.b)
}
}
func (s *Stream) flushed() bool {
return len(s.stream.l) > 0
}
type stream struct {
n float64
l []Sample
ƒ invariant
}
func (s *stream) reset() {
s.l = s.l[:0]
s.n = 0
}
func (s *stream) insert(v float64) {
s.merge(Samples{{v, 1, 0}})
}
func (s *stream) merge(samples Samples) {
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
var r float64
i := 0
for _, sample := range samples {
for ; i < len(s.l); i++ {
c := s.l[i]
if c.Value > sample.Value {
// Insert at position i.
s.l = append(s.l, Sample{})
copy(s.l[i+1:], s.l[i:])
s.l[i] = Sample{
sample.Value,
sample.Width,
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
// TODO(beorn7): How to calculate delta correctly?
}
i++
goto inserted
}
r += c.Width
}
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
i++
inserted:
s.n += sample.Width
r += sample.Width
}
s.compress()
}
func (s *stream) count() int {
return int(s.n)
}
func (s *stream) query(q float64) float64 {
t := math.Ceil(q * s.n)
t += math.Ceil(s.ƒ(s, t) / 2)
p := s.l[0]
var r float64
for _, c := range s.l[1:] {
r += p.Width
if r+c.Width+c.Delta > t {
return p.Value
}
p = c
}
return p.Value
}
func (s *stream) compress() {
if len(s.l) < 2 {
return
}
x := s.l[len(s.l)-1]
xi := len(s.l) - 1
r := s.n - 1 - x.Width
for i := len(s.l) - 2; i >= 0; i-- {
c := s.l[i]
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
x.Width += c.Width
s.l[xi] = x
// Remove element at i.
copy(s.l[i:], s.l[i+1:])
s.l = s.l[:len(s.l)-1]
xi -= 1
} else {
x = c
xi = i
}
r -= c.Width
}
}
func (s *stream) samples() Samples {
samples := make(Samples, len(s.l))
copy(samples, s.l)
return samples
}

View File

@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 Ben Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,18 +0,0 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
default: build
race:
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
# go get github.com/kisielk/errcheck
errcheck:
@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
test:
@go test -v -cover .
@go test -v ./cmd/bolt
.PHONY: fmt test

View File

@ -1,915 +0,0 @@
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
====
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
fast, and reliable database for projects that don't require a full database
server such as Postgres or MySQL.
Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.
[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/
## Project Status
Bolt is stable, the API is fixed, and the file format is fixed. Full unit
test coverage and randomized black box testing are used to ensure database
consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.
## Table of Contents
- [Getting Started](#getting-started)
- [Installing](#installing)
- [Opening a database](#opening-a-database)
- [Transactions](#transactions)
- [Read-write transactions](#read-write-transactions)
- [Read-only transactions](#read-only-transactions)
- [Batch read-write transactions](#batch-read-write-transactions)
- [Managing transactions manually](#managing-transactions-manually)
- [Using buckets](#using-buckets)
- [Using key/value pairs](#using-keyvalue-pairs)
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
- [Iterating over keys](#iterating-over-keys)
- [Prefix scans](#prefix-scans)
- [Range scans](#range-scans)
- [ForEach()](#foreach)
- [Nested buckets](#nested-buckets)
- [Database backups](#database-backups)
- [Statistics](#statistics)
- [Read-Only Mode](#read-only-mode)
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
- [LevelDB, RocksDB](#leveldb-rocksdb)
- [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)
## Getting Started
### Installing
To start using Bolt, install Go and run `go get`:
```sh
$ go get github.com/boltdb/bolt/...
```
This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.
### Opening a database
The top-level object in Bolt is a `DB`. It is represented as a single file on
your disk and represents a consistent snapshot of your data.
To open your database, simply use the `bolt.Open()` function:
```go
package main
import (
"log"
"github.com/boltdb/bolt"
)
func main() {
// Open the my.db data file in your current directory.
// It will be created if it doesn't exist.
db, err := bolt.Open("my.db", 0600, nil)
if err != nil {
log.Fatal(err)
}
defer db.Close()
...
}
```
Please note that Bolt obtains a file lock on the data file so multiple processes
cannot open the same database at the same time. Opening an already open Bolt
database will cause it to hang until the other process closes it. To prevent
an indefinite wait you can pass a timeout option to the `Open()` function:
```go
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
```
### Transactions
Bolt allows only one read-write transaction at a time but allows as many
read-only transactions as you want at a time. Each transaction has a consistent
view of the data as it existed when the transaction started.
Individual transactions and all objects created from them (e.g. buckets, keys)
are not thread safe. To work with data in multiple goroutines you must start
a transaction for each one or use locking to ensure only one goroutine accesses
a transaction at a time. Creating a transaction from the `DB` is thread safe.
Read-only transactions and read-write transactions should not depend on one
another and generally shouldn't be opened simultaneously in the same goroutine.
This can cause a deadlock as the read-write transaction needs to periodically
re-map the data file but it cannot do so while a read-only transaction is open.
#### Read-write transactions
To start a read-write transaction, you can use the `DB.Update()` function:
```go
err := db.Update(func(tx *bolt.Tx) error {
...
return nil
})
```
Inside the closure, you have a consistent view of the database. You commit the
transaction by returning `nil` at the end. You can also rollback the transaction
at any point by returning an error. All database operations are allowed inside
a read-write transaction.
Always check the return error as it will report any disk failures that can cause
your transaction to not complete. If you return an error within your closure
it will be passed through.
#### Read-only transactions
To start a read-only transaction, you can use the `DB.View()` function:
```go
err := db.View(func(tx *bolt.Tx) error {
...
return nil
})
```
You also get a consistent view of the database within this closure, however,
no mutating operations are allowed within a read-only transaction. You can only
retrieve buckets, retrieve values, and copy the database within a read-only
transaction.
#### Batch read-write transactions
Each `DB.Update()` waits for disk to commit the writes. This overhead
can be minimized by combining multiple updates with the `DB.Batch()`
function:
```go
err := db.Batch(func(tx *bolt.Tx) error {
...
return nil
})
```
Concurrent Batch calls are opportunistically combined into larger
transactions. Batch is only useful when there are multiple goroutines
calling it.
The trade-off is that `Batch` can call the given
function multiple times, if parts of the transaction fail. The
function must be idempotent and side effects must take effect only
after a successful return from `DB.Batch()`.
For example: don't display messages from inside the function, instead
set variables in the enclosing scope:
```go
var id uint64
err := db.Batch(func(tx *bolt.Tx) error {
// Find last key in bucket, decode as bigendian uint64, increment
// by one, encode back to []byte, and add new key.
...
id = newValue
return nil
})
if err != nil {
return ...
}
fmt.Printf("Allocated ID %d\n", id)
```
#### Managing transactions manually
The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
function. These helper functions will start the transaction, execute a function,
and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions.
However, sometimes you may want to manually start and end your transactions.
You can use the `DB.Begin()` function directly but **please** be sure to close
the transaction.
```go
// Start a writable transaction.
tx, err := db.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
// Use the transaction...
_, err = tx.CreateBucket([]byte("MyBucket"))
if err != nil {
return err
}
// Commit the transaction and check for error.
if err := tx.Commit(); err != nil {
return err
}
```
The first argument to `DB.Begin()` is a boolean stating if the transaction
should be writable.
### Using buckets
Buckets are collections of key/value pairs within the database. All keys in a
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
function:
```go
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("MyBucket"))
if err != nil {
return fmt.Errorf("create bucket: %s", err)
}
return nil
})
```
You can also create a bucket only if it doesn't exist by using the
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
function for all your top-level buckets after you open your database so you can
guarantee that they exist for future transactions.
To delete a bucket, simply call the `Tx.DeleteBucket()` function.
### Using key/value pairs
To save a key/value pair to a bucket, use the `Bucket.Put()` function:
```go
db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("MyBucket"))
err := b.Put([]byte("answer"), []byte("42"))
return err
})
```
This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
bucket. To retrieve this value, we can use the `Bucket.Get()` function:
```go
db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("MyBucket"))
v := b.Get([]byte("answer"))
fmt.Printf("The answer is: %s\n", v)
return nil
})
```
The `Get()` function does not return an error because its operation is
guaranteed to work (unless there is some kind of system failure). If the key
exists then it will return its byte slice value. If it doesn't exist then it
will return `nil`. It's important to note that you can have a zero-length value
set to a key which is different than the key not existing.
Use the `Bucket.Delete()` function to delete a key from the bucket.
Please note that values returned from `Get()` are only valid while the
transaction is open. If you need to use a value outside of the transaction
then you must use `copy()` to copy it to another byte slice.
### Autoincrementing integer for the bucket
By using the `NextSequence()` function, you can let Bolt determine a sequence
which can be used as the unique identifier for your key/value pairs. See the
example below.
```go
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
func (s *Store) CreateUser(u *User) error {
return s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the users bucket.
// This should be created when the DB is first opened.
b := tx.Bucket([]byte("users"))
// Generate ID for the user.
// This returns an error only if the Tx is closed or not writeable.
// That can't happen in an Update() call so I ignore the error check.
id, _ := b.NextSequence()
u.ID = int(id)
// Marshal user data into bytes.
buf, err := json.Marshal(u)
if err != nil {
return err
}
// Persist bytes to users bucket.
return b.Put(itob(u.ID), buf)
})
}
// itob returns an 8-byte big endian representation of v.
func itob(v int) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(v))
return b
}
type User struct {
ID int
...
}
```
### Iterating over keys
Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
iteration over these keys extremely fast. To iterate over keys we'll use a
`Cursor`:
```go
db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
b := tx.Bucket([]byte("MyBucket"))
c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
fmt.Printf("key=%s, value=%s\n", k, v)
}
return nil
})
```
The cursor allows you to move to a specific point in the list of keys and move
forward or backward through the keys one at a time.
The following functions are available on the cursor:
```
First() Move to the first key.
Last() Move to the last key.
Seek() Move to a specific key.
Next() Move to the next key.
Prev() Move to the previous key.
```
Each of those functions has a return signature of `(key []byte, value []byte)`.
When you have iterated to the end of the cursor then `Next()` will return a
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
before calling `Next()` or `Prev()`. If you do not seek to a position then
these functions will return a `nil` key.
During iteration, if the key is non-`nil` but the value is `nil`, that means
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
access the sub-bucket.
#### Prefix scans
To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
```go
db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
c := tx.Bucket([]byte("MyBucket")).Cursor()
prefix := []byte("1234")
for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
fmt.Printf("key=%s, value=%s\n", k, v)
}
return nil
})
```
#### Range scans
Another common use case is scanning over a range such as a time range. If you
use a sortable time encoding such as RFC3339 then you can query a specific
date range like this:
```go
db.View(func(tx *bolt.Tx) error {
// Assume our events bucket exists and has RFC3339 encoded time keys.
c := tx.Bucket([]byte("Events")).Cursor()
// Our time range spans the 90's decade.
min := []byte("1990-01-01T00:00:00Z")
max := []byte("2000-01-01T00:00:00Z")
// Iterate over the 90's.
for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
fmt.Printf("%s: %s\n", k, v)
}
return nil
})
```
Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
#### ForEach()
You can also use the function `ForEach()` if you know you'll be iterating over
all the keys in a bucket:
```go
db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
b := tx.Bucket([]byte("MyBucket"))
// ForEach returns the first error from the callback, so propagate it.
return b.ForEach(func(k, v []byte) error {
fmt.Printf("key=%s, value=%s\n", k, v)
return nil
})
})
```
Please note that keys and values in `ForEach()` are only valid while
the transaction is open. If you need to use a key or value outside of
the transaction, you must use `copy()` to copy it to another byte
slice.
### Nested buckets
You can also store a bucket in a key to create nested buckets. The API is the
same as the bucket management API on the `DB` object:
```go
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```
Say you had a multi-tenant application where the root-level bucket was the account bucket. Inside this bucket is a sequence of accounts, which are themselves buckets. Inside each account bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings.
```go
// createUser creates a new user in the given account.
func createUser(accountID int, u *User) error {
// Start the transaction.
tx, err := db.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
// Retrieve the root bucket for the account.
// Assume this has already been created when the account was set up.
root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10)))
// Setup the users bucket.
bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
if err != nil {
return err
}
// Generate an ID for the new user.
userID, err := bkt.NextSequence()
if err != nil {
return err
}
u.ID = userID
// Marshal and save the encoded user.
if buf, err := json.Marshal(u); err != nil {
return err
} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
return err
}
// Commit the transaction.
if err := tx.Commit(); err != nil {
return err
}
return nil
}
```
### Database backups
Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
function to write a consistent view of the database to a writer. If you call
this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.
By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.
One common use case is to backup over HTTP so you can use tools like `cURL` to
do database backups:
```go
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
err := db.View(func(tx *bolt.Tx) error {
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
_, err := tx.WriteTo(w)
return err
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
```
Then you can backup using this command:
```sh
$ curl http://localhost/backup > my.db
```
Or you can open your browser to `http://localhost/backup` and it will download
automatically.
If you want to backup to another file you can use the `Tx.CopyFile()` helper
function.
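For example, a minimal sketch of a hot backup to a local file (the destination path is illustrative):
```go
err := db.View(func(tx *bolt.Tx) error {
	return tx.CopyFile("/path/to/backup.db", 0600)
})
```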
### Statistics
The database keeps a running count of many of the internal operations it
performs so you can better understand what's going on. By grabbing a snapshot
of these stats at two points in time we can see what operations were performed
in that time range.
For example, we could start a goroutine to log stats every 10 seconds:
```go
go func() {
// Grab the initial stats.
prev := db.Stats()
for {
// Wait for 10s.
time.Sleep(10 * time.Second)
// Grab the current stats and diff them.
stats := db.Stats()
diff := stats.Sub(&prev)
// Encode stats to JSON and print to STDERR.
json.NewEncoder(os.Stderr).Encode(diff)
// Save stats for the next loop.
prev = stats
}
}()
```
It's also useful to pipe these stats to a service such as statsd for monitoring
or to provide an HTTP endpoint that will perform a fixed-length sample.
### Read-Only Mode
Sometimes it is useful to create a shared, read-only Bolt database. To do this,
set the `Options.ReadOnly` flag when opening your database. Read-only mode
uses a shared lock to allow multiple processes to read from the database but
it will block any processes from opening the database in read-write mode.
```go
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
if err != nil {
log.Fatal(err)
}
```
### Mobile Use (iOS/Android)
Bolt is able to run on mobile devices by leveraging the binding feature of the
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
contain your database logic and a reference to a `*bolt.DB` with an initializing
constructor that takes in a filepath where the database file will be stored.
Neither Android nor iOS requires extra permissions or cleanup when using this method.
```go
func NewBoltDB(filepath string) *BoltDB {
db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
if err != nil {
log.Fatal(err)
}
return &BoltDB{db}
}
type BoltDB struct {
db *bolt.DB
...
}
func (b *BoltDB) Path() string {
return b.db.Path()
}
func (b *BoltDB) Close() {
b.db.Close()
}
```
Database logic should be defined as methods on this wrapper struct.
To initialize this struct from the native language, use the snippets below. Both
platforms now sync their local storage to the cloud, so the snippets also disable
that behavior for the database file:
#### Android
```java
String path;
if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
path = getNoBackupFilesDir().getAbsolutePath();
} else{
path = getFilesDir().getAbsolutePath();
}
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
```
#### iOS
```objc
- (void)demo {
NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
NSUserDomainMask,
YES) objectAtIndex:0];
GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
[self addSkipBackupAttributeToItemAtPath:demo.path];
//Some DB Logic would go here
[demo close];
}
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
{
NSURL* URL= [NSURL fileURLWithPath: filePathString];
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
NSError *error = nil;
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
forKey: NSURLIsExcludedFromBackupKey error: &error];
if(!success){
NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
}
return success;
}
```
## Resources
For more information on getting started with Bolt, check out the following articles:
* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
## Comparison with other databases
### Postgres, MySQL, & other relational databases
Relational databases structure data into rows and are only accessible through
the use of SQL. This approach provides flexibility in how you store and query
your data but also incurs overhead in parsing and planning SQL statements. Bolt
accesses all data by a byte slice key. This makes Bolt fast to read and write
data by key but provides no built-in support for joining values together.
Most relational databases (with the exception of SQLite) are standalone servers
that run separately from your application. This gives your systems
flexibility to connect multiple application servers to a single database
server but also adds overhead in serializing and transporting data over the
network. Bolt runs as a library included in your application so all data access
has to go through your application's process. This brings data closer to your
application but limits multi-process access to the data.
### LevelDB, RocksDB
LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
they are libraries bundled into the application, however, their underlying
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
random writes by using a write ahead log and multi-tiered, sorted files called
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
have trade-offs.
If you require a high random write throughput (>10,000 w/sec) or you need to use
spinning disks then LevelDB could be a good choice. If your application is
read-heavy or does a lot of range scans then Bolt could be a good choice.
One other important consideration is that LevelDB does not have transactions.
It supports batch writing of key/values pairs and it supports read snapshots
but it will not give you the ability to do a compare-and-swap operation safely.
Bolt supports fully serializable ACID transactions.
### LMDB
Bolt was originally a port of LMDB so it is architecturally similar. Both use
a B+tree, have ACID semantics with fully serializable transactions, and support
lock-free MVCC using a single writer and multiple readers.
The two projects have somewhat diverged. LMDB heavily focuses on raw performance
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
several unsafe actions such as direct writes for the sake of performance. Bolt
opts to disallow actions which can leave the database in a corrupted state. The
only exception to this in Bolt is `DB.NoSync`.
There are also a few differences in API. LMDB requires a maximum mmap size when
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
automatically. LMDB overloads the getter and setter functions with multiple
flags whereas Bolt splits these specialized cases into their own functions.
## Caveats & Limitations
It's important to pick the right tool for the job and Bolt is no exception.
Here are a few things to note when evaluating and using Bolt:
* Bolt is good for read intensive workloads. Sequential write performance is
also fast but random writes can be slow. You can use `DB.Batch()` or add a
write-ahead log to help mitigate this issue.
* Bolt uses a B+tree internally so there can be a lot of random page access.
SSDs provide a significant performance boost over spinning disks.
* Try to avoid long running read transactions. Bolt uses copy-on-write so
old pages cannot be reclaimed while an old transaction is using them.
* Byte slices returned from Bolt are only valid during a transaction. Once the
transaction has been committed or rolled back then the memory they point to
can be reused by a new page or can be unmapped from virtual memory and you'll
see an `unexpected fault address` panic when accessing it.
* Bolt uses an exclusive write lock on the database file so it cannot be
shared by multiple processes.
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
buckets that have random inserts will cause your database to have very poor
page utilization.
* Use larger buckets in general. Smaller buckets cause poor page utilization
once they become larger than the page size (typically 4KB).
* Bulk loading a lot of random writes into a new bucket can be slow as the
page will not split until the transaction is committed. Randomly inserting
more than 100,000 key/value pairs into a single new bucket in a single
transaction is not advised.
* Bolt uses a memory-mapped file so the underlying operating system handles the
caching of the data. Typically, the OS will cache as much of the file as it
can in memory and will release memory as needed to other processes. This means
that Bolt can show very high memory usage when working with large databases.
However, this is expected and the OS will release memory as needed. Bolt can
handle databases much larger than the available physical RAM, provided its
memory-map fits in the process virtual address space. It may be problematic
on 32-bit systems.
* The data structures in the Bolt database are memory mapped so the data file
will be endian specific. This means that you cannot copy a Bolt file from a
little endian machine to a big endian machine and have it work. For most
users this is not a concern since most modern CPUs are little endian.
* Because of the way pages are laid out on disk, Bolt cannot truncate data files
and return free pages back to the disk. Instead, Bolt maintains a free list
of unused pages within its data file. These free pages can be reused by later
transactions. This works well for many use cases as databases generally tend
to grow. However, it's important to note that deleting large chunks of data
will not allow you to reclaim that space on disk.
For more information on page allocation, [see this comment][page-allocation].
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
## Reading the Source
Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.
The best places to start are the main entry points into Bolt:
- `Open()` - Initializes the reference to the database. It's responsible for
creating the database if it doesn't exist, obtaining an exclusive lock on the
file, reading the meta pages, & memory-mapping the file.
- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
value of the `writable` argument. This requires briefly obtaining the "meta"
lock to keep track of open transactions. Only one read-write transaction can
exist at a time so the "rwlock" is acquired during the life of a read-write
transaction.
- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
arguments, a cursor is used to traverse the B+tree to the page and position
where the key & value will be written. Once the position is found, the bucket
materializes the underlying page and the page's parent pages into memory as
"nodes". These nodes are where mutations occur during read-write transactions.
These changes get flushed to disk during commit.
- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
to move to the page & position of a key/value pair. During a read-only
transaction, the key and value data is returned as a direct reference to the
underlying mmap file so there's no allocation overhead. For read-write
transactions, this data may reference the mmap file or one of the in-memory
node values.
- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
or in-memory nodes. It can seek to a specific key, move to the first or last
value, or it can move forward or backward. The cursor handles the movement up
and down the B+tree transparently to the end user.
- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
into pages to be written to disk. Writing to disk then occurs in two phases.
First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
new meta page with an incremented transaction ID is written and another
`fsync()` occurs. This two phase write ensures that partially written data
pages are ignored in the event of a crash since the meta page pointing to them
is never written. Partially written meta pages are invalidated because they
are written with a checksum.
If you have additional notes that could be helpful for others, please submit
them via pull request.
## Other Projects Using Bolt
Below is a list of public, open source projects that use Bolt:
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Go, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains.
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies.
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
If you are using Bolt in a project, please send a pull request to add it to the list.

vendor/github.com/boltdb/bolt/appveyor.yml
@@ -1,18 +0,0 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\boltdb\bolt
environment:
GOPATH: c:\gopath
install:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go get -v -t ./...
build_script:
- go test -v ./...

vendor/github.com/boltdb/bolt/bolt_386.go
@@ -1,10 +0,0 @@
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
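Concretely, the 0x7FFFFFFF ceiling here means a data file on a 32-bit build can never be mapped past 2 GiB, and 0xFFFFFFF caps a single array allocation at roughly 256 MiB; the 64-bit ports below lift the map ceiling to 0xFFFFFFFFFFFF, i.e. 256 TiB.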

vendor/github.com/boltdb/bolt/bolt_amd64.go
@@ -1,10 +0,0 @@
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_arm.go
@@ -1,28 +0,0 @@
package bolt
import "unsafe"
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool
func init() {
// Simple check to see whether this arch handles unaligned load/stores
// correctly.
// ARM9 and older devices require load/stores to be from/to aligned
// addresses. If not, the lower 2 bits are cleared and that address is
// read in a jumbled up order.
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
brokenUnaligned = val != 0x11222211
}
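The probe lifts out cleanly as a standalone program. A minimal sketch of the same technique, runnable on any architecture (the package name and output format are illustrative, not part of Bolt):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Place a recognizable pattern in memory, then read a uint32 from an
	// address that is deliberately 2-byte (mis)aligned.
	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
	// On CPUs with working unaligned loads this reads back 0x11222211;
	// the pattern is a palindrome, so endianness does not matter.
	fmt.Printf("unaligned loads ok: %v\n", val == 0x11222211)
}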

vendor/github.com/boltdb/bolt/bolt_arm64.go
@@ -1,12 +0,0 @@
// +build arm64

package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_linux.go
@@ -1,10 +0,0 @@
package bolt
import (
"syscall"
)
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return syscall.Fdatasync(int(db.file.Fd()))
}

vendor/github.com/boltdb/bolt/bolt_openbsd.go
@@ -1,27 +0,0 @@
package bolt
import (
"syscall"
"unsafe"
)
const (
msAsync = 1 << iota // perform asynchronous writes
msSync // perform synchronous writes
msInvalidate // invalidate cached data
)
func msync(db *DB) error {
_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
if errno != 0 {
return errno
}
return nil
}
func fdatasync(db *DB) error {
if db.data != nil {
return msync(db)
}
return db.file.Sync()
}

vendor/github.com/boltdb/bolt/bolt_ppc.go
@@ -1,9 +0,0 @@
// +build ppc

package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

vendor/github.com/boltdb/bolt/bolt_ppc64.go
@@ -1,12 +0,0 @@
// +build ppc64

package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_ppc64le.go
@@ -1,12 +0,0 @@
// +build ppc64le

package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_s390x.go
@@ -1,12 +0,0 @@
// +build s390x

package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_unix.go
@@ -1,89 +0,0 @@
// +build !windows,!plan9,!solaris

package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
)
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
flag := syscall.LOCK_SH
if exclusive {
flag = syscall.LOCK_EX
}
// Otherwise attempt to obtain the lock.
err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
if err == nil {
return nil
} else if err != syscall.EWOULDBLOCK {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
}
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
// Map the data file to memory.
b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
return err
}
// Advise the kernel that the mmap is accessed randomly.
if err := madvise(b, syscall.MADV_RANDOM); err != nil {
return fmt.Errorf("madvise: %s", err)
}
// Save the original byte slice and convert to a byte array pointer.
db.dataref = b
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
db.datasz = sz
return nil
}
// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
// Ignore the unmap if we have no mapped data.
if db.dataref == nil {
return nil
}
// Unmap using the original byte slice.
err := syscall.Munmap(db.dataref)
db.dataref = nil
db.data = nil
db.datasz = 0
return err
}
// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
if e1 != 0 {
err = e1
}
return
}
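This retry loop is what backs the Timeout option in Bolt's public API: Open re-attempts the advisory lock every 50 milliseconds and gives up with ErrTimeout once the deadline passes. A minimal sketch of how that surfaces to a caller (the file name my.db is illustrative):

package main

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	// flock() above keeps retrying until this deadline passes, at which
	// point Open returns bolt.ErrTimeout instead of blocking forever.
	db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: time.Second})
	if err == bolt.ErrTimeout {
		log.Fatal("my.db is locked by another process")
	} else if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}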

vendor/github.com/boltdb/bolt/bolt_unix_solaris.go
@@ -1,90 +0,0 @@
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
"golang.org/x/sys/unix"
)
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
lock.Whence = 0
if exclusive {
lock.Type = syscall.F_WRLCK
} else {
lock.Type = syscall.F_RDLCK
}
err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
if err == nil {
return nil
} else if err != syscall.EAGAIN {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_UNLCK
lock.Whence = 0
return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
// Map the data file to memory.
b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
return err
}
// Advise the kernel that the mmap is accessed randomly.
if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
return fmt.Errorf("madvise: %s", err)
}
// Save the original byte slice and convert to a byte array pointer.
db.dataref = b
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
db.datasz = sz
return nil
}
// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
// Ignore the unmap if we have no mapped data.
if db.dataref == nil {
return nil
}
// Unmap using the original byte slice.
err := unix.Munmap(db.dataref)
db.dataref = nil
db.data = nil
db.datasz = 0
return err
}

vendor/github.com/boltdb/bolt/bolt_windows.go
@@ -1,144 +0,0 @@
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
)
// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procLockFileEx = modkernel32.NewProc("LockFileEx")
procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)
const (
lockExt = ".lock"
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
flagLockExclusive = 2
flagLockFailImmediately = 1
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
errLockViolation syscall.Errno = 0x21
)
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
if r == 0 {
return err
}
return nil
}
func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
if r == 0 {
return err
}
return nil
}
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return db.file.Sync()
}
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
// Create a separate lock file on windows because a process
// cannot share an exclusive lock on the same file. This is
// needed during Tx.WriteTo().
f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
if err != nil {
return err
}
db.lockfile = f
var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
var flag uint32 = flagLockFailImmediately
if exclusive {
flag |= flagLockExclusive
}
err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
if err == nil {
return nil
} else if err != errLockViolation {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path + lockExt)
return err
}
// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
if !db.readOnly {
// Truncate the database to the size of the mmap.
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("truncate: %s", err)
}
}
// Open a file mapping handle.
sizehi := uint32(sz >> 32)
sizelo := uint32(sz) & 0xffffffff
h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
if h == 0 {
return os.NewSyscallError("CreateFileMapping", errno)
}
// Create the memory map.
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
if addr == 0 {
return os.NewSyscallError("MapViewOfFile", errno)
}
// Close mapping handle.
if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
return os.NewSyscallError("CloseHandle", err)
}
// Convert to a byte array.
db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
db.datasz = sz
return nil
}
// munmap unmaps a pointer from a file.
// Based on: https://github.com/edsrzf/mmap-go
func munmap(db *DB) error {
if db.data == nil {
return nil
}
addr := (uintptr)(unsafe.Pointer(&db.data[0]))
if err := syscall.UnmapViewOfFile(addr); err != nil {
return os.NewSyscallError("UnmapViewOfFile", err)
}
return nil
}
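One consequence of the !db.readOnly guard in mmap above is that a read-only open never truncates the data file, which is what makes it safe while another process holds the write lock. A minimal sketch (the file name is illustrative; the file must already exist for a read-only open):

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// A read-only open maps the file without ever truncating it
	// (see the !db.readOnly branch in mmap above).
	db, err := bolt.Open("my.db", 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}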

vendor/github.com/boltdb/bolt/boltsync_unix.go
@@ -1,8 +0,0 @@
// +build !windows,!plan9,!linux,!openbsd

package bolt
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return db.file.Sync()
}

vendor/github.com/boltdb/bolt/bucket.go
@@ -1,777 +0,0 @@
package bolt
import (
"bytes"
"fmt"
"unsafe"
)
const (
// MaxKeySize is the maximum length of a key, in bytes.
MaxKeySize = 32768
// MaxValueSize is the maximum length of a value, in bytes.
MaxValueSize = (1 << 31) - 2
)
const (
maxUint = ^uint(0)
minUint = 0
maxInt = int(^uint(0) >> 1)
minInt = -maxInt - 1
)
const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
const (
minFillPercent = 0.1
maxFillPercent = 1.0
)
// DefaultFillPercent is the percentage that split pages are filled.
// This value can be changed by setting Bucket.FillPercent.
const DefaultFillPercent = 0.5
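Because FillPercent is not persisted, it must be set inside every write transaction that wants a non-default value. A minimal sketch for a mostly append-only workload (the events bucket name and key are illustrative):

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("events"))
		if err != nil {
			return err
		}
		// Append-only keys: let split pages fill to 90% instead of 50%.
		b.FillPercent = 0.9
		return b.Put([]byte("0001"), []byte("payload"))
	})
	if err != nil {
		log.Fatal(err)
	}
}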
// Bucket represents a collection of key/value pairs inside the database.
type Bucket struct {
*bucket
tx *Tx // the associated transaction
buckets map[string]*Bucket // subbucket cache
page *page // inline page reference
rootNode *node // materialized node for the root page.
nodes map[pgid]*node // node cache
// Sets the threshold for filling nodes when they split. By default,
// the bucket will fill to 50% but it can be useful to increase this
// amount if you know that your write workloads are mostly append-only.
//
// This is non-persisted across transactions so it must be set in every Tx.
FillPercent float64
}
// bucket represents the on-file representation of a bucket.
// This is stored as the "value" of a bucket key. If the bucket is small enough,
// then its root page can be stored inline in the "value", after the bucket
// header. In the case of inline buckets, the "root" will be 0.
type bucket struct {
root pgid // page id of the bucket's root-level page
sequence uint64 // monotonically incrementing, used by NextSequence()
}
// newBucket returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket {
var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
if tx.writable {
b.buckets = make(map[string]*Bucket)
b.nodes = make(map[pgid]*node)
}
return b
}
// Tx returns the tx of the bucket.
func (b *Bucket) Tx() *Tx {
return b.tx
}
// Root returns the root of the bucket.
func (b *Bucket) Root() pgid {
return b.root
}
// Writable returns whether the bucket is writable.
func (b *Bucket) Writable() bool {
return b.tx.writable
}
// Cursor creates a cursor associated with the bucket.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (b *Bucket) Cursor() *Cursor {
// Update transaction statistics.
b.tx.stats.CursorCount++
// Allocate and return a cursor.
return &Cursor{
bucket: b,
stack: make([]elemRef, 0),
}
}
// Bucket retrieves a nested bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) Bucket(name []byte) *Bucket {
if b.buckets != nil {
if child := b.buckets[string(name)]; child != nil {
return child
}
}
// Move cursor to key.
c := b.Cursor()
k, v, flags := c.seek(name)
// Return nil if the key doesn't exist or it is not a bucket.
if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
return nil
}
// Otherwise create a bucket and cache it.
var child = b.openBucket(v)
if b.buckets != nil {
b.buckets[string(name)] = child
}
return child
}
// Helper method that re-interprets a sub-bucket value
// from a parent into a Bucket
func (b *Bucket) openBucket(value []byte) *Bucket {
var child = newBucket(b.tx)
// If unaligned load/stores are broken on this arch and value is
// unaligned simply clone to an aligned byte array.
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
if unaligned {
value = cloneBytes(value)
}
// If this is a writable transaction then we need to copy the bucket entry.
// Read-only transactions can point directly at the mmap entry.
if b.tx.writable && !unaligned {
child.bucket = &bucket{}
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
} else {
child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
}
// Save a reference to the inline page if the bucket is inline.
if child.root == 0 {
child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
}
return &child
}
// CreateBucket creates a new bucket at the given key and returns the new bucket.
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if b.tx.db == nil {
return nil, ErrTxClosed
} else if !b.tx.writable {
return nil, ErrTxNotWritable
} else if len(key) == 0 {
return nil, ErrBucketNameRequired
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if there is an existing key.
if bytes.Equal(key, k) {
if (flags & bucketLeafFlag) != 0 {
return nil, ErrBucketExists
}
return nil, ErrIncompatibleValue
}
// Create empty, inline bucket.
var bucket = Bucket{
bucket: &bucket{},
rootNode: &node{isLeaf: true},
FillPercent: DefaultFillPercent,
}
var value = bucket.write()
// Insert into node.
key = cloneBytes(key)
c.node().put(key, key, value, 0, bucketLeafFlag)
// Since subbuckets are not allowed on inline buckets, we need to
// dereference the inline page, if it exists. This will cause the bucket
// to be treated as a regular, non-inline bucket for the rest of the tx.
b.page = nil
return b.Bucket(key), nil
}
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
child, err := b.CreateBucket(key)
if err == ErrBucketExists {
return b.Bucket(key), nil
} else if err != nil {
return nil, err
}
return child, nil
}
// DeleteBucket deletes a bucket at the given key.
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
func (b *Bucket) DeleteBucket(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if bucket doesn't exist or is not a bucket.
if !bytes.Equal(key, k) {
return ErrBucketNotFound
} else if (flags & bucketLeafFlag) == 0 {
return ErrIncompatibleValue
}
// Recursively delete all child buckets.
child := b.Bucket(key)
err := child.ForEach(func(k, v []byte) error {
if v == nil {
if err := child.DeleteBucket(k); err != nil {
return fmt.Errorf("delete bucket: %s", err)
}
}
return nil
})
if err != nil {
return err
}
// Remove cached copy.
delete(b.buckets, string(key))
// Release all bucket pages to freelist.
child.nodes = nil
child.rootNode = nil
child.free()
// Delete the node if we have a matching key.
c.node().del(key)
return nil
}
// Get retrieves the value for a key in the bucket.
// Returns a nil value if the key does not exist or if the key is a nested bucket.
// The returned value is only valid for the life of the transaction.
func (b *Bucket) Get(key []byte) []byte {
k, v, flags := b.Cursor().seek(key)
// Return nil if this is a bucket.
if (flags & bucketLeafFlag) != 0 {
return nil
}
// If our target node isn't the same key as what's passed in then return nil.
if !bytes.Equal(key, k) {
return nil
}
return v
}
// Put sets the value for a key in the bucket.
// If the key exists then its previous value will be overwritten.
// Supplied value must remain valid for the life of the transaction.
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
func (b *Bucket) Put(key []byte, value []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
} else if len(key) == 0 {
return ErrKeyRequired
} else if len(key) > MaxKeySize {
return ErrKeyTooLarge
} else if int64(len(value)) > MaxValueSize {
return ErrValueTooLarge
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if there is an existing key with a bucket value.
if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
// Insert into node.
key = cloneBytes(key)
c.node().put(key, key, value, 0, 0)
return nil
}
// Delete removes a key from the bucket.
// If the key does not exist then nothing is done and a nil error is returned.
// Returns an error if the bucket was created from a read-only transaction.
func (b *Bucket) Delete(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Move cursor to correct position.
c := b.Cursor()
_, _, flags := c.seek(key)
// Return an error if there is an existing bucket value.
if (flags & bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
// Delete the node if we have a matching key.
c.node().del(key)
return nil
}
// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Set the sequence.
b.bucket.sequence = v
return nil
}
// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil {
return 0, ErrTxClosed
} else if !b.Writable() {
return 0, ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence++
return b.bucket.sequence, nil
}
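NextSequence pairs naturally with big-endian encoded keys, which keep numeric IDs in byte-sorted order for cursors. A minimal sketch (the users bucket name and value are illustrative):

package main

import (
	"encoding/binary"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("users"))
		if err != nil {
			return err
		}
		id, err := b.NextSequence()
		if err != nil {
			return err
		}
		// Big-endian encoding keeps numeric keys in sorted byte order.
		key := make([]byte, 8)
		binary.BigEndian.PutUint64(key, id)
		return b.Put(key, []byte("alice"))
	})
	if err != nil {
		log.Fatal(err)
	}
}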
// ForEach executes a function for each key/value pair in a bucket.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
if b.tx.db == nil {
return ErrTxClosed
}
c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if err := fn(k, v); err != nil {
return err
}
}
return nil
}
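Since the callback must not modify the bucket, ForEach is typically driven from a read-only View transaction. A minimal sketch, reading back the users bucket from the previous sketch:

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("users"))
		if b == nil {
			return nil // bucket not created yet
		}
		// Keys come back in byte-sorted order.
		return b.ForEach(func(k, v []byte) error {
			fmt.Printf("%x => %s\n", k, v)
			return nil
		})
	})
	if err != nil {
		log.Fatal(err)
	}
}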
// Stats returns stats on a bucket.
func (b *Bucket) Stats() BucketStats {
var s, subStats BucketStats
pageSize := b.tx.db.pageSize
s.BucketN += 1
if b.root == 0 {
s.InlineBucketN += 1
}
b.forEachPage(func(p *page, depth int) {
if (p.flags & leafPageFlag) != 0 {
s.KeyN += int(p.count)
// used totals the used bytes for the page
used := pageHeaderSize
if p.count != 0 {
// If page has any elements, add all element headers.
used += leafPageElementSize * int(p.count-1)
// Add all element key, value sizes.
// The computation takes advantage of the fact that the position
// of the last element's key/value equals to the total of the sizes
// of all previous elements' keys and values.
// It also includes the last element's header.
lastElement := p.leafPageElement(p.count - 1)
used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
}
if b.root == 0 {
// For inlined bucket just update the inline stats
s.InlineBucketInuse += used
} else {
// For non-inlined bucket update all the leaf stats
s.LeafPageN++
s.LeafInuse += used
s.LeafOverflowN += int(p.overflow)
// Collect stats from sub-buckets.
// Do that by iterating over all element headers
// looking for the ones with the bucketLeafFlag.
for i := uint16(0); i < p.count; i++ {
e := p.leafPageElement(i)
if (e.flags & bucketLeafFlag) != 0 {
// For any bucket element, open the element value
// and recursively call Stats on the contained bucket.
subStats.Add(b.openBucket(e.value()).Stats())
}
}
}
} else if (p.flags & branchPageFlag) != 0 {
s.BranchPageN++
lastElement := p.branchPageElement(p.count - 1)
// used totals the used bytes for the page
// Add header and all element headers.
used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
// Add size of all keys and values.
// Again, use the fact that last element's position equals to
// the total of key, value sizes of all previous elements.
used += int(lastElement.pos + lastElement.ksize)
s.BranchInuse += used
s.BranchOverflowN += int(p.overflow)
}
// Keep track of maximum page depth.
if depth+1 > s.Depth {
s.Depth = (depth + 1)
}
})
// Alloc stats can be computed from page counts and pageSize.
s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
// Add the max depth of sub-buckets to get total nested depth.
s.Depth += subStats.Depth
// Add the stats for all sub-buckets
s.Add(subStats)
return s
}
// forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
// If we have an inline page then just use that.
if b.page != nil {
fn(b.page, 0)
return
}
// Otherwise traverse the page hierarchy.
b.tx.forEachPage(b.root, 0, fn)
}
// forEachPageNode iterates over every page (or node) in a bucket.
// This also includes inline pages.
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
// If we have an inline page or root node then just use that.
if b.page != nil {
fn(b.page, nil, 0)
return
}
b._forEachPageNode(b.root, 0, fn)
}
func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
var p, n = b.pageNode(pgid)
// Execute function.
fn(p, n, depth)
// Recursively loop over children.
if p != nil {
if (p.flags & branchPageFlag) != 0 {
for i := 0; i < int(p.count); i++ {
elem := p.branchPageElement(uint16(i))
b._forEachPageNode(elem.pgid, depth+1, fn)
}
}
} else {
if !n.isLeaf {
for _, inode := range n.inodes {
b._forEachPageNode(inode.pgid, depth+1, fn)
}
}
}
}
// spill writes all the nodes for this bucket to dirty pages.
func (b *Bucket) spill() error {
// Spill all child buckets first.
for name, child := range b.buckets {
// If the child bucket is small enough and it has no child buckets then
// write it inline into the parent bucket's page. Otherwise spill it
// like a normal bucket and make the parent value a pointer to the page.
var value []byte
if child.inlineable() {
child.free()
value = child.write()
} else {
if err := child.spill(); err != nil {
return err
}
// Update the child bucket header in this bucket.
value = make([]byte, unsafe.Sizeof(bucket{}))
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
*bucket = *child.bucket
}
// Skip writing the bucket if there are no materialized nodes.
if child.rootNode == nil {
continue
}
// Update parent node.
var c = b.Cursor()
k, _, flags := c.seek([]byte(name))
if !bytes.Equal([]byte(name), k) {
panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
}
if flags&bucketLeafFlag == 0 {
panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
}
c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
}
// Ignore if there's not a materialized root node.
if b.rootNode == nil {
return nil
}
// Spill nodes.
if err := b.rootNode.spill(); err != nil {
return err
}
b.rootNode = b.rootNode.root()
// Update the root node for this bucket.
if b.rootNode.pgid >= b.tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
}
b.root = b.rootNode.pgid
return nil
}
// inlineable returns true if a bucket is small enough to be written inline
// and if it contains no subbuckets. Otherwise returns false.
func (b *Bucket) inlineable() bool {
var n = b.rootNode
// Bucket must only contain a single leaf node.
if n == nil || !n.isLeaf {
return false
}
// Bucket is not inlineable if it contains subbuckets or if it goes beyond
// our threshold for inline bucket size.
var size = pageHeaderSize
for _, inode := range n.inodes {
size += leafPageElementSize + len(inode.key) + len(inode.value)
if inode.flags&bucketLeafFlag != 0 {
return false
} else if size > b.maxInlineBucketSize() {
return false
}
}
return true
}
// Returns the maximum total size of a bucket to make it a candidate for inlining.
func (b *Bucket) maxInlineBucketSize() int {
return b.tx.db.pageSize / 4
}
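With the common 4 KiB page size this threshold is 4096 / 4 = 1024 bytes: a bucket stays inline only while its header, element headers, keys, and values fit in 1 KiB and it holds no subbuckets; one byte more and spill() moves it onto its own root page.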
// write allocates and writes a bucket to a byte slice.
func (b *Bucket) write() []byte {
// Allocate the appropriate size.
var n = b.rootNode
var value = make([]byte, bucketHeaderSize+n.size())
// Write a bucket header.
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
*bucket = *b.bucket
// Convert byte slice to a fake page and write the root node.
var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
n.write(p)
return value
}
// rebalance attempts to balance all nodes.
func (b *Bucket) rebalance() {
for _, n := range b.nodes {
n.rebalance()
}
for _, child := range b.buckets {
child.rebalance()
}
}
// node creates a node from a page and associates it with a given parent.
func (b *Bucket) node(pgid pgid, parent *node) *node {
_assert(b.nodes != nil, "nodes map expected")
// Retrieve node if it's already been created.
if n := b.nodes[pgid]; n != nil {
return n
}
// Otherwise create a node and cache it.
n := &node{bucket: b, parent: parent}
if parent == nil {
b.rootNode = n
} else {
parent.children = append(parent.children, n)
}
// Use the inline page if this is an inline bucket.
var p = b.page
if p == nil {
p = b.tx.page(pgid)
}
// Read the page into the node and cache it.
n.read(p)
b.nodes[pgid] = n
// Update statistics.
b.tx.stats.NodeCount++
return n
}
// free recursively frees all pages in the bucket.
func (b *Bucket) free() {
if b.root == 0 {
return
}
var tx = b.tx
b.forEachPageNode(func(p *page, n *node, _ int) {
if p != nil {
tx.db.freelist.free(tx.meta.txid, p)
} else {
n.free()
}
})
b.root = 0
}
// dereference removes all references to the old mmap.
func (b *Bucket) dereference() {
if b.rootNode != nil {
b.rootNode.root().dereference()
}
for _, child := range b.buckets {
child.dereference()
}
}
// pageNode returns the in-memory node, if it exists.
// Otherwise returns the underlying page.
func (b *Bucket) pageNode(id pgid) (*page, *node) {
// Inline buckets have a fake page embedded in their value so treat them
// differently. We'll return the rootNode (if available) or the fake page.
if b.root == 0 {
if id != 0 {
panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
}
if b.rootNode != nil {
return nil, b.rootNode
}
return b.page, nil
}
// Check the node cache for non-inline buckets.
if b.nodes != nil {
if n := b.nodes[id]; n != nil {
return nil, n
}
}
// Finally lookup the page from the transaction if no node is materialized.
return b.tx.page(id), nil
}
// BucketStats records statistics about resources used by a bucket.
type BucketStats struct {
// Page count statistics.
BranchPageN int // number of logical branch pages
BranchOverflowN int // number of physical branch overflow pages
LeafPageN int // number of logical leaf pages
LeafOverflowN int // number of physical leaf overflow pages
// Tree statistics.
KeyN int // number of key/value pairs
Depth int // number of levels in B+tree
// Page size utilization.
BranchAlloc int // bytes allocated for physical branch pages
BranchInuse int // bytes actually used for branch data
LeafAlloc int // bytes allocated for physical leaf pages
LeafInuse int // bytes actually used for leaf data
// Bucket statistics
BucketN int // total number of buckets including the top bucket
InlineBucketN int // total number of inlined buckets
InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
}
func (s *BucketStats) Add(other BucketStats) {
s.BranchPageN += other.BranchPageN
s.BranchOverflowN += other.BranchOverflowN
s.LeafPageN += other.LeafPageN
s.LeafOverflowN += other.LeafOverflowN
s.KeyN += other.KeyN
if s.Depth < other.Depth {
s.Depth = other.Depth
}
s.BranchAlloc += other.BranchAlloc
s.BranchInuse += other.BranchInuse
s.LeafAlloc += other.LeafAlloc
s.LeafInuse += other.LeafInuse
s.BucketN += other.BucketN
s.InlineBucketN += other.InlineBucketN
s.InlineBucketInuse += other.InlineBucketInuse
}
// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
var clone = make([]byte, len(v))
copy(clone, v)
return clone
}
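The page and inline-bucket accounting above is observable from the outside through Bucket.Stats. A minimal sketch that prints a few of the BucketStats fields (the events bucket name is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("events"))
		if b == nil {
			return nil
		}
		s := b.Stats()
		fmt.Printf("keys=%d depth=%d inline-buckets=%d leaf-inuse=%dB\n",
			s.KeyN, s.Depth, s.InlineBucketN, s.LeafInuse)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}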

Some files were not shown because too many files have changed in this diff.