🚧 Created WIP hub

This commit is contained in:
Maxim Lebedev 2023-03-12 13:33:08 +06:00
parent d47349d50e
commit ff0168205c
Signed by: toby3d
GPG Key ID: 1F14E25B7C119FC5
1560 changed files with 4193112 additions and 1 deletion

67
catalog_gen.go Normal file

@ -0,0 +1,67 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package main
import (
"golang.org/x/text/language"
"golang.org/x/text/message"
"golang.org/x/text/message/catalog"
)
type dictionary struct {
index []uint32
data string
}
func (d *dictionary) Lookup(key string) (data string, ok bool) {
p, ok := messageKeyToIndex[key]
if !ok {
return "", false
}
start, end := d.index[p], d.index[p+1]
if start == end {
return "", false
}
return d.data[start:end], true
}
func init() {
dict := map[string]catalog.Dictionary{
"en": &dictionary{index: enIndex, data: enData},
"ru": &dictionary{index: ruIndex, data: ruData},
}
fallback := language.MustParse("en")
cat, err := catalog.NewFromMap(dict, catalog.Fallback(fallback))
if err != nil {
panic(err)
}
message.DefaultCatalog = cat
}
var messageKeyToIndex = map[string]int{
"%d subscribers": 4,
"%s logo": 0,
"Dead simple WebSub hub": 1,
"How to publish and consume?": 2,
"What the spec?": 3,
}
var enIndex = []uint32{ // 6 elements
0x00000000, 0x0000000b, 0x00000022, 0x0000003e,
0x0000004d, 0x0000005f,
} // Size: 48 bytes
const enData string = "" + // Size: 95 bytes
"\x02%[1]s logo\x02Dead simple WebSub hub\x02How to publish and consume?" +
"\x02What the spec?\x02%[1]d subscribers"
var ruIndex = []uint32{ // 6 elements
0x00000000, 0x00000015, 0x00000038, 0x0000006d,
0x00000083, 0x000000a0,
} // Size: 48 bytes
const ruData string = "" + // Size: 160 bytes
"\x02логотип %[1]s\x02Простейший хаб WebSub\x02Как публиковать и принимат" +
"ь?\x02В чём спека?\x02%[1]d подписчиков"
// Total table size 351 bytes (0KiB); checksum: D963980B
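
As an illustrative aside (not part of the commit), a hypothetical extra file in the same main package could exercise this generated catalog; with the init above having set message.DefaultCatalog, each printer resolves the "%d subscribers" key for its own language:

``` go
package main

import (
	"fmt"

	"golang.org/x/text/language"
	"golang.org/x/text/message"
)

// printSubscribers is a hypothetical helper: it relies on the generated init
// above registering the "en" and "ru" dictionaries in message.DefaultCatalog.
func printSubscribers(n int) {
	for _, tag := range []language.Tag{language.English, language.Russian} {
		p := message.NewPrinter(tag)
		p.Printf("%d subscribers", n) // looked up via messageKeyToIndex
		fmt.Println()
	}
}
```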

36
go.mod

@ -1,3 +1,37 @@
module source.toby3d.me/toby3d/hub
go 1.18
go 1.20
require (
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/caarlos0/env/v6 v6.10.1
github.com/go-logfmt/logfmt v0.6.0
github.com/google/go-cmp v0.5.8
github.com/jmoiron/sqlx v1.3.5
github.com/valyala/quicktemplate v1.7.0
golang.org/x/text v0.3.7
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
modernc.org/sqlite v1.17.3
)
require (
github.com/google/uuid v1.3.0 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/lib/pq v1.10.6 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/mattn/go-sqlite3 v1.14.13 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
golang.org/x/tools v0.1.12 // indirect
lukechampine.com/uint128 v1.1.1 // indirect
modernc.org/cc/v3 v3.36.0 // indirect
modernc.org/ccgo/v3 v3.16.6 // indirect
modernc.org/libc v1.16.7 // indirect
modernc.org/mathutil v1.4.1 // indirect
modernc.org/memory v1.1.1 // indirect
modernc.org/opt v0.1.1 // indirect
modernc.org/strutil v1.1.1 // indirect
modernc.org/token v1.0.0 // indirect
)

119
go.sum Normal file

@ -0,0 +1,119 @@
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/caarlos0/env/v6 v6.10.1 h1:t1mPSxNpei6M5yAeu1qtRdPAK29Nbcf/n3G7x+b3/II=
github.com/caarlos0/env/v6 v6.10.1/go.mod h1:hvp/ryKXKipEkcuYjs9mI4bBCg+UI0Yhgm5Zu0ddvwc=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.13 h1:1tj15ngiFfcZzii7yd82foL+ks+ouQcj8j/TPq3fk1I=
github.com/mattn/go-sqlite3 v1.14.13/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
github.com/valyala/quicktemplate v1.7.0 h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTcjMJzCM=
github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
lukechampine.com/uint128 v1.1.1 h1:pnxCASz787iMf+02ssImqk6OLt+Z5QHMoZyUXR4z6JU=
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.36.0 h1:0kmRkTmqNidmu3c7BNDSdVHCxXCkWLmWmCIVX4LUboo=
modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
modernc.org/ccgo/v3 v3.16.6 h1:3l18poV+iUemQ98O3X5OMr97LOqlzis+ytivU4NqGhA=
modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A=
modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU=
modernc.org/libc v1.16.7 h1:qzQtHhsZNpVPpeCu+aMIQldXeV1P0vRhSqCL0nOIJOA=
modernc.org/libc v1.16.7/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A=
modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.17.3 h1:iE+coC5g17LtByDYDWKpR6m2Z9022YrSh3bumwOnIrI=
modernc.org/sqlite v1.17.3/go.mod h1:10hPVYar9C0kfXuTWGz8s0XtB8uAGymUy51ZzStYe3k=
modernc.org/strutil v1.1.1 h1:xv+J1BXY3Opl2ALrBwyfEikFAj8pmqcpnfmuwUwcozs=
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
modernc.org/tcl v1.13.1 h1:npxzTwFTZYM8ghWicVIX1cRWzj7Nd8i6AqqX2p+IYao=
modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw=
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
modernc.org/z v1.5.1 h1:RTNHdsrOpeoSeOF4FbzTo8gBYByaJ5xT7NgZ9ZqRiJM=
modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8=

32
internal/common/common.go Normal file

@ -0,0 +1,32 @@
package common
const (
MIMEApplicationForm = "application/x-www-form-urlencoded"
MIMEApplicationFormCharsetUTF8 = MIMEApplicationForm + "; " + charsetUTF8
MIMETextHTML = "text/html"
MIMETextHTMLCharsetUTF8 = MIMETextHTML + "; " + charsetUTF8
charsetUTF8 = "charset=UTF-8"
)
const (
HeaderContentType = "Content-Type"
HeaderLink = "Link"
HeaderXHubSignature = "X-Hub-Signature"
HeaderAcceptLanguage = "Accept-Language"
)
const (
HubCallback = hub + ".callback"
HubChallenge = hub + ".challenge"
HubLeaseSeconds = hub + ".lease_seconds"
HubMode = hub + ".mode"
HubReason = hub + ".reason"
HubSecret = hub + ".secret"
HubTopic = hub + ".topic"
HubURL = hub + ".url"
hub = "hub"
)
const Und = "und"


@ -0,0 +1,77 @@
package domain
import (
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"errors"
"fmt"
"hash"
"source.toby3d.me/toby3d/hub/internal/common"
)
type Algorithm struct {
algorithm string
}
var (
AlgorithmUnd = Algorithm{algorithm: ""} // "und"
AlgorithmSHA1 = Algorithm{algorithm: "sha1"} // "sha1"
AlgorithmSHA256 = Algorithm{algorithm: "sha256"} // "sha256"
AlgorithmSHA384 = Algorithm{algorithm: "sha384"} // "sha384"
AlgorithmSHA512 = Algorithm{algorithm: "sha512"} // "sha512"
)
var ErrSyntaxAlgorithm = errors.New("bad algorithm syntax")
var stringsAlgorithms = map[string]Algorithm{
AlgorithmSHA1.algorithm: AlgorithmSHA1,
AlgorithmSHA256.algorithm: AlgorithmSHA256,
AlgorithmSHA384.algorithm: AlgorithmSHA384,
AlgorithmSHA512.algorithm: AlgorithmSHA512,
}
func ParseAlgorithm(algorithm string) (Algorithm, error) {
if alg, ok := stringsAlgorithms[algorithm]; ok {
return alg, nil
}
return AlgorithmUnd, fmt.Errorf("%w: %s", ErrSyntaxAlgorithm, algorithm)
}
func (a Algorithm) Hash() hash.Hash {
switch a {
default:
return nil
case AlgorithmSHA1:
return sha1.New()
case AlgorithmSHA256:
return sha256.New()
case AlgorithmSHA384:
return sha512.New384()
case AlgorithmSHA512:
return sha512.New()
}
}
func (a *Algorithm) UnmarshalForm(src []byte) error {
var err error
if *a, err = ParseAlgorithm(string(src)); err != nil {
return fmt.Errorf("Algorithm: %w", err)
}
return nil
}
func (a Algorithm) String() string {
if a.algorithm != "" {
return a.algorithm
}
return common.Und
}
func (a Algorithm) GoString() string {
return "domain.Algorithm(" + a.String() + ")"
}
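
As a minimal sketch of the API above (illustrative only; it would have to live inside this module, since the package is internal), ParseAlgorithm plus the Hash method value plug straight into crypto/hmac:

``` go
package main

import (
	"crypto/hmac"
	"encoding/hex"
	"fmt"

	"source.toby3d.me/toby3d/hub/internal/domain"
)

func main() {
	alg, err := domain.ParseAlgorithm("sha256")
	if err != nil {
		panic(err) // unknown names wrap ErrSyntaxAlgorithm
	}

	// alg.Hash has type func() hash.Hash, so hmac.New accepts it directly.
	mac := hmac.New(alg.Hash, []byte("shared-secret"))
	mac.Write([]byte("payload"))
	fmt.Println(alg.String() + "=" + hex.EncodeToString(mac.Sum(nil)))
}
```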


@ -0,0 +1,37 @@
package domain
import (
"fmt"
"net/url"
"source.toby3d.me/toby3d/hub/internal/common"
)
// Callback describes the URL at which a subscriber wishes to receive content
// distribution requests.
type Callback struct {
callback *url.URL
}
func ParseCallback(str string) (*Callback, error) {
u, err := url.Parse(str)
if err != nil {
return nil, fmt.Errorf("cannot parse string as callback URL: %w", err)
}
return &Callback{callback: u}, nil
}
func (c Callback) AddQuery(q url.Values) {
q.Add(common.HubCallback, c.callback.String())
}
func (c Callback) URL() *url.URL {
u, _ := url.Parse(c.callback.String())
return u
}
func (c Callback) String() string {
return c.callback.String()
}


@ -0,0 +1,36 @@
package domain
import (
"bytes"
"crypto/rand"
"encoding/base64"
"fmt"
"net/url"
"source.toby3d.me/toby3d/hub/internal/common"
)
type Challenge struct {
challenge []byte
}
func NewChallenge(length uint8) (*Challenge, error) {
src := make([]byte, length)
if _, err := rand.Read(src); err != nil {
return nil, fmt.Errorf("cannot create a new challenge: %w", err)
}
return &Challenge{challenge: []byte(base64.URLEncoding.EncodeToString(src))}, nil
}
func (c Challenge) AddQuery(q url.Values) {
q.Add(common.HubChallenge, string(c.challenge))
}
func (c Challenge) Equal(target []byte) bool {
return bytes.Equal(c.challenge, target)
}
func (c Challenge) String() string {
return string(c.challenge)
}

26
internal/domain/config.go Normal file

@ -0,0 +1,26 @@
package domain
import (
"net/url"
"testing"
)
type Config struct {
BaseURL *url.URL `env:"BASE_URL" envDefault:"http://localhost:3000/"`
Bind string `env:"BIND" envDefault:":3000"`
Name string `env:"NAME" envDefault:"WebSub"`
}
func TestConfig(tb testing.TB) *Config {
tb.Helper()
return &Config{
BaseURL: &url.URL{
Scheme: "https",
Host: "hub.example.com",
Path: "/",
},
Bind: ":3000",
Name: "WebSub",
}
}

55
internal/domain/error.go Normal file

@ -0,0 +1,55 @@
package domain
import (
"fmt"
"net/url"
"golang.org/x/xerrors"
)
type Error struct {
frame xerrors.Frame
topic *url.URL
reason string
}
func NewError(reason string, topic ...*url.URL) error {
err := &Error{
reason: reason,
frame: xerrors.Caller(1),
}
if len(topic) > 0 {
err.topic = topic[0]
}
return err
}
// Error returns a string representation of the error, satisfying the error
// interface.
func (e Error) Error() string {
return fmt.Sprint(e)
}
// Format prints the stack as error detail.
func (e Error) Format(state fmt.State, r rune) {
xerrors.FormatError(e, state, r)
}
// FormatError prints the receiver's error, if any.
func (e Error) FormatError(printer xerrors.Printer) error {
printer.Print(e.reason)
if e.topic != nil {
printer.Printf(" (%s)", e.topic)
}
if !printer.Detail() {
return nil
}
e.frame.Format(printer)
return nil
}


@ -0,0 +1,34 @@
package domain
import (
"net/url"
"strconv"
"source.toby3d.me/toby3d/hub/internal/common"
)
// LeaseSeconds describes a number of seconds for which the subscriber would
// like to have the subscription active, given as a positive decimal integer.
// Hubs MAY choose to respect this value or not, depending on their own
// policies, and MAY set a default value if the subscriber omits the parameter.
// This parameter MAY be present for unsubscription requests and MUST be ignored
// by the hub in that case.
type LeaseSeconds struct {
leaseSeconds uint
}
func NewLeaseSeconds(raw uint) LeaseSeconds {
return LeaseSeconds{leaseSeconds: raw}
}
func (ls LeaseSeconds) AddQuery(q url.Values) {
if ls.leaseSeconds == 0 {
return
}
q.Add(common.HubLeaseSeconds, strconv.FormatUint(uint64(ls.leaseSeconds), 10))
}
func (ls LeaseSeconds) IsZero() bool {
return ls.leaseSeconds == 0
}

63
internal/domain/mode.go Normal file

@ -0,0 +1,63 @@
package domain
import (
"errors"
"fmt"
"net/url"
"source.toby3d.me/toby3d/hub/internal/common"
)
type Mode struct {
mode string
}
var (
ModeUnd Mode = Mode{mode: ""} // "und"
ModeDenied Mode = Mode{mode: "denied"} // "denied"
ModePublish Mode = Mode{mode: "publish"} // "publish"
ModeSubscribe Mode = Mode{mode: "subscribe"} // "subscribe"
ModeUnsubscribe Mode = Mode{mode: "unsubscribe"} // "unsubscribe"
)
var ErrModeSyntax = errors.New("bad mode syntax")
var stringsModes = map[string]Mode{
ModeDenied.mode: ModeDenied,
ModePublish.mode: ModePublish,
ModeSubscribe.mode: ModeSubscribe,
ModeUnsubscribe.mode: ModeUnsubscribe,
}
func ParseMode(mode string) (Mode, error) {
if mode, ok := stringsModes[mode]; ok {
return mode, nil
}
return ModeUnd, fmt.Errorf("%w: %s", ErrModeSyntax, mode)
}
func (m *Mode) UnmarshalForm(src []byte) error {
var err error
if *m, err = ParseMode(string(src)); err != nil {
return fmt.Errorf("Mode: %w", err)
}
return nil
}
func (m Mode) AddQuery(q url.Values) {
q.Add(common.HubMode, m.mode)
}
func (m Mode) String() string {
if m.mode != "" {
return m.mode
}
return common.Und
}
func (m Mode) GoString() string {
return "domain.Mode(" + m.String() + ")"
}

26
internal/domain/push.go Normal file

@ -0,0 +1,26 @@
package domain
import (
"crypto/hmac"
"encoding/hex"
"hash"
"net/http"
"source.toby3d.me/toby3d/hub/internal/common"
)
type Push struct {
Self Topic
ContentType string
Content []byte
}
func (p Push) SetXHubSignatureHeader(req *http.Request, alg Algorithm, secret Secret) {
if alg == AlgorithmUnd || secret.secret == "" {
return
}
// Sign the content body with an HMAC keyed by the shared secret (X-Hub-Signature).
mac := hmac.New(func() hash.Hash { return alg.Hash() }, []byte(secret.secret))
mac.Write(p.Content)
req.Header.Set(common.HeaderXHubSignature, alg.algorithm+"="+hex.EncodeToString(mac.Sum(nil)))
}
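
For context, a subscriber receiving such a push would recompute the digest over the raw request body with its copy of the secret and compare it against X-Hub-Signature. A hedged, standalone sketch (the handler, header parsing, and secret below are illustrative assumptions, not part of this commit):

``` go
package main

import (
	"crypto/hmac"
	"crypto/sha512"
	"encoding/hex"
	"io"
	"net/http"
	"strings"
)

// verifyXHubSignature expects a header of the form "sha512=<hex>" and compares
// it in constant time against the HMAC of the body keyed by the shared secret.
func verifyXHubSignature(header string, body, secret []byte) bool {
	method, wantHex, ok := strings.Cut(header, "=")
	if !ok || method != "sha512" {
		return false
	}

	mac := hmac.New(sha512.New, secret)
	mac.Write(body)

	return hmac.Equal([]byte(wantHex), []byte(hex.EncodeToString(mac.Sum(nil))))
}

func main() {
	http.HandleFunc("/callback", func(w http.ResponseWriter, r *http.Request) {
		body, _ := io.ReadAll(r.Body)
		if !verifyXHubSignature(r.Header.Get("X-Hub-Signature"), body, []byte("shared-secret")) {
			// Per WebSub, still acknowledge with 2xx but ignore the content locally.
			w.WriteHeader(http.StatusAccepted)

			return
		}

		w.WriteHeader(http.StatusOK)
	})

	_ = http.ListenAndServe(":8080", nil)
}
```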


@ -0,0 +1,7 @@
package domain
import "net/url"
type QueryAdder interface {
AddQuery(q url.Values)
}

75
internal/domain/secret.go Normal file

@ -0,0 +1,75 @@
package domain
import (
cryptorand "crypto/rand"
"encoding/base64"
"math/rand"
"net/url"
"testing"
"source.toby3d.me/toby3d/hub/internal/common"
)
// Secret describes a subscriber-provided cryptographically random unique secret
// string that will be used to compute an HMAC digest for authorized content
// distribution. If not supplied, the HMAC digest will not be present for
// content distribution requests. This parameter SHOULD only be specified when
// the request was made over HTTPS [RFC2818]. This parameter MUST be less than
// 200 bytes in length.
//
// [RFC2818]: https://tools.ietf.org/html/rfc2818
type Secret struct {
secret string
}
var ErrSyntaxSecret = NewError("secret MUST be less than 200 bytes in length")
var lengthMax = 200
func ParseSecret(raw string) (*Secret, error) {
if len(raw) >= lengthMax {
return nil, ErrSyntaxSecret
}
return &Secret{secret: raw}, nil
}
func (s Secret) IsSet() bool {
return s.secret != ""
}
func (s Secret) AddQuery(q url.Values) {
if s.secret == "" {
return
}
q.Add(common.HubSecret, s.secret)
}
func (s Secret) String() string {
return s.secret
}
// TestSecret returns a valid, randomly generated Secret.
func TestSecret(tb testing.TB) *Secret {
tb.Helper()
src := make([]byte, rand.Intn(lengthMax/2))
if _, err := cryptorand.Read(src); err != nil {
tb.Fatal(err)
}
return &Secret{secret: base64.URLEncoding.EncodeToString(src)}
}
// TestSecretInvalid returns an invalid, randomly generated Secret.
func TestSecretInvalid(tb testing.TB) *Secret {
tb.Helper()
src := make([]byte, lengthMax*2)
if _, err := cryptorand.Read(src); err != nil {
tb.Fatal(err)
}
return &Secret{secret: base64.URLEncoding.EncodeToString(src)}
}


@ -0,0 +1,46 @@
package domain
import (
"math/rand"
"net/url"
"testing"
)
// Subscription is a unique relation to a topic by a subscriber that indicates
// it should receive updates for that topic.
type Subscription struct {
Topic Topic
Callback Callback
Secret Secret
LeaseSeconds LeaseSeconds
}
func (s Subscription) SUID() SUID {
return NewSSID(s.Topic, s.Callback)
}
func (s Subscription) AddQuery(q url.Values) {
for _, w := range []QueryAdder{s.Callback, s.Topic, s.LeaseSeconds, s.Secret} {
w.AddQuery(q)
}
}
func TestSubscription(tb testing.TB, callbackUrl string) *Subscription {
tb.Helper()
callback, err := ParseCallback(callbackUrl)
if err != nil {
tb.Fatal(err)
}
return &Subscription{
Topic: Topic{topic: &url.URL{
Scheme: "https",
Host: "example.com",
Path: "lipsum",
}},
Callback: *callback,
Secret: *TestSecret(tb),
LeaseSeconds: NewLeaseSeconds(uint(rand.Intn(60))),
}
}

29
internal/domain/suid.go Normal file

@ -0,0 +1,29 @@
package domain
// SUID is a subscription's unique key: the tuple ([Topic] URL, subscriber
// [Callback] URL).
type SUID struct {
suid [2]string
}
func NewSSID(topic Topic, callback Callback) SUID {
return SUID{
suid: [2]string{topic.topic.String(), callback.callback.String()},
}
}
func (suid SUID) Equal(target SUID) bool {
for i := range suid.suid {
if suid.suid[i] == target.suid[i] {
continue
}
return false
}
return true
}
func (suid SUID) GoString() string {
return "domain.SUID(" + suid.suid[0] + ":" + suid.suid[1] + ")"
}

38
internal/domain/topic.go Normal file

@ -0,0 +1,38 @@
package domain
import (
"fmt"
"net/url"
"source.toby3d.me/toby3d/hub/internal/common"
)
// Topic is an HTTP [RFC7230] (or HTTPS [RFC2818]) resource URL: the unit
// whose changes one can subscribe to.
//
// [RFC7230]: https://tools.ietf.org/html/rfc7230
// [RFC2818]: https://tools.ietf.org/html/rfc2818
type Topic struct {
topic *url.URL
}
func ParseTopic(str string) (*Topic, error) {
u, err := url.Parse(str)
if err != nil {
return nil, fmt.Errorf("cannot parse string as topic URL: %w", err)
}
return &Topic{topic: u}, nil
}
func (t Topic) AddQuery(q url.Values) {
q.Add(common.HubTopic, t.topic.String())
}
func (t Topic) Equal(target Topic) bool {
return t.topic.String() == target.topic.String()
}
func (t Topic) String() string {
return t.topic.String()
}


@ -0,0 +1,240 @@
package http
import (
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"time"
"golang.org/x/text/language"
"source.toby3d.me/toby3d/hub/internal/common"
"source.toby3d.me/toby3d/hub/internal/domain"
"source.toby3d.me/toby3d/hub/internal/hub"
"source.toby3d.me/toby3d/hub/internal/subscription"
"source.toby3d.me/toby3d/hub/web/template"
)
type (
Request struct {
Callback domain.Callback
Topic domain.Topic
Secret domain.Secret
Mode domain.Mode
LeaseSeconds domain.LeaseSeconds
}
Response struct {
Mode domain.Mode
Topic domain.Topic
Reason string
}
NewHandlerParams struct {
Hub hub.UseCase
Subscriptions subscription.UseCase
Matcher language.Matcher
Name string
}
Handler struct {
hub hub.UseCase
subscriptions subscription.UseCase
matcher language.Matcher
name string
}
)
var DefaultRequestLeaseSeconds = domain.NewLeaseSeconds(uint(time.Duration(time.Hour * 24 * 10).Seconds())) // 10 days
var (
ErrHubMode = errors.New(common.HubMode + " MUST be " + domain.ModeSubscribe.String() + " or " +
domain.ModeUnsubscribe.String())
ErrHubSecret = errors.New(common.HubSecret + " SHOULD be specified when the request was made over HTTPS")
)
func NewHandler(params NewHandlerParams) *Handler {
return &Handler{
hub: params.Hub,
matcher: params.Matcher,
name: params.Name,
subscriptions: params.Subscriptions,
}
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.Method {
default:
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
case http.MethodPost:
req := NewRequest()
if err := req.bind(r); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
s := new(domain.Subscription)
req.populate(s)
switch req.Mode {
case domain.ModeSubscribe:
h.hub.Subscribe(r.Context(), *s)
case domain.ModeUnsubscribe:
h.hub.Unsubscribe(r.Context(), *s)
case domain.ModePublish:
go h.hub.Publish(r.Context(), req.Topic)
}
w.WriteHeader(http.StatusAccepted)
case "", http.MethodGet:
tags, _, _ := language.ParseAcceptLanguage(r.Header.Get(common.HeaderAcceptLanguage))
tag, _, _ := h.matcher.Match(tags...)
baseOf := template.NewBaseOf(tag, h.name)
var page template.Page
if r.URL.Query().Has(common.HubTopic) {
topic, err := domain.ParseTopic(r.URL.Query().Get(common.HubTopic))
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
subscriptions, err := h.subscriptions.Fetch(r.Context(), *topic)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
page = &template.Topic{
BaseOf: baseOf,
Subscribers: len(subscriptions),
}
} else {
page = &template.Home{BaseOf: baseOf}
}
w.Header().Set(common.HeaderContentType, common.MIMETextHTMLCharsetUTF8)
template.WriteTemplate(w, page)
}
}
func NewRequest() *Request {
return &Request{
Mode: domain.ModeUnd,
Callback: domain.Callback{},
Secret: domain.Secret{},
Topic: domain.Topic{},
LeaseSeconds: DefaultRequestLeaseSeconds,
}
}
func (r *Request) bind(req *http.Request) error {
var err error
if err = req.ParseForm(); err != nil {
return fmt.Errorf("cannot parse request form: %w", err)
}
if !req.PostForm.Has(common.HubMode) {
return fmt.Errorf("%s parameter is required, but not provided", common.HubMode)
}
// NOTE(toby3d): hub.mode
if r.Mode, err = domain.ParseMode(req.PostForm.Get(common.HubMode)); err != nil {
return fmt.Errorf("cannot parse %s: %w", common.HubMode, err)
}
switch r.Mode {
case domain.ModePublish:
if !req.PostForm.Has(common.HubURL) {
return fmt.Errorf("%s parameter for %s %s is required, but not provided", common.HubURL,
r.Mode, common.HubMode)
}
topic, err := domain.ParseTopic(req.PostForm.Get(common.HubURL))
if err != nil {
return fmt.Errorf("cannot parse %s: %w", common.HubTopic, err)
}
r.Topic = *topic
case domain.ModeSubscribe, domain.ModeUnsubscribe:
for _, k := range []string{common.HubTopic, common.HubCallback} {
if req.PostForm.Has(k) {
continue
}
return fmt.Errorf("%s parameter is required, but not provided", k)
}
// NOTE(toby3d): hub.topic
topic, err := domain.ParseTopic(req.PostForm.Get(common.HubTopic))
if err != nil {
return fmt.Errorf("cannot parse %s: %w", common.HubTopic, err)
}
r.Topic = *topic
// NOTE(toby3d): hub.callback
callback, err := domain.ParseCallback(req.PostForm.Get(common.HubCallback))
if err != nil {
return fmt.Errorf("cannot parse %s: %w", common.HubCallback, err)
}
r.Callback = *callback
// NOTE(toby3d): hub.lease_seconds
if r.Mode != domain.ModeUnsubscribe && req.PostForm.Has(common.HubLeaseSeconds) {
var ls uint64
if ls, err = strconv.ParseUint(req.PostForm.Get(common.HubLeaseSeconds), 10, 64); err != nil {
return fmt.Errorf("cannot parse %s: %w", common.HubLeaseSeconds, err)
}
if ls != 0 {
r.LeaseSeconds = domain.NewLeaseSeconds(uint(ls))
}
}
// NOTE(toby3d): hub.secret
if !req.PostForm.Has(common.HubSecret) {
if req.TLS != nil {
return ErrHubSecret
}
return nil
}
secret, err := domain.ParseSecret(req.PostForm.Get(common.HubSecret))
if err != nil {
return fmt.Errorf("cannot parse %s: %w", common.HubSecret, err)
}
r.Secret = *secret
}
return nil
}
func (r Request) populate(s *domain.Subscription) {
s.Callback = r.Callback
s.LeaseSeconds = r.LeaseSeconds
s.Secret = r.Secret
s.Topic = r.Topic
}
func NewResponse(t domain.Topic, err error) *Response {
return &Response{
Mode: domain.ModeDenied,
Topic: t,
Reason: err.Error(),
}
}
func (r *Response) populate(q url.Values) {
r.Mode.AddQuery(q)
r.Topic.AddQuery(q)
q.Add(common.HubReason, r.Reason)
}


@ -0,0 +1,111 @@
package http_test
import (
"context"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"golang.org/x/text/language"
"source.toby3d.me/toby3d/hub/internal/common"
"source.toby3d.me/toby3d/hub/internal/domain"
delivery "source.toby3d.me/toby3d/hub/internal/hub/delivery/http"
ucase "source.toby3d.me/toby3d/hub/internal/hub/usecase"
"source.toby3d.me/toby3d/hub/internal/subscription"
subscriptionmemoryrepo "source.toby3d.me/toby3d/hub/internal/subscription/repository/memory"
)
func TestHandler_ServeHTTP_Subscribe(t *testing.T) {
t.Parallel()
srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, r.URL.Query().Get(common.HubChallenge))
}))
t.Cleanup(srv.Close)
in := domain.TestSubscription(t, srv.URL+"/lipsum")
subscriptions := subscriptionmemoryrepo.NewMemorySubscriptionRepository()
hub := ucase.NewHubUseCase(subscriptions, srv.Client(), &url.URL{Scheme: "https", Host: "hub.example.com"})
payload := make(url.Values)
domain.ModeSubscribe.AddQuery(payload)
in.AddQuery(payload)
req := httptest.NewRequest(http.MethodPost, "https://hub.example.com/",
strings.NewReader(payload.Encode()))
req.Header.Set(common.HeaderContentType, common.MIMEApplicationFormCharsetUTF8)
w := httptest.NewRecorder()
delivery.NewHandler(delivery.NewHandlerParams{
Hub: hub,
Subscriptions: subscriptions,
Matcher: language.NewMatcher([]language.Tag{language.English}),
Name: "hub",
}).ServeHTTP(w, req)
resp := w.Result()
if expect := http.StatusAccepted; resp.StatusCode != expect {
t.Errorf("%s %s = %d, want %d", req.Method, req.RequestURI, resp.StatusCode, expect)
}
out, err := subscriptions.Get(context.Background(), in.SUID())
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(out, in, cmp.AllowUnexported(domain.Secret{}, domain.Callback{}, domain.Topic{},
domain.LeaseSeconds{})); diff != "" {
t.Error(diff)
}
}
func TestHandler_ServeHTTP_Unsubscribe(t *testing.T) {
t.Parallel()
srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, r.URL.Query().Get(common.HubChallenge))
}))
t.Cleanup(srv.Close)
in := domain.TestSubscription(t, srv.URL+"/lipsum")
subscriptions := subscriptionmemoryrepo.NewMemorySubscriptionRepository()
if err := subscriptions.Create(context.Background(), in.SUID(), *in); err != nil {
t.Fatal(err)
}
hub := ucase.NewHubUseCase(subscriptions, srv.Client(), &url.URL{Scheme: "https", Host: "hub.example.com"})
payload := make(url.Values)
domain.ModeUnsubscribe.AddQuery(payload)
in.AddQuery(payload)
req := httptest.NewRequest(http.MethodPost, "https://hub.example.com/",
strings.NewReader(payload.Encode()))
req.Header.Set(common.HeaderContentType, common.MIMEApplicationFormCharsetUTF8)
w := httptest.NewRecorder()
delivery.NewHandler(delivery.NewHandlerParams{
Hub: hub,
Subscriptions: subscriptions,
Matcher: language.NewMatcher([]language.Tag{language.English}),
Name: "hub",
}).ServeHTTP(w, req)
resp := w.Result()
if expect := http.StatusAccepted; resp.StatusCode != expect {
t.Errorf("%s %s = %d, want %d", req.Method, req.RequestURI, resp.StatusCode, expect)
}
if _, err := subscriptions.Get(context.Background(), in.SUID()); !errors.Is(err, subscription.ErrNotExist) {
t.Errorf("want %s, got %s", subscription.ErrNotExist, err)
}
}

20
internal/hub/usecase.go Normal file

@ -0,0 +1,20 @@
package hub
import (
"context"
"errors"
"source.toby3d.me/toby3d/hub/internal/domain"
)
type UseCase interface {
Subscribe(ctx context.Context, subscription domain.Subscription) (bool, error)
Unsubscribe(ctx context.Context, subscription domain.Subscription) (bool, error)
Publish(ctx context.Context, t domain.Topic) error
}
var (
ErrStatus = errors.New("subscriber replied with a non 2xx status")
ErrNotFound = errors.New("subscriber denied verification, responding with a 404 status")
ErrChallenge = errors.New("the challenge of the hub and the subscriber do not match")
)


@ -0,0 +1,194 @@
package usecase
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"source.toby3d.me/toby3d/hub/internal/common"
"source.toby3d.me/toby3d/hub/internal/domain"
"source.toby3d.me/toby3d/hub/internal/hub"
"source.toby3d.me/toby3d/hub/internal/subscription"
)
type hubUseCase struct {
subscriptions subscription.Repository
client *http.Client
self *url.URL
}
const (
lengthMin = 16
lengthMax = 32
)
func NewHubUseCase(subscriptions subscription.Repository, client *http.Client, self *url.URL) hub.UseCase {
return &hubUseCase{
subscriptions: subscriptions,
client: client,
self: self,
}
}
func (ucase *hubUseCase) Verify(ctx context.Context, s domain.Subscription, mode domain.Mode) (bool, error) {
challenge, err := domain.NewChallenge(uint8(lengthMin + rand.Intn(lengthMax-lengthMin)))
if err != nil {
return false, fmt.Errorf("cannot generate hub.challenge: %w", err)
}
u := s.Callback.URL()
q := u.Query()
for _, w := range []domain.QueryAdder{mode, s.Topic, challenge} {
w.AddQuery(q)
}
if mode == domain.ModeSubscribe {
s.LeaseSeconds.AddQuery(q)
}
u.RawQuery = q.Encode()
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return false, fmt.Errorf("cannot build verification request: %w", err)
}
resp, err := ucase.client.Do(req)
if err != nil {
return false, fmt.Errorf("cannot send verification request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return false, hub.ErrNotFound
}
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
return false, hub.ErrStatus
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return false, fmt.Errorf("cannot verify subscriber response body: %w", err)
}
if !challenge.Equal(body) {
return false, fmt.Errorf("%w: got '%s', want '%s'", hub.ErrChallenge, body, *challenge)
}
return true, nil
}
func (ucase *hubUseCase) Subscribe(ctx context.Context, s domain.Subscription) (bool, error) {
var err error
if _, err = ucase.Verify(ctx, s, domain.ModeSubscribe); err != nil {
return false, fmt.Errorf("cannot validate subscription request: %w", err)
}
suid := s.SUID()
if _, err = ucase.subscriptions.Get(ctx, suid); err != nil {
if !errors.Is(err, subscription.ErrNotExist) {
return false, fmt.Errorf("cannot check exists subscriptions: %w", err)
}
if err = ucase.subscriptions.Create(ctx, suid, s); err != nil {
return false, fmt.Errorf("cannot create a new subscription: %w", err)
}
return true, nil
}
if err = ucase.subscriptions.Update(ctx, suid, func(buf *domain.Subscription) (*domain.Subscription, error) {
buf.LeaseSeconds = s.LeaseSeconds
buf.Secret = s.Secret
return buf, nil
}); err != nil {
return false, fmt.Errorf("cannot update subscription: %w", err)
}
return false, nil
}
func (ucase *hubUseCase) Unsubscribe(ctx context.Context, s domain.Subscription) (bool, error) {
var err error
if _, err = ucase.Verify(ctx, s, domain.ModeUnsubscribe); err != nil {
return false, fmt.Errorf("cannot validate unsubscription request: %w", err)
}
if err = ucase.subscriptions.Delete(ctx, s.SUID()); err != nil {
return false, fmt.Errorf("cannot remove subscription: %w", err)
}
return true, nil
}
func (ucase *hubUseCase) Publish(ctx context.Context, t domain.Topic) error {
resp, err := ucase.client.Get(t.String())
if err != nil {
return fmt.Errorf("cannot fetch topic payload for publishing: %w", err)
}
defer resp.Body.Close()
push := domain.Push{ContentType: resp.Header.Get(common.HeaderContentType)}
canonicalTopic, err := domain.ParseTopic(resp.Request.URL.String())
if err != nil {
return fmt.Errorf("cannot parse canonical topic URL: %w", err)
}
push.Self = *canonicalTopic
if push.Content, err = io.ReadAll(resp.Body); err != nil {
return fmt.Errorf("cannot read topic body: %w", err)
}
subscriptions, err := ucase.subscriptions.Fetch(ctx, t)
if err != nil {
return fmt.Errorf("cannot fetch subscriptions for topic: %w", err)
}
for i := range subscriptions {
ucase.Push(ctx, push, subscriptions[i])
}
return nil
}
func (ucase *hubUseCase) Push(ctx context.Context, p domain.Push, s domain.Subscription) (bool, error) {
req, err := http.NewRequest(http.MethodPost, s.Callback.String(), bytes.NewReader(p.Content))
if err != nil {
return false, fmt.Errorf("cannot build push request: %w", err)
}
req.Header.Set(common.HeaderContentType, p.ContentType)
req.Header.Set(common.HeaderLink, `<`+ucase.self.String()+`>; rel="hub", <`+p.Self.String()+`>; rel="self"`)
p.SetXHubSignatureHeader(req, domain.AlgorithmSHA512, s.Secret)
resp, err := ucase.client.Do(req)
if err != nil {
return false, fmt.Errorf("cannot push: %w", err)
}
defer resp.Body.Close()
// The subscriber's callback URL MAY return an HTTP 410 code to indicate that the subscription has been
// deleted, and the hub MAY terminate the subscription if it receives that code as a response.
if resp.StatusCode == http.StatusGone {
if err = ucase.subscriptions.Delete(ctx, s.SUID()); err != nil {
return false, fmt.Errorf("cannot remove deleted subscription: %w", err)
}
return true, nil
}
// The subscriber's callback URL MUST return an HTTP 2xx response code to indicate a success.
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
return false, hub.ErrStatus
}
return true, nil
}


@ -0,0 +1,126 @@
package middleware
import (
"io"
"net/http"
"os"
"strings"
"sync/atomic"
"time"
"github.com/go-logfmt/logfmt"
)
type (
LogFmtConfig struct {
// Skipper defines a function to skip middleware.
Skipper Skipper
// Output is a writer where logs in logfmt format are written.
// Optional. Default value os.Stdout.
Output io.Writer
}
logFmtResponse struct {
start time.Time
http.ResponseWriter
error error
id uint64
statusCode int
responseLength int
}
)
//nolint:gochecknoglobals // default configuration
var DefaultLogFmtConfig = LogFmtConfig{
Skipper: DefaultSkipper,
Output: os.Stdout,
}
//nolint:gochecknoglobals
var globalConnID uint64
func LogFmt() Interceptor {
c := DefaultLogFmtConfig
return LogFmtWithConfig(c)
}
func LogFmtWithConfig(config LogFmtConfig) Interceptor {
if config.Skipper == nil {
config.Skipper = DefaultLogFmtConfig.Skipper
}
if config.Output == nil {
config.Output = DefaultLogFmtConfig.Output
}
encoder := logfmt.NewEncoder(config.Output)
return func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
rw := &logFmtResponse{
id: nextConnID(),
responseLength: 0,
ResponseWriter: w,
start: time.Now().UTC(),
statusCode: 0,
}
next(rw, r)
end := time.Now().UTC()
encoder.EncodeKeyvals(
"bytes_in", r.ContentLength,
"bytes_out", rw.responseLength,
"error", rw.error,
"host", r.Host,
"id", rw.id,
"latency", end.Sub(rw.start).Nanoseconds(),
"latency_human", end.Sub(rw.start).String(),
"method", r.Method,
"path", r.URL.Path,
"protocol", r.Proto,
"referer", r.Referer(),
"remote_ip", r.RemoteAddr,
"status", rw.statusCode,
"time_rfc3339", rw.start.Format(time.RFC3339),
"time_rfc3339_nano", rw.start.Format(time.RFC3339Nano),
"time_unix", rw.start.Unix(),
"time_unix_nano", rw.start.UnixNano(),
"uri", r.RequestURI,
"user_agent", r.UserAgent(),
)
for name, src := range map[string]map[string][]string{
"form": r.PostForm,
"header": r.Header,
"query": r.URL.Query(),
} {
for k, v := range src {
encoder.EncodeKeyval(name+"_"+strings.ReplaceAll(strings.ToLower(k), "-", "_"), v)
}
}
encoder.EndRecord()
}
}
func (r *logFmtResponse) WriteHeader(status int) {
r.statusCode = status
r.ResponseWriter.WriteHeader(status)
}
func (r *logFmtResponse) Write(src []byte) (int, error) {
var l int
l, r.error = r.ResponseWriter.Write(src)
r.responseLength += l
return l, r.error
}
func nextConnID() uint64 {
return atomic.AddUint64(&globalConnID, 1)
}


@ -0,0 +1,36 @@
package middleware
import (
"net/http"
)
type (
BeforeFunc = http.HandlerFunc
Chain []Interceptor
Interceptor func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc)
HandlerFunc http.HandlerFunc
Skipper func(r *http.Request) bool
)
var DefaultSkipper Skipper = func(_ *http.Request) bool { return false }
func (handler HandlerFunc) Intercept(middleware Interceptor) HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
middleware(w, r, http.HandlerFunc(handler))
}
}
func (chain Chain) Handler(handler http.HandlerFunc) http.Handler {
current := HandlerFunc(handler)
for i := len(chain) - 1; i >= 0; i-- {
m := chain[i]
current = current.Intercept(m)
}
return http.HandlerFunc(current)
}
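
A short sketch of how this chain might be composed (the handler and address are hypothetical, shown only to illustrate the Chain and Interceptor types defined above):

``` go
package main

import (
	"net/http"

	"source.toby3d.me/toby3d/hub/internal/middleware"
)

func main() {
	// chain[0] ends up outermost, so LogFmt wraps everything that follows.
	chain := middleware.Chain{middleware.LogFmt()}

	handler := chain.Handler(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	})

	_ = http.ListenAndServe(":8080", handler)
}
```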


@ -0,0 +1,25 @@
package subscription
import (
"context"
"errors"
"source.toby3d.me/toby3d/hub/internal/domain"
)
type (
UpdateFunc func(subscription *domain.Subscription) (*domain.Subscription, error)
Repository interface {
Create(ctx context.Context, suid domain.SUID, subscription domain.Subscription) error
Get(ctx context.Context, suid domain.SUID) (*domain.Subscription, error)
Fetch(ctx context.Context, topic domain.Topic) ([]domain.Subscription, error)
Update(ctx context.Context, suid domain.SUID, update UpdateFunc) error
Delete(ctx context.Context, suid domain.SUID) error
}
)
var (
ErrNotExist = errors.New("subscription does not exist")
ErrExist = errors.New("subscription already exists")
)


@ -0,0 +1,109 @@
package memory
import (
"context"
"errors"
"fmt"
"sync"
"source.toby3d.me/toby3d/hub/internal/domain"
"source.toby3d.me/toby3d/hub/internal/subscription"
)
type memorySubscriptionRepository struct {
mutex *sync.RWMutex
subscriptions map[domain.SUID]domain.Subscription
}
// Create implements subscription.Repository
func (repo *memorySubscriptionRepository) Create(ctx context.Context, suid domain.SUID, s domain.Subscription) error {
_, err := repo.Get(ctx, suid)
if err != nil {
if !errors.Is(err, subscription.ErrNotExist) {
return fmt.Errorf("cannot create subscription: %w", err)
}
} else {
return fmt.Errorf("cannot create subscription: %w", subscription.ErrExist)
}
repo.mutex.Lock()
defer repo.mutex.Unlock()
repo.subscriptions[suid] = s
return nil
}
// Delete implements subscription.Repository
func (repo *memorySubscriptionRepository) Delete(ctx context.Context, suid domain.SUID) error {
if _, err := repo.Get(ctx, suid); err != nil {
if !errors.Is(err, subscription.ErrNotExist) {
return fmt.Errorf("cannot delete subscription: %w", err)
}
return nil
}
repo.mutex.Lock()
defer repo.mutex.Unlock()
delete(repo.subscriptions, suid)
return nil
}
// Get implements subscription.Repository
func (repo *memorySubscriptionRepository) Get(_ context.Context, suid domain.SUID) (*domain.Subscription, error) {
repo.mutex.RLock()
defer repo.mutex.RUnlock()
if out, ok := repo.subscriptions[suid]; ok {
return &out, nil
}
return nil, subscription.ErrNotExist
}
func (repo *memorySubscriptionRepository) Fetch(ctx context.Context, t domain.Topic) ([]domain.Subscription, error) {
repo.mutex.RLock()
defer repo.mutex.RUnlock()
out := make([]domain.Subscription, 0)
for _, s := range repo.subscriptions {
if !s.Topic.Equal(t) {
continue
}
out = append(out, s)
}
return out, nil
}
// Update implements subscription.Repository
func (repo *memorySubscriptionRepository) Update(ctx context.Context, suid domain.SUID, update subscription.UpdateFunc) error {
in, err := repo.Get(ctx, suid)
if err != nil {
return fmt.Errorf("cannot update subscription: %w", err)
}
repo.mutex.Lock()
defer repo.mutex.Unlock()
out, err := update(in)
if err != nil {
return fmt.Errorf("cannot update subscription: %w", err)
}
repo.subscriptions[suid] = *out
return nil
}
func NewMemorySubscriptionRepository() subscription.Repository {
return &memorySubscriptionRepository{
mutex: new(sync.RWMutex),
subscriptions: make(map[domain.SUID]domain.Subscription),
}
}
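
A minimal sketch of exercising this repository (the URLs are placeholders; only the exported constructors from the domain package above are assumed):

``` go
package main

import (
	"context"
	"fmt"

	"source.toby3d.me/toby3d/hub/internal/domain"
	"source.toby3d.me/toby3d/hub/internal/subscription/repository/memory"
)

func main() {
	topic, _ := domain.ParseTopic("https://example.com/feed")
	callback, _ := domain.ParseCallback("https://subscriber.example.net/callback")
	s := domain.Subscription{Topic: *topic, Callback: *callback}

	repo := memory.NewMemorySubscriptionRepository()
	if err := repo.Create(context.Background(), s.SUID(), s); err != nil {
		panic(err)
	}

	// Fetch returns every stored subscription whose topic matches.
	got, _ := repo.Fetch(context.Background(), *topic)
	fmt.Println(len(got)) // 1
}
```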


@ -0,0 +1,11 @@
package subscription
import (
"context"
"source.toby3d.me/toby3d/hub/internal/domain"
)
type UseCase interface {
Fetch(ctx context.Context, topic domain.Topic) ([]domain.Subscription, error)
}


@ -0,0 +1,28 @@
package usecase
import (
"context"
"fmt"
"source.toby3d.me/toby3d/hub/internal/domain"
"source.toby3d.me/toby3d/hub/internal/subscription"
)
type subscriptionUseCase struct {
subscriptions subscription.Repository
}
func NewSubscriptionUseCase(subscriptions subscription.Repository) subscription.UseCase {
return &subscriptionUseCase{
subscriptions: subscriptions,
}
}
func (ucase *subscriptionUseCase) Fetch(ctx context.Context, topic domain.Topic) ([]domain.Subscription, error) {
out, err := ucase.subscriptions.Fetch(ctx, topic)
if err != nil {
return nil, fmt.Errorf("cannot fetch subscriptions for topic: %w", err)
}
return out, nil
}


@ -0,0 +1,40 @@
package sqltest
import (
"database/sql/driver"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/jmoiron/sqlx"
_ "modernc.org/sqlite" // used for running tests without same import in "god object"
)
type Time struct{}
func (Time) Match(v driver.Value) bool {
_, ok := v.(time.Time)
return ok
}
// Open creates a new sqlmock-backed sqlx database connection for testing.
func Open(tb testing.TB) (*sqlx.DB, sqlmock.Sqlmock, func()) {
tb.Helper()
db, mock, err := sqlmock.New()
if err != nil {
tb.Fatal(err)
}
xdb := sqlx.NewDb(db, "sqlite")
if err = xdb.Ping(); err != nil {
_ = db.Close()
tb.Fatal(err)
}
return xdb, mock, func() {
_ = db.Close()
}
}


@ -0,0 +1,22 @@
package urlutil
import (
"path"
"strings"
)
// ShiftPath splits off the first component of p, which will be cleaned of
// relative components before processing. head will never contain a slash and
// tail will always be a rooted path without trailing slash.
//
// See: https://blog.merovius.de/posts/2017-06-18-how-not-to-use-an-http-router/
func ShiftPath(p string) (head, tail string) {
p = path.Clean("/" + p)
i := strings.Index(p[1:], "/") + 1
if i <= 0 {
return p[1:], "/"
}
return p[1:i], p[i:]
}
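
Concretely, a small worked example of the split (illustrative, not part of the commit):

``` go
package main

import (
	"fmt"

	"source.toby3d.me/toby3d/hub/internal/urlutil"
)

func main() {
	head, tail := urlutil.ShiftPath("/static/css/main.css")
	fmt.Println(head, tail) // static /css/main.css

	head, tail = urlutil.ShiftPath("/")
	fmt.Println(head == "", tail) // true /
}
```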


@ -0,0 +1,60 @@
{
"language": "en",
"messages": [
{
"id": "{Name} logo",
"message": "{Name} logo",
"translation": "{Name} logo",
"translatorComment": "Copied from source.",
"placeholders": [
{
"id": "Name",
"string": "%[1]s",
"type": "string",
"underlyingType": "string",
"argNum": 1,
"expr": "p.name"
}
],
"fuzzy": true
},
{
"id": "Dead simple WebSub hub",
"message": "Dead simple WebSub hub",
"translation": "Dead simple WebSub hub",
"translatorComment": "Copied from source.",
"fuzzy": true
},
{
"id": "How to publish and consume?",
"message": "How to publish and consume?",
"translation": "How to publish and consume?",
"translatorComment": "Copied from source.",
"fuzzy": true
},
{
"id": "What the spec?",
"message": "What the spec?",
"translation": "What the spec?",
"translatorComment": "Copied from source.",
"fuzzy": true
},
{
"id": "{Subscribers} subscribers",
"message": "{Subscribers} subscribers",
"translation": "{Subscribers} subscribers",
"translatorComment": "Copied from source.",
"placeholders": [
{
"id": "Subscribers",
"string": "%[1]d",
"type": "int",
"underlyingType": "int",
"argNum": 1,
"expr": "p.Subscribers"
}
],
"fuzzy": true
}
]
}


@ -0,0 +1,50 @@
{
"language": "ru",
"messages": [
{
"id": "{Name} logo",
"message": "{Name} logo",
"translation": "логотип {Name}",
"placeholders": [
{
"id": "Name",
"string": "%[1]s",
"type": "string",
"underlyingType": "string",
"argNum": 1,
"expr": "p.name"
}
]
},
{
"id": "Dead simple WebSub hub",
"message": "Dead simple WebSub hub",
"translation": "Простейший хаб WebSub"
},
{
"id": "How to publish and consume?",
"message": "How to publish and consume?",
"translation": "Как публиковать и принимать?"
},
{
"id": "What the spec?",
"message": "What the spec?",
"translation": "В чём спека?"
},
{
"id": "{Subscribers} subscribers",
"message": "{Subscribers} subscribers",
"translation": "{Subscribers} подписчиков",
"placeholders": [
{
"id": "Subscribers",
"string": "%[1]d",
"type": "int",
"underlyingType": "int",
"argNum": 1,
"expr": "p.Subscribers"
}
]
}
]
}


@ -0,0 +1,50 @@
{
"language": "ru",
"messages": [
{
"id": "{Name} logo",
"message": "{Name} logo",
"translation": "логотип {Name}",
"placeholders": [
{
"id": "Name",
"string": "%[1]s",
"type": "string",
"underlyingType": "string",
"argNum": 1,
"expr": "p.name"
}
]
},
{
"id": "Dead simple WebSub hub",
"message": "Dead simple WebSub hub",
"translation": "Простейший хаб WebSub"
},
{
"id": "How to publish and consume?",
"message": "How to publish and consume?",
"translation": "Как публиковать и принимать?"
},
{
"id": "What the spec?",
"message": "What the spec?",
"translation": "В чём спека?"
},
{
"id": "{Subscribers} subscribers",
"message": "{Subscribers} subscribers",
"translation": "{Subscribers} подписчиков",
"placeholders": [
{
"id": "Subscribers",
"string": "%[1]d",
"type": "int",
"underlyingType": "int",
"argNum": 1,
"expr": "p.Subscribers"
}
]
}
]
}

93
main.go Normal file

@ -0,0 +1,93 @@
//go:generate go install github.com/valyala/quicktemplate/qtc@latest
//go:generate qtc -dir=web
//go:generate go install golang.org/x/text/cmd/gotext@latest
//go:generate gotext -srclang=en update -out=catalog_gen.go -lang=en,ru
package main
import (
"embed"
"io/fs"
"log"
"net/http"
"os"
"path/filepath"
"time"
"github.com/caarlos0/env/v6"
"golang.org/x/text/feature/plural"
"golang.org/x/text/language"
"golang.org/x/text/message"
"source.toby3d.me/toby3d/hub/internal/domain"
hubhttprelivery "source.toby3d.me/toby3d/hub/internal/hub/delivery/http"
hubucase "source.toby3d.me/toby3d/hub/internal/hub/usecase"
"source.toby3d.me/toby3d/hub/internal/middleware"
subscriptionmemoryrepo "source.toby3d.me/toby3d/hub/internal/subscription/repository/memory"
subscriptionucase "source.toby3d.me/toby3d/hub/internal/subscription/usecase"
"source.toby3d.me/toby3d/hub/internal/urlutil"
)
var logger = log.New(os.Stdout, "hub", log.LstdFlags)
//go:embed web/static/*
var static embed.FS
func init() {
message.Set(language.English, "%d subscribers",
plural.Selectf(1, "%d",
"one", "%d subscriber",
"other", "%d subscribers",
),
)
message.Set(language.Russian, "%d subscribers",
plural.Selectf(1, "%d",
"one", "%d подписчик",
"few", "%d подписчика",
"many", "%d подписчиков",
"other", "%d подписчика",
),
)
}
func main() {
config := new(domain.Config)
if err := env.Parse(config, env.Options{Prefix: "HUB_"}); err != nil {
logger.Fatalln(err)
}
static, err := fs.Sub(static, filepath.Join("web"))
if err != nil {
logger.Fatalln(err)
}
client := &http.Client{Timeout: 1 * time.Second}
matcher := language.NewMatcher(message.DefaultCatalog.Languages())
subscriptions := subscriptionmemoryrepo.NewMemorySubscriptionRepository()
hub := hubhttprelivery.NewHandler(hubhttprelivery.NewHandlerParams{
Hub: hubucase.NewHubUseCase(subscriptions, client, config.BaseURL),
Subscriptions: subscriptionucase.NewSubscriptionUseCase(subscriptions),
Matcher: matcher,
Name: config.Name,
})
server := &http.Server{
Addr: ":3000",
Handler: http.HandlerFunc(middleware.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
head, _ := urlutil.ShiftPath(r.URL.Path)
switch head {
case "":
hub.ServeHTTP(w, r)
case "static":
http.FileServer(http.FS(static)).ServeHTTP(w, r)
}
}).Intercept(middleware.LogFmt())),
ReadTimeout: 1 * time.Second,
WriteTimeout: 1 * time.Second,
ErrorLog: logger,
}
if err = server.ListenAndServe(); err != nil {
logger.Fatalln(err)
}
}

4
vendor/github.com/DATA-DOG/go-sqlmock/.gitignore generated vendored Normal file

@ -0,0 +1,4 @@
/examples/blog/blog
/examples/orders/orders
/examples/basic/basic
.idea/

26
vendor/github.com/DATA-DOG/go-sqlmock/.travis.yml generated vendored Normal file

@ -0,0 +1,26 @@
language: go
go_import_path: github.com/DATA-DOG/go-sqlmock
go:
- 1.2.x
- 1.3.x
- 1.4 # has no cover tool for latest releases
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x
- 1.14.x
script:
- go vet
- test -z "$(go fmt ./...)" # fail if not formatted properly
- go test -race -coverprofile=coverage.txt -covermode=atomic
after_success:
- bash <(curl -s https://codecov.io/bash)

28
vendor/github.com/DATA-DOG/go-sqlmock/LICENSE generated vendored Normal file

@ -0,0 +1,28 @@
The three clause BSD license (http://en.wikipedia.org/wiki/BSD_licenses)
Copyright (c) 2013-2019, DATA-DOG team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The name DataDog.lt may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

265
vendor/github.com/DATA-DOG/go-sqlmock/README.md generated vendored Normal file

@ -0,0 +1,265 @@
[![Build Status](https://travis-ci.org/DATA-DOG/go-sqlmock.svg)](https://travis-ci.org/DATA-DOG/go-sqlmock)
[![GoDoc](https://godoc.org/github.com/DATA-DOG/go-sqlmock?status.svg)](https://godoc.org/github.com/DATA-DOG/go-sqlmock)
[![Go Report Card](https://goreportcard.com/badge/github.com/DATA-DOG/go-sqlmock)](https://goreportcard.com/report/github.com/DATA-DOG/go-sqlmock)
[![codecov.io](https://codecov.io/github/DATA-DOG/go-sqlmock/branch/master/graph/badge.svg)](https://codecov.io/github/DATA-DOG/go-sqlmock)
# Sql driver mock for Golang
**sqlmock** is a mock library implementing [sql/driver](https://godoc.org/database/sql/driver). It has one and only one
purpose: to simulate any **sql** driver behavior in tests, without needing a real database connection. It helps to
maintain a correct **TDD** workflow.
- this library is now complete and stable (you may not find new changes for this reason).
- supports concurrency and multiple connections.
- supports **go1.8** Context-related feature mocking and named sql parameters.
- does not require any modifications to your source code.
- the driver allows mocking any sql driver method behavior.
- matches expectations in strict order by default.
- has no third-party dependencies.
**NOTE:** in **v1.2.0** **sqlmock.Rows** changed from an interface to a struct; if you had any type references to that
interface, you will need to switch them to a pointer to the struct type. Also, **sqlmock.Rows** used to implement the **driver.Rows**
interface, which was not required or useful for mocking and was removed. Hopefully this will not cause issues.
## Looking for maintainers
I do not have much spare time for this library and am willing to transfer the repository ownership
to a person or an organization motivated to maintain it. Open up a conversation if you are interested. See #230.
## Install
go get github.com/DATA-DOG/go-sqlmock
## Documentation and Examples
Visit [godoc](http://godoc.org/github.com/DATA-DOG/go-sqlmock) for general examples and public api reference.
See **.travis.yml** for supported **go** versions.
A different use case is functional testing with a real database using [go-txdb](https://github.com/DATA-DOG/go-txdb):
all database-related actions are isolated within a single transaction, so the database can remain in the same state.
See implementation examples:
- [blog API server](https://github.com/DATA-DOG/go-sqlmock/tree/master/examples/blog)
- [the same orders example](https://github.com/DATA-DOG/go-sqlmock/tree/master/examples/orders)
### Something you may want to test, assuming you use the [go-mysql-driver](https://github.com/go-sql-driver/mysql)
``` go
package main
import (
"database/sql"
_ "github.com/go-sql-driver/mysql"
)
func recordStats(db *sql.DB, userID, productID int64) (err error) {
tx, err := db.Begin()
if err != nil {
return
}
defer func() {
switch err {
case nil:
err = tx.Commit()
default:
tx.Rollback()
}
}()
if _, err = tx.Exec("UPDATE products SET views = views + 1"); err != nil {
return
}
if _, err = tx.Exec("INSERT INTO product_viewers (user_id, product_id) VALUES (?, ?)", userID, productID); err != nil {
return
}
return
}
func main() {
// @NOTE: the real connection is not required for tests
db, err := sql.Open("mysql", "root@/blog")
if err != nil {
panic(err)
}
defer db.Close()
if err = recordStats(db, 1 /*some user id*/, 5 /*some product id*/); err != nil {
panic(err)
}
}
```
### Tests with sqlmock
``` go
package main
import (
"fmt"
"testing"
"github.com/DATA-DOG/go-sqlmock"
)
// a successful case
func TestShouldUpdateStats(t *testing.T) {
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
}
defer db.Close()
mock.ExpectBegin()
mock.ExpectExec("UPDATE products").WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectExec("INSERT INTO product_viewers").WithArgs(2, 3).WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectCommit()
// now we execute our method
if err = recordStats(db, 2, 3); err != nil {
t.Errorf("error was not expected while updating stats: %s", err)
}
// we make sure that all expectations were met
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %s", err)
}
}
// a failing test case
func TestShouldRollbackStatUpdatesOnFailure(t *testing.T) {
db, mock, err := sqlmock.New()
if err != nil {
t.Fatalf("an error '%s' was not expected when opening a stub database connection", err)
}
defer db.Close()
mock.ExpectBegin()
mock.ExpectExec("UPDATE products").WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectExec("INSERT INTO product_viewers").
WithArgs(2, 3).
WillReturnError(fmt.Errorf("some error"))
mock.ExpectRollback()
// now we execute our method
if err = recordStats(db, 2, 3); err == nil {
t.Errorf("was expecting an error, but there was none")
}
// we make sure that all expectations were met
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %s", err)
}
}
```
## Customize SQL query matching
There were plenty of requests from users regarding SQL query string validation or different matching options.
We have now implemented the `QueryMatcher` interface, which can be passed through an option when calling
`sqlmock.New` or `sqlmock.NewWithDSN`.
This makes it possible to plug in an external library that can, for example, parse and validate a `mysql` SQL AST,
and to create a custom QueryMatcher in order to validate SQL in more sophisticated ways.
By default, **sqlmock** preserves backward compatibility: the default query matcher is `sqlmock.QueryMatcherRegexp`,
which uses the expected SQL string as a regular expression to match the incoming query string. There is also an equality matcher,
`QueryMatcherEqual`, which does a full case-sensitive match.
In order to customize the QueryMatcher, use the following:
``` go
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
```
The query matcher can be fully customized based on user needs. **sqlmock** will not
provide standard sql parsing matchers, since various drivers may not follow the same SQL standard.
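As an illustrative sketch (only `QueryMatcherFunc`, `QueryMatcherOption` and `sqlmock.New` are the library's API; the matcher itself and its name `verbMatcher` are hypothetical), a custom matcher might compare only the leading SQL verb:

``` go
// Hypothetical custom matcher: only require that the actual query starts with
// the same SQL verb (SELECT, INSERT, ...) as the expected one.
// Requires the "fmt" and "strings" imports.
var verbMatcher sqlmock.QueryMatcher = sqlmock.QueryMatcherFunc(func(expectedSQL, actualSQL string) error {
	want := strings.ToUpper(strings.TrimSpace(expectedSQL))
	got := strings.ToUpper(strings.TrimSpace(actualSQL))
	verb := strings.SplitN(want, " ", 2)[0]
	if !strings.HasPrefix(got, verb) {
		return fmt.Errorf("actual sql %q does not start with expected verb %q", actualSQL, verb)
	}
	return nil
})

db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(verbMatcher))
```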
## Matching arguments like time.Time
There may be arguments of `struct` type which cannot easily be compared by value, such as `time.Time`. In this case
**sqlmock** provides an [Argument](https://godoc.org/github.com/DATA-DOG/go-sqlmock#Argument) interface which
can be used for more sophisticated matching. Here is a simple example of time argument matching:
``` go
type AnyTime struct{}
// Match satisfies sqlmock.Argument interface
func (a AnyTime) Match(v driver.Value) bool {
_, ok := v.(time.Time)
return ok
}
func TestAnyTimeArgument(t *testing.T) {
t.Parallel()
db, mock, err := New()
if err != nil {
t.Errorf("an error '%s' was not expected when opening a stub database connection", err)
}
defer db.Close()
mock.ExpectExec("INSERT INTO users").
WithArgs("john", AnyTime{}).
WillReturnResult(NewResult(1, 1))
_, err = db.Exec("INSERT INTO users(name, created_at) VALUES (?, ?)", "john", time.Now())
if err != nil {
t.Errorf("error '%s' was not expected, while inserting a row", err)
}
if err := mock.ExpectationsWereMet(); err != nil {
t.Errorf("there were unfulfilled expectations: %s", err)
}
}
```
It only asserts that the argument is of type `time.Time`.
## Run tests
go test -race
## Change Log
- **2019-04-06** - added functionality to mock a sql MetaData request
- **2019-02-13** - added `go.mod` removed the references and suggestions using `gopkg.in`.
- **2018-12-11** - added expectation of Rows to be closed, while mocking expected query.
- **2018-12-11** - introduced an option to provide **QueryMatcher** in order to customize SQL query matching.
- **2017-09-01** - it is now possible to expect that prepared statement will be closed,
using **ExpectedPrepare.WillBeClosed**.
- **2017-02-09** - implemented support for **go1.8** features. **Rows** interface was changed to struct
but contains all methods as before and should maintain backwards compatibility. **ExpectedQuery.WillReturnRows** may now
accept multiple row sets.
- **2016-11-02** - `db.Prepare()` was not validating expected prepare SQL
query. It should still be validated even if Exec or Query is not
executed on that prepared statement.
- **2016-02-23** - added **sqlmock.AnyArg()** function to provide any kind
of argument matcher.
- **2016-02-23** - convert expected arguments to driver.Value as natural
driver does, the change may affect time.Time comparison and will be
stricter. See [issue](https://github.com/DATA-DOG/go-sqlmock/issues/31).
- **2015-08-27** - **v1** api change, concurrency support, all known issues fixed.
- **2014-08-16** instead of **panic** during reflect type mismatch when comparing query arguments - now return error
- **2014-08-14** added **sqlmock.NewErrorResult** which gives an option to return driver.Result with errors for
interface methods, see [issue](https://github.com/DATA-DOG/go-sqlmock/issues/5)
- **2014-05-29** allow to match arguments in more sophisticated ways, by providing an **sqlmock.Argument** interface
- **2014-04-21** introduce **sqlmock.New()** to open a mock database connection for tests. This method
calls sql.DB.Ping to ensure that connection is open, see [issue](https://github.com/DATA-DOG/go-sqlmock/issues/4).
This way on Close it will surely assert if all expectations are met, even if database was not triggered at all.
The old way is still available, but it is advisable to call db.Ping manually before asserting with db.Close.
- **2014-02-14** RowsFromCSVString is now a part of Rows interface named as FromCSVString.
It has changed to allow more ways to construct rows and to easily extend this API in future.
See [issue 1](https://github.com/DATA-DOG/go-sqlmock/issues/1)
**RowsFromCSVString** is deprecated and will be removed in future
## Contributions
Feel free to open a pull request. Note: if you wish to contribute an extension to the public API (exported methods or types),
please open an issue first to discuss whether these changes can be accepted. All backward-incompatible changes are
and will be treated cautiously.
## License
The [three clause BSD license](http://en.wikipedia.org/wiki/BSD_licenses)

24
vendor/github.com/DATA-DOG/go-sqlmock/argument.go generated vendored Normal file

@ -0,0 +1,24 @@
package sqlmock
import "database/sql/driver"
// Argument interface allows to match
// any argument in specific way when used with
// ExpectedQuery and ExpectedExec expectations.
type Argument interface {
Match(driver.Value) bool
}
// AnyArg will return an Argument which can
// match any kind of arguments.
//
// Useful for time.Time or similar kinds of arguments.
func AnyArg() Argument {
return anyArgument{}
}
type anyArgument struct{}
func (a anyArgument) Match(_ driver.Value) bool {
return true
}
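A brief usage sketch, assuming a `mock` obtained from `sqlmock.New()` as in the README examples; the table and values below are illustrative:

``` go
// Accept any value for the created_at column while still matching "john" exactly.
mock.ExpectExec("INSERT INTO users").
	WithArgs("john", sqlmock.AnyArg()).
	WillReturnResult(sqlmock.NewResult(1, 1))
```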

77
vendor/github.com/DATA-DOG/go-sqlmock/column.go generated vendored Normal file

@ -0,0 +1,77 @@
package sqlmock
import "reflect"
// Column is a mocked column Metadata for rows.ColumnTypes()
type Column struct {
name string
dbType string
nullable bool
nullableOk bool
length int64
lengthOk bool
precision int64
scale int64
psOk bool
scanType reflect.Type
}
func (c *Column) Name() string {
return c.name
}
func (c *Column) DbType() string {
return c.dbType
}
func (c *Column) IsNullable() (bool, bool) {
return c.nullable, c.nullableOk
}
func (c *Column) Length() (int64, bool) {
return c.length, c.lengthOk
}
func (c *Column) PrecisionScale() (int64, int64, bool) {
return c.precision, c.scale, c.psOk
}
func (c *Column) ScanType() reflect.Type {
return c.scanType
}
// NewColumn returns a Column with specified name
func NewColumn(name string) *Column {
return &Column{
name: name,
}
}
// Nullable returns the column with nullable metadata set
func (c *Column) Nullable(nullable bool) *Column {
c.nullable = nullable
c.nullableOk = true
return c
}
// OfType returns the column with type metadata set
func (c *Column) OfType(dbType string, sampleValue interface{}) *Column {
c.dbType = dbType
c.scanType = reflect.TypeOf(sampleValue)
return c
}
// WithLength returns the column with length metadata set.
func (c *Column) WithLength(length int64) *Column {
c.length = length
c.lengthOk = true
return c
}
// WithPrecisionAndScale returns the column with precision and scale metadata set.
func (c *Column) WithPrecisionAndScale(precision, scale int64) *Column {
c.precision = precision
c.scale = scale
c.psOk = true
return c
}
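A usage sketch, assuming a `mock` from `sqlmock.New()`; the column names and types are illustrative. Column metadata built with these setters is meant to be passed to `NewRowsWithColumnDefinition` (see rows_go18.go) so that `rows.ColumnTypes()` can be exercised in tests:

``` go
// Describe two columns, then build rows that carry this metadata.
cols := []*sqlmock.Column{
	sqlmock.NewColumn("id").OfType("BIGINT", int64(0)).Nullable(false),
	sqlmock.NewColumn("title").OfType("VARCHAR", "").WithLength(255).Nullable(true),
}
rows := sqlmock.NewRowsWithColumnDefinition(cols...).AddRow(int64(1), "first post")
mock.ExpectQuery("SELECT (.+) FROM posts").WillReturnRows(rows)
```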

81
vendor/github.com/DATA-DOG/go-sqlmock/driver.go generated vendored Normal file

@ -0,0 +1,81 @@
package sqlmock
import (
"database/sql"
"database/sql/driver"
"fmt"
"sync"
)
var pool *mockDriver
func init() {
pool = &mockDriver{
conns: make(map[string]*sqlmock),
}
sql.Register("sqlmock", pool)
}
type mockDriver struct {
sync.Mutex
counter int
conns map[string]*sqlmock
}
func (d *mockDriver) Open(dsn string) (driver.Conn, error) {
d.Lock()
defer d.Unlock()
c, ok := d.conns[dsn]
if !ok {
return c, fmt.Errorf("expected a connection to be available, but it is not")
}
c.opened++
return c, nil
}
// New creates sqlmock database connection and a mock to manage expectations.
// Accepts options, like ValueConverterOption, to use a ValueConverter from
// a specific driver.
// Pings db so that all expectations could be
// asserted.
func New(options ...func(*sqlmock) error) (*sql.DB, Sqlmock, error) {
pool.Lock()
dsn := fmt.Sprintf("sqlmock_db_%d", pool.counter)
pool.counter++
smock := &sqlmock{dsn: dsn, drv: pool, ordered: true}
pool.conns[dsn] = smock
pool.Unlock()
return smock.open(options)
}
// NewWithDSN creates sqlmock database connection with a specific DSN
// and a mock to manage expectations.
// Accepts options, like ValueConverterOption, to use a ValueConverter from
// a specific driver.
// Pings db so that all expectations could be asserted.
//
// This method is introduced because of sql abstraction
// libraries, which do not provide a way to initialize
// with sql.DB instance. For example GORM library.
//
// Note, it will error if attempted to create with an
// already used dsn
//
// It is not recommended to use this method, unless you
// really need it and there is no other way around.
func NewWithDSN(dsn string, options ...func(*sqlmock) error) (*sql.DB, Sqlmock, error) {
pool.Lock()
if _, ok := pool.conns[dsn]; ok {
pool.Unlock()
return nil, nil, fmt.Errorf("cannot create a new mock database with the same dsn: %s", dsn)
}
smock := &sqlmock{dsn: dsn, drv: pool, ordered: true}
pool.conns[dsn] = smock
pool.Unlock()
return smock.open(options)
}
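A minimal sketch of `NewWithDSN`, assuming the code under test (for example an ORM) opens the connection itself by DSN; the DSN string and query are illustrative:

``` go
// Register a mock under a fixed DSN; a later sql.Open("sqlmock", "fixed_mock_dsn")
// performed by the code under test resolves to this same mocked connection.
db, mock, err := sqlmock.NewWithDSN("fixed_mock_dsn")
if err != nil {
	panic(err)
}
defer db.Close()
mock.ExpectQuery("SELECT (.+) FROM settings").
	WillReturnRows(sqlmock.NewRows([]string{"value"}).AddRow("on"))
```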

369
vendor/github.com/DATA-DOG/go-sqlmock/expectations.go generated vendored Normal file

@ -0,0 +1,369 @@
package sqlmock
import (
"database/sql/driver"
"fmt"
"strings"
"sync"
"time"
)
// an expectation interface
type expectation interface {
fulfilled() bool
Lock()
Unlock()
String() string
}
// common expectation struct
// satisfies the expectation interface
type commonExpectation struct {
sync.Mutex
triggered bool
err error
}
func (e *commonExpectation) fulfilled() bool {
return e.triggered
}
// ExpectedClose is used to manage *sql.DB.Close expectation
// returned by *Sqlmock.ExpectClose.
type ExpectedClose struct {
commonExpectation
}
// WillReturnError allows to set an error for *sql.DB.Close action
func (e *ExpectedClose) WillReturnError(err error) *ExpectedClose {
e.err = err
return e
}
// String returns string representation
func (e *ExpectedClose) String() string {
msg := "ExpectedClose => expecting database Close"
if e.err != nil {
msg += fmt.Sprintf(", which should return error: %s", e.err)
}
return msg
}
// ExpectedBegin is used to manage *sql.DB.Begin expectation
// returned by *Sqlmock.ExpectBegin.
type ExpectedBegin struct {
commonExpectation
delay time.Duration
}
// WillReturnError allows to set an error for *sql.DB.Begin action
func (e *ExpectedBegin) WillReturnError(err error) *ExpectedBegin {
e.err = err
return e
}
// String returns string representation
func (e *ExpectedBegin) String() string {
msg := "ExpectedBegin => expecting database transaction Begin"
if e.err != nil {
msg += fmt.Sprintf(", which should return error: %s", e.err)
}
return msg
}
// WillDelayFor allows to specify duration for which it will delay
// result. May be used together with Context
func (e *ExpectedBegin) WillDelayFor(duration time.Duration) *ExpectedBegin {
e.delay = duration
return e
}
// ExpectedCommit is used to manage *sql.Tx.Commit expectation
// returned by *Sqlmock.ExpectCommit.
type ExpectedCommit struct {
commonExpectation
}
// WillReturnError allows to set an error for *sql.Tx.Close action
func (e *ExpectedCommit) WillReturnError(err error) *ExpectedCommit {
e.err = err
return e
}
// String returns string representation
func (e *ExpectedCommit) String() string {
msg := "ExpectedCommit => expecting transaction Commit"
if e.err != nil {
msg += fmt.Sprintf(", which should return error: %s", e.err)
}
return msg
}
// ExpectedRollback is used to manage *sql.Tx.Rollback expectation
// returned by *Sqlmock.ExpectRollback.
type ExpectedRollback struct {
commonExpectation
}
// WillReturnError allows to set an error for *sql.Tx.Rollback action
func (e *ExpectedRollback) WillReturnError(err error) *ExpectedRollback {
e.err = err
return e
}
// String returns string representation
func (e *ExpectedRollback) String() string {
msg := "ExpectedRollback => expecting transaction Rollback"
if e.err != nil {
msg += fmt.Sprintf(", which should return error: %s", e.err)
}
return msg
}
// ExpectedQuery is used to manage *sql.DB.Query, *sql.DB.QueryRow, *sql.Tx.Query,
// *sql.Tx.QueryRow, *sql.Stmt.Query or *sql.Stmt.QueryRow expectations.
// Returned by *Sqlmock.ExpectQuery.
type ExpectedQuery struct {
queryBasedExpectation
rows driver.Rows
delay time.Duration
rowsMustBeClosed bool
rowsWereClosed bool
}
// WithArgs will match given expected args to actual database query arguments.
// if at least one argument does not match, it will return an error. For specific
// arguments an sqlmock.Argument interface can be used to match an argument.
func (e *ExpectedQuery) WithArgs(args ...driver.Value) *ExpectedQuery {
e.args = args
return e
}
// RowsWillBeClosed expects this query rows to be closed.
func (e *ExpectedQuery) RowsWillBeClosed() *ExpectedQuery {
e.rowsMustBeClosed = true
return e
}
// WillReturnError allows to set an error for expected database query
func (e *ExpectedQuery) WillReturnError(err error) *ExpectedQuery {
e.err = err
return e
}
// WillDelayFor allows to specify duration for which it will delay
// result. May be used together with Context
func (e *ExpectedQuery) WillDelayFor(duration time.Duration) *ExpectedQuery {
e.delay = duration
return e
}
// String returns string representation
func (e *ExpectedQuery) String() string {
msg := "ExpectedQuery => expecting Query, QueryContext or QueryRow which:"
msg += "\n - matches sql: '" + e.expectSQL + "'"
if len(e.args) == 0 {
msg += "\n - is without arguments"
} else {
msg += "\n - is with arguments:\n"
for i, arg := range e.args {
msg += fmt.Sprintf(" %d - %+v\n", i, arg)
}
msg = strings.TrimSpace(msg)
}
if e.rows != nil {
msg += fmt.Sprintf("\n - %s", e.rows)
}
if e.err != nil {
msg += fmt.Sprintf("\n - should return error: %s", e.err)
}
return msg
}
// ExpectedExec is used to manage *sql.DB.Exec, *sql.Tx.Exec or *sql.Stmt.Exec expectations.
// Returned by *Sqlmock.ExpectExec.
type ExpectedExec struct {
queryBasedExpectation
result driver.Result
delay time.Duration
}
// WithArgs will match given expected args to actual database exec operation arguments.
// if at least one argument does not match, it will return an error. For specific
// arguments an sqlmock.Argument interface can be used to match an argument.
func (e *ExpectedExec) WithArgs(args ...driver.Value) *ExpectedExec {
e.args = args
return e
}
// WillReturnError allows to set an error for expected database exec action
func (e *ExpectedExec) WillReturnError(err error) *ExpectedExec {
e.err = err
return e
}
// WillDelayFor allows to specify duration for which it will delay
// result. May be used together with Context
func (e *ExpectedExec) WillDelayFor(duration time.Duration) *ExpectedExec {
e.delay = duration
return e
}
// String returns string representation
func (e *ExpectedExec) String() string {
msg := "ExpectedExec => expecting Exec or ExecContext which:"
msg += "\n - matches sql: '" + e.expectSQL + "'"
if len(e.args) == 0 {
msg += "\n - is without arguments"
} else {
msg += "\n - is with arguments:\n"
var margs []string
for i, arg := range e.args {
margs = append(margs, fmt.Sprintf(" %d - %+v", i, arg))
}
msg += strings.Join(margs, "\n")
}
if e.result != nil {
res, _ := e.result.(*result)
msg += "\n - should return Result having:"
msg += fmt.Sprintf("\n LastInsertId: %d", res.insertID)
msg += fmt.Sprintf("\n RowsAffected: %d", res.rowsAffected)
if res.err != nil {
msg += fmt.Sprintf("\n Error: %s", res.err)
}
}
if e.err != nil {
msg += fmt.Sprintf("\n - should return error: %s", e.err)
}
return msg
}
// WillReturnResult arranges for an expected Exec() to return a particular
// result, there is sqlmock.NewResult(lastInsertID int64, affectedRows int64) method
// to build a corresponding result. Or if actions needs to be tested against errors
// sqlmock.NewErrorResult(err error) to return a given error.
func (e *ExpectedExec) WillReturnResult(result driver.Result) *ExpectedExec {
e.result = result
return e
}
// ExpectedPrepare is used to manage *sql.DB.Prepare or *sql.Tx.Prepare expectations.
// Returned by *Sqlmock.ExpectPrepare.
type ExpectedPrepare struct {
commonExpectation
mock *sqlmock
expectSQL string
statement driver.Stmt
closeErr error
mustBeClosed bool
wasClosed bool
delay time.Duration
}
// WillReturnError allows to set an error for the expected *sql.DB.Prepare or *sql.Tx.Prepare action.
func (e *ExpectedPrepare) WillReturnError(err error) *ExpectedPrepare {
e.err = err
return e
}
// WillReturnCloseError allows to set an error for this prepared statement Close action
func (e *ExpectedPrepare) WillReturnCloseError(err error) *ExpectedPrepare {
e.closeErr = err
return e
}
// WillDelayFor allows to specify duration for which it will delay
// result. May be used together with Context
func (e *ExpectedPrepare) WillDelayFor(duration time.Duration) *ExpectedPrepare {
e.delay = duration
return e
}
// WillBeClosed expects this prepared statement to
// be closed.
func (e *ExpectedPrepare) WillBeClosed() *ExpectedPrepare {
e.mustBeClosed = true
return e
}
// ExpectQuery allows to expect Query() or QueryRow() on this prepared statement.
// This method is convenient in order to prevent duplicating sql query string matching.
func (e *ExpectedPrepare) ExpectQuery() *ExpectedQuery {
eq := &ExpectedQuery{}
eq.expectSQL = e.expectSQL
eq.converter = e.mock.converter
e.mock.expected = append(e.mock.expected, eq)
return eq
}
// ExpectExec allows to expect Exec() on this prepared statement.
// This method is convenient in order to prevent duplicating sql query string matching.
func (e *ExpectedPrepare) ExpectExec() *ExpectedExec {
eq := &ExpectedExec{}
eq.expectSQL = e.expectSQL
eq.converter = e.mock.converter
e.mock.expected = append(e.mock.expected, eq)
return eq
}
// String returns string representation
func (e *ExpectedPrepare) String() string {
msg := "ExpectedPrepare => expecting Prepare statement which:"
msg += "\n - matches sql: '" + e.expectSQL + "'"
if e.err != nil {
msg += fmt.Sprintf("\n - should return error: %s", e.err)
}
if e.closeErr != nil {
msg += fmt.Sprintf("\n - should return error on Close: %s", e.closeErr)
}
return msg
}
// query based expectation
// adds a query matching logic
type queryBasedExpectation struct {
commonExpectation
expectSQL string
converter driver.ValueConverter
args []driver.Value
}
// ExpectedPing is used to manage *sql.DB.Ping expectations.
// Returned by *Sqlmock.ExpectPing.
type ExpectedPing struct {
commonExpectation
delay time.Duration
}
// WillDelayFor allows to specify duration for which it will delay result. May
// be used together with Context.
func (e *ExpectedPing) WillDelayFor(duration time.Duration) *ExpectedPing {
e.delay = duration
return e
}
// WillReturnError allows to set an error for expected database ping
func (e *ExpectedPing) WillReturnError(err error) *ExpectedPing {
e.err = err
return e
}
// String returns string representation
func (e *ExpectedPing) String() string {
msg := "ExpectedPing => expecting database Ping"
if e.err != nil {
msg += fmt.Sprintf(", which should return error: %s", e.err)
}
return msg
}
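An illustrative sketch of the `ExpectedPrepare` chaining described in the comments above, assuming a `mock` from `sqlmock.New()`; it avoids repeating the SQL string for the query expectation:

``` go
// Expect Prepare, then a Query executed on that same prepared statement.
prep := mock.ExpectPrepare("SELECT (.+) FROM users").WillBeClosed()
prep.ExpectQuery().
	WithArgs(1).
	WillReturnRows(sqlmock.NewRows([]string{"id", "name"}).AddRow(1, "john"))
```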


@ -0,0 +1,67 @@
// +build !go1.8
package sqlmock
import (
"database/sql/driver"
"fmt"
"reflect"
)
// WillReturnRows specifies the set of resulting rows that will be returned
// by the triggered query
func (e *ExpectedQuery) WillReturnRows(rows *Rows) *ExpectedQuery {
e.rows = &rowSets{sets: []*Rows{rows}, ex: e}
return e
}
func (e *queryBasedExpectation) argsMatches(args []namedValue) error {
if nil == e.args {
return nil
}
if len(args) != len(e.args) {
return fmt.Errorf("expected %d, but got %d arguments", len(e.args), len(args))
}
for k, v := range args {
// custom argument matcher
matcher, ok := e.args[k].(Argument)
if ok {
// @TODO: does it make sense to pass value instead of named value?
if !matcher.Match(v.Value) {
return fmt.Errorf("matcher %T could not match %d argument %T - %+v", matcher, k, args[k], args[k])
}
continue
}
dval := e.args[k]
// convert to driver converter
darg, err := e.converter.ConvertValue(dval)
if err != nil {
return fmt.Errorf("could not convert %d argument %T - %+v to driver value: %s", k, e.args[k], e.args[k], err)
}
if !driver.IsValue(darg) {
return fmt.Errorf("argument %d: non-subset type %T returned from Value", k, darg)
}
if !reflect.DeepEqual(darg, v.Value) {
return fmt.Errorf("argument %d expected [%T - %+v] does not match actual [%T - %+v]", k, darg, darg, v.Value, v.Value)
}
}
return nil
}
func (e *queryBasedExpectation) attemptArgMatch(args []namedValue) (err error) {
// catch panic
defer func() {
if e := recover(); e != nil {
_, ok := e.(error)
if !ok {
err = fmt.Errorf(e.(string))
}
}
}()
err = e.argsMatches(args)
return
}


@ -0,0 +1,85 @@
// +build go1.8
package sqlmock
import (
"database/sql"
"database/sql/driver"
"fmt"
"reflect"
)
// WillReturnRows specifies the set of resulting rows that will be returned
// by the triggered query
func (e *ExpectedQuery) WillReturnRows(rows ...*Rows) *ExpectedQuery {
defs := 0
sets := make([]*Rows, len(rows))
for i, r := range rows {
sets[i] = r
if r.def != nil {
defs++
}
}
if defs > 0 && defs == len(sets) {
e.rows = &rowSetsWithDefinition{&rowSets{sets: sets, ex: e}}
} else {
e.rows = &rowSets{sets: sets, ex: e}
}
return e
}
func (e *queryBasedExpectation) argsMatches(args []driver.NamedValue) error {
if nil == e.args {
return nil
}
if len(args) != len(e.args) {
return fmt.Errorf("expected %d, but got %d arguments", len(e.args), len(args))
}
// @TODO should we assert either all args are named or ordinal?
for k, v := range args {
// custom argument matcher
matcher, ok := e.args[k].(Argument)
if ok {
if !matcher.Match(v.Value) {
return fmt.Errorf("matcher %T could not match %d argument %T - %+v", matcher, k, args[k], args[k])
}
continue
}
dval := e.args[k]
if named, isNamed := dval.(sql.NamedArg); isNamed {
dval = named.Value
if v.Name != named.Name {
return fmt.Errorf("named argument %d: name: \"%s\" does not match expected: \"%s\"", k, v.Name, named.Name)
}
} else if k+1 != v.Ordinal {
return fmt.Errorf("argument %d: ordinal position: %d does not match expected: %d", k, k+1, v.Ordinal)
}
// convert to driver converter
darg, err := e.converter.ConvertValue(dval)
if err != nil {
return fmt.Errorf("could not convert %d argument %T - %+v to driver value: %s", k, e.args[k], e.args[k], err)
}
if !reflect.DeepEqual(darg, v.Value) {
return fmt.Errorf("argument %d expected [%T - %+v] does not match actual [%T - %+v]", k, darg, darg, v.Value, v.Value)
}
}
return nil
}
func (e *queryBasedExpectation) attemptArgMatch(args []driver.NamedValue) (err error) {
// catch panic
defer func() {
if e := recover(); e != nil {
_, ok := e.(error)
if !ok {
err = fmt.Errorf(e.(string))
}
}
}()
err = e.argsMatches(args)
return
}

38
vendor/github.com/DATA-DOG/go-sqlmock/options.go generated vendored Normal file

@ -0,0 +1,38 @@
package sqlmock
import "database/sql/driver"
// ValueConverterOption allows to create a sqlmock connection
// with a custom ValueConverter to support drivers with special data types.
func ValueConverterOption(converter driver.ValueConverter) func(*sqlmock) error {
return func(s *sqlmock) error {
s.converter = converter
return nil
}
}
// QueryMatcherOption allows to customize SQL query matcher
// and match SQL query strings in more sophisticated ways.
// The default QueryMatcher is QueryMatcherRegexp.
func QueryMatcherOption(queryMatcher QueryMatcher) func(*sqlmock) error {
return func(s *sqlmock) error {
s.queryMatcher = queryMatcher
return nil
}
}
// MonitorPingsOption determines whether calls to Ping on the driver should be
// observed and mocked.
//
// If true is passed, we will check these calls were expected. Expectations can
// be registered using the ExpectPing() method on the mock.
//
// If false is passed or this option is omitted, calls to Ping will not be
// considered when determining expectations and calls to ExpectPing will have
// no effect.
func MonitorPingsOption(monitorPings bool) func(*sqlmock) error {
return func(s *sqlmock) error {
s.monitorPings = monitorPings
return nil
}
}
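A usage sketch combining these options; the test variable `t` is assumed to come from the enclosing test function:

``` go
// Open a mock that requires exact SQL matches and also monitors Ping calls.
db, mock, err := sqlmock.New(
	sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
	sqlmock.MonitorPingsOption(true),
)
if err != nil {
	t.Fatalf("unexpected error opening a stub database connection: %s", err)
}
defer db.Close()
mock.ExpectPing()
```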

68
vendor/github.com/DATA-DOG/go-sqlmock/query.go generated vendored Normal file

@ -0,0 +1,68 @@
package sqlmock
import (
"fmt"
"regexp"
"strings"
)
var re = regexp.MustCompile("\\s+")
// strip out new lines and trim spaces
func stripQuery(q string) (s string) {
return strings.TrimSpace(re.ReplaceAllString(q, " "))
}
// QueryMatcher is an SQL query string matcher interface,
// which can be used to customize validation of SQL query strings.
// As an example, external library could be used to build
// and validate SQL ast, columns selected.
//
// sqlmock can be customized to implement a different QueryMatcher
// configured through an option when sqlmock.New or sqlmock.NewWithDSN
// is called, default QueryMatcher is QueryMatcherRegexp.
type QueryMatcher interface {
// Match expected SQL query string without whitespace to
// actual SQL.
Match(expectedSQL, actualSQL string) error
}
// QueryMatcherFunc type is an adapter to allow the use of
// ordinary functions as QueryMatcher. If f is a function
// with the appropriate signature, QueryMatcherFunc(f) is a
// QueryMatcher that calls f.
type QueryMatcherFunc func(expectedSQL, actualSQL string) error
// Match implements the QueryMatcher
func (f QueryMatcherFunc) Match(expectedSQL, actualSQL string) error {
return f(expectedSQL, actualSQL)
}
// QueryMatcherRegexp is the default SQL query matcher
// used by sqlmock. It parses expectedSQL to a regular
// expression and attempts to match actualSQL.
var QueryMatcherRegexp QueryMatcher = QueryMatcherFunc(func(expectedSQL, actualSQL string) error {
expect := stripQuery(expectedSQL)
actual := stripQuery(actualSQL)
re, err := regexp.Compile(expect)
if err != nil {
return err
}
if !re.MatchString(actual) {
return fmt.Errorf(`could not match actual sql: "%s" with expected regexp "%s"`, actual, re.String())
}
return nil
})
// QueryMatcherEqual is the SQL query matcher
// which simply tries a case sensitive match of
// expected and actual SQL strings without whitespace.
var QueryMatcherEqual QueryMatcher = QueryMatcherFunc(func(expectedSQL, actualSQL string) error {
expect := stripQuery(expectedSQL)
actual := stripQuery(actualSQL)
if actual != expect {
return fmt.Errorf(`actual sql: "%s" does not equal to expected "%s"`, actual, expect)
}
return nil
})

39
vendor/github.com/DATA-DOG/go-sqlmock/result.go generated vendored Normal file

@ -0,0 +1,39 @@
package sqlmock
import (
"database/sql/driver"
)
// Result satisfies sql driver Result, which
// holds last insert id and rows affected
// by Exec queries
type result struct {
insertID int64
rowsAffected int64
err error
}
// NewResult creates a new sql driver Result
// for Exec based query mocks.
func NewResult(lastInsertID int64, rowsAffected int64) driver.Result {
return &result{
insertID: lastInsertID,
rowsAffected: rowsAffected,
}
}
// NewErrorResult creates a new sql driver Result
// which returns an error given for both interface methods
func NewErrorResult(err error) driver.Result {
return &result{
err: err,
}
}
func (r *result) LastInsertId() (int64, error) {
return r.insertID, r.err
}
func (r *result) RowsAffected() (int64, error) {
return r.rowsAffected, r.err
}
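A small usage sketch, assuming a `mock` from `sqlmock.New()`: with `NewErrorResult` the Exec call itself succeeds, but reading the returned `Result` yields the given error (the query and message are illustrative).

``` go
// The UPDATE is expected to run, but LastInsertId()/RowsAffected() will return this error.
mock.ExpectExec("UPDATE products").
	WillReturnResult(sqlmock.NewErrorResult(fmt.Errorf("last insert id is not available")))
```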

212
vendor/github.com/DATA-DOG/go-sqlmock/rows.go generated vendored Normal file

@ -0,0 +1,212 @@
package sqlmock
import (
"bytes"
"database/sql/driver"
"encoding/csv"
"fmt"
"io"
"strings"
)
const invalidate = "☠☠☠ MEMORY OVERWRITTEN ☠☠☠ "
// CSVColumnParser is a function which converts trimmed csv
// column string to a []byte representation. Currently
// transforms NULL to nil
var CSVColumnParser = func(s string) []byte {
switch {
case strings.ToLower(s) == "null":
return nil
}
return []byte(s)
}
type rowSets struct {
sets []*Rows
pos int
ex *ExpectedQuery
raw [][]byte
}
func (rs *rowSets) Columns() []string {
return rs.sets[rs.pos].cols
}
func (rs *rowSets) Close() error {
rs.invalidateRaw()
rs.ex.rowsWereClosed = true
return rs.sets[rs.pos].closeErr
}
// advances to next row
func (rs *rowSets) Next(dest []driver.Value) error {
r := rs.sets[rs.pos]
r.pos++
rs.invalidateRaw()
if r.pos > len(r.rows) {
return io.EOF // per interface spec
}
for i, col := range r.rows[r.pos-1] {
if b, ok := rawBytes(col); ok {
rs.raw = append(rs.raw, b)
dest[i] = b
continue
}
dest[i] = col
}
return r.nextErr[r.pos-1]
}
// transforms to debuggable printable string
func (rs *rowSets) String() string {
if rs.empty() {
return "with empty rows"
}
msg := "should return rows:\n"
if len(rs.sets) == 1 {
for n, row := range rs.sets[0].rows {
msg += fmt.Sprintf(" row %d - %+v\n", n, row)
}
return strings.TrimSpace(msg)
}
for i, set := range rs.sets {
msg += fmt.Sprintf(" result set: %d\n", i)
for n, row := range set.rows {
msg += fmt.Sprintf(" row %d - %+v\n", n, row)
}
}
return strings.TrimSpace(msg)
}
func (rs *rowSets) empty() bool {
for _, set := range rs.sets {
if len(set.rows) > 0 {
return false
}
}
return true
}
func rawBytes(col driver.Value) (_ []byte, ok bool) {
val, ok := col.([]byte)
if !ok || len(val) == 0 {
return nil, false
}
// Copy the bytes from the mocked row into a shared raw buffer, which we'll replace the content of later
// This allows scanning into sql.RawBytes to correctly become invalid on subsequent calls to Next(), Scan() or Close()
b := make([]byte, len(val))
copy(b, val)
return b, true
}
// Bytes that could have been scanned as sql.RawBytes are only valid until the next call to Next, Scan or Close.
// If those occur, we must replace their content to simulate the shared memory to expose misuse of sql.RawBytes
func (rs *rowSets) invalidateRaw() {
// Replace the content of slices previously returned
b := []byte(invalidate)
for _, r := range rs.raw {
copy(r, bytes.Repeat(b, len(r)/len(b)+1))
}
// Start with new slices for the next scan
rs.raw = nil
}
// Rows is a mocked collection of rows to
// return for Query result
type Rows struct {
converter driver.ValueConverter
cols []string
def []*Column
rows [][]driver.Value
pos int
nextErr map[int]error
closeErr error
}
// NewRows allows Rows to be created from a
// sql driver.Value slice or from the CSV string and
// to be used as sql driver.Rows.
// Use Sqlmock.NewRows instead if using a custom converter
func NewRows(columns []string) *Rows {
return &Rows{
cols: columns,
nextErr: make(map[int]error),
converter: driver.DefaultParameterConverter,
}
}
// CloseError allows to set an error
// which will be returned by rows.Close
// function.
//
// The close error will be triggered only in cases
// when rows.Next() EOF was not yet reached, that is
// a default sql library behavior
func (r *Rows) CloseError(err error) *Rows {
r.closeErr = err
return r
}
// RowError allows to set an error
// which will be returned when a given
// row number is read
func (r *Rows) RowError(row int, err error) *Rows {
r.nextErr[row] = err
return r
}
// AddRow composed from database driver.Value slice
// return the same instance to perform subsequent actions.
// Note that the number of values must match the number
// of columns
func (r *Rows) AddRow(values ...driver.Value) *Rows {
if len(values) != len(r.cols) {
panic("Expected number of values to match number of columns")
}
row := make([]driver.Value, len(r.cols))
for i, v := range values {
// Convert user-friendly values (such as int or driver.Valuer)
// to database/sql native value (driver.Value such as int64)
var err error
v, err = r.converter.ConvertValue(v)
if err != nil {
panic(fmt.Errorf(
"row #%d, column #%d (%q) type %T: %s",
len(r.rows)+1, i, r.cols[i], values[i], err,
))
}
row[i] = v
}
r.rows = append(r.rows, row)
return r
}
// FromCSVString build rows from csv string.
// return the same instance to perform subsequent actions.
// Note that the number of values must match the number
// of columns
func (r *Rows) FromCSVString(s string) *Rows {
res := strings.NewReader(strings.TrimSpace(s))
csvReader := csv.NewReader(res)
for {
res, err := csvReader.Read()
if err != nil || res == nil {
break
}
row := make([]driver.Value, len(r.cols))
for i, v := range res {
row[i] = CSVColumnParser(strings.TrimSpace(v))
}
r.rows = append(r.rows, row)
}
return r
}
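A usage sketch tying these builders together, assuming a `mock` from `sqlmock.New()`; column names, values, and errors are illustrative:

``` go
// Two rows, an error injected when the second row (index 1) is read,
// and an assertion that the rows are eventually closed.
rows := sqlmock.NewRows([]string{"id", "title"}).
	AddRow(1, "first").
	AddRow(2, "second").
	RowError(1, fmt.Errorf("scan error on second row"))
mock.ExpectQuery("SELECT (.+) FROM articles").
	WillReturnRows(rows).
	RowsWillBeClosed()
```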

74
vendor/github.com/DATA-DOG/go-sqlmock/rows_go18.go generated vendored Normal file

@ -0,0 +1,74 @@
// +build go1.8
package sqlmock
import (
"database/sql/driver"
"io"
"reflect"
)
// Implement the "RowsNextResultSet" interface
func (rs *rowSets) HasNextResultSet() bool {
return rs.pos+1 < len(rs.sets)
}
// Implement the "RowsNextResultSet" interface
func (rs *rowSets) NextResultSet() error {
if !rs.HasNextResultSet() {
return io.EOF
}
rs.pos++
return nil
}
// type for rows with columns definition created with sqlmock.NewRowsWithColumnDefinition
type rowSetsWithDefinition struct {
*rowSets
}
// Implement the "RowsColumnTypeDatabaseTypeName" interface
func (rs *rowSetsWithDefinition) ColumnTypeDatabaseTypeName(index int) string {
return rs.getDefinition(index).DbType()
}
// Implement the "RowsColumnTypeLength" interface
func (rs *rowSetsWithDefinition) ColumnTypeLength(index int) (length int64, ok bool) {
return rs.getDefinition(index).Length()
}
// Implement the "RowsColumnTypeNullable" interface
func (rs *rowSetsWithDefinition) ColumnTypeNullable(index int) (nullable, ok bool) {
return rs.getDefinition(index).IsNullable()
}
// Implement the "RowsColumnTypePrecisionScale" interface
func (rs *rowSetsWithDefinition) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
return rs.getDefinition(index).PrecisionScale()
}
// ColumnTypeScanType is defined from driver.RowsColumnTypeScanType
func (rs *rowSetsWithDefinition) ColumnTypeScanType(index int) reflect.Type {
return rs.getDefinition(index).ScanType()
}
// return column definition from current set metadata
func (rs *rowSetsWithDefinition) getDefinition(index int) *Column {
return rs.sets[rs.pos].def[index]
}
// NewRowsWithColumnDefinition return rows with columns metadata
func NewRowsWithColumnDefinition(columns ...*Column) *Rows {
cols := make([]string, len(columns))
for i, column := range columns {
cols[i] = column.Name()
}
return &Rows{
cols: cols,
def: columns,
nextErr: make(map[int]error),
converter: driver.DefaultParameterConverter,
}
}

439
vendor/github.com/DATA-DOG/go-sqlmock/sqlmock.go generated vendored Normal file

@ -0,0 +1,439 @@
/*
Package sqlmock is a mock library implementing sql driver. It has one and only one
purpose: to simulate any sql driver behavior in tests, without needing a real
database connection. It helps to maintain a correct **TDD** workflow.
It does not require any modifications to your source code in order to test
and mock database operations. It supports concurrency and multiple database mocking.
The driver allows mocking any sql driver method behavior.
*/
package sqlmock
import (
"database/sql"
"database/sql/driver"
"fmt"
"time"
)
// Sqlmock interface serves to create expectations
// for any kind of database action in order to mock
// and test real database behavior.
type SqlmockCommon interface {
// ExpectClose queues an expectation for this database
// action to be triggered. the *ExpectedClose allows
// to mock database response
ExpectClose() *ExpectedClose
// ExpectationsWereMet checks whether all queued expectations
// were met in order. If any of them was not met - an error is returned.
ExpectationsWereMet() error
// ExpectPrepare expects Prepare() to be called with expectedSQL query.
// the *ExpectedPrepare allows to mock database response.
// Note that you may expect Query() or Exec() on the *ExpectedPrepare
// statement to prevent repeating expectedSQL
ExpectPrepare(expectedSQL string) *ExpectedPrepare
// ExpectQuery expects Query() or QueryRow() to be called with expectedSQL query.
// the *ExpectedQuery allows to mock database response.
ExpectQuery(expectedSQL string) *ExpectedQuery
// ExpectExec expects Exec() to be called with expectedSQL query.
// the *ExpectedExec allows to mock database response
ExpectExec(expectedSQL string) *ExpectedExec
// ExpectBegin expects *sql.DB.Begin to be called.
// the *ExpectedBegin allows to mock database response
ExpectBegin() *ExpectedBegin
// ExpectCommit expects *sql.Tx.Commit to be called.
// the *ExpectedCommit allows to mock database response
ExpectCommit() *ExpectedCommit
// ExpectRollback expects *sql.Tx.Rollback to be called.
// the *ExpectedRollback allows to mock database response
ExpectRollback() *ExpectedRollback
// ExpectPing expects *sql.DB.Ping to be called.
// the *ExpectedPing allows to mock database response
//
// Ping support only exists in the SQL library in Go 1.8 and above.
// ExpectPing in Go <=1.7 will return an ExpectedPing but not register
// any expectations.
//
// You must enable pings using MonitorPingsOption for this to register
// any expectations.
ExpectPing() *ExpectedPing
// MatchExpectationsInOrder gives an option whether to match all
// expectations in the order they were set or not.
//
// By default it is set to - true. But if you use goroutines
// to parallelize your query execution, that option may
// be handy.
//
// This option may be turned on anytime during tests. As soon
// as it is switched to false, expectations will be matched
// in any order. Or otherwise if switched to true, any unmatched
// expectations will be expected in order
MatchExpectationsInOrder(bool)
// NewRows allows Rows to be created from a
// sql driver.Value slice or from the CSV string and
// to be used as sql driver.Rows.
NewRows(columns []string) *Rows
}
type sqlmock struct {
ordered bool
dsn string
opened int
drv *mockDriver
converter driver.ValueConverter
queryMatcher QueryMatcher
monitorPings bool
expected []expectation
}
func (c *sqlmock) open(options []func(*sqlmock) error) (*sql.DB, Sqlmock, error) {
db, err := sql.Open("sqlmock", c.dsn)
if err != nil {
return db, c, err
}
for _, option := range options {
err := option(c)
if err != nil {
return db, c, err
}
}
if c.converter == nil {
c.converter = driver.DefaultParameterConverter
}
if c.queryMatcher == nil {
c.queryMatcher = QueryMatcherRegexp
}
if c.monitorPings {
// We call Ping on the driver shortly to verify startup assertions by
// driving internal behaviour of the sql standard library. We don't
// want this call to ping to be monitored for expectation purposes so
// temporarily disable.
c.monitorPings = false
defer func() { c.monitorPings = true }()
}
return db, c, db.Ping()
}
func (c *sqlmock) ExpectClose() *ExpectedClose {
e := &ExpectedClose{}
c.expected = append(c.expected, e)
return e
}
func (c *sqlmock) MatchExpectationsInOrder(b bool) {
c.ordered = b
}
// Close a mock database driver connection. It may or may not
// be called depending on the circumstances, but if it is called
// there must be an *ExpectedClose expectation satisfied.
// meets http://golang.org/pkg/database/sql/driver/#Conn interface
func (c *sqlmock) Close() error {
c.drv.Lock()
defer c.drv.Unlock()
c.opened--
if c.opened == 0 {
delete(c.drv.conns, c.dsn)
}
var expected *ExpectedClose
var fulfilled int
var ok bool
for _, next := range c.expected {
next.Lock()
if next.fulfilled() {
next.Unlock()
fulfilled++
continue
}
if expected, ok = next.(*ExpectedClose); ok {
break
}
next.Unlock()
if c.ordered {
return fmt.Errorf("call to database Close, was not expected, next expectation is: %s", next)
}
}
if expected == nil {
msg := "call to database Close was not expected"
if fulfilled == len(c.expected) {
msg = "all expectations were already fulfilled, " + msg
}
return fmt.Errorf(msg)
}
expected.triggered = true
expected.Unlock()
return expected.err
}
func (c *sqlmock) ExpectationsWereMet() error {
for _, e := range c.expected {
e.Lock()
fulfilled := e.fulfilled()
e.Unlock()
if !fulfilled {
return fmt.Errorf("there is a remaining expectation which was not matched: %s", e)
}
// for expected prepared statement check whether it was closed if expected
if prep, ok := e.(*ExpectedPrepare); ok {
if prep.mustBeClosed && !prep.wasClosed {
return fmt.Errorf("expected prepared statement to be closed, but it was not: %s", prep)
}
}
// must check whether all expected queried rows are closed
if query, ok := e.(*ExpectedQuery); ok {
if query.rowsMustBeClosed && !query.rowsWereClosed {
return fmt.Errorf("expected query rows to be closed, but it was not: %s", query)
}
}
}
return nil
}
// Begin meets http://golang.org/pkg/database/sql/driver/#Conn interface
func (c *sqlmock) Begin() (driver.Tx, error) {
ex, err := c.begin()
if ex != nil {
time.Sleep(ex.delay)
}
if err != nil {
return nil, err
}
return c, nil
}
func (c *sqlmock) begin() (*ExpectedBegin, error) {
var expected *ExpectedBegin
var ok bool
var fulfilled int
for _, next := range c.expected {
next.Lock()
if next.fulfilled() {
next.Unlock()
fulfilled++
continue
}
if expected, ok = next.(*ExpectedBegin); ok {
break
}
next.Unlock()
if c.ordered {
return nil, fmt.Errorf("call to database transaction Begin, was not expected, next expectation is: %s", next)
}
}
if expected == nil {
msg := "call to database transaction Begin was not expected"
if fulfilled == len(c.expected) {
msg = "all expectations were already fulfilled, " + msg
}
return nil, fmt.Errorf(msg)
}
expected.triggered = true
expected.Unlock()
return expected, expected.err
}
func (c *sqlmock) ExpectBegin() *ExpectedBegin {
e := &ExpectedBegin{}
c.expected = append(c.expected, e)
return e
}
func (c *sqlmock) ExpectExec(expectedSQL string) *ExpectedExec {
e := &ExpectedExec{}
e.expectSQL = expectedSQL
e.converter = c.converter
c.expected = append(c.expected, e)
return e
}
// Prepare meets http://golang.org/pkg/database/sql/driver/#Conn interface
func (c *sqlmock) Prepare(query string) (driver.Stmt, error) {
ex, err := c.prepare(query)
if ex != nil {
time.Sleep(ex.delay)
}
if err != nil {
return nil, err
}
return &statement{c, ex, query}, nil
}
func (c *sqlmock) prepare(query string) (*ExpectedPrepare, error) {
var expected *ExpectedPrepare
var fulfilled int
var ok bool
for _, next := range c.expected {
next.Lock()
if next.fulfilled() {
next.Unlock()
fulfilled++
continue
}
if c.ordered {
if expected, ok = next.(*ExpectedPrepare); ok {
break
}
next.Unlock()
return nil, fmt.Errorf("call to Prepare statement with query '%s', was not expected, next expectation is: %s", query, next)
}
if pr, ok := next.(*ExpectedPrepare); ok {
if err := c.queryMatcher.Match(pr.expectSQL, query); err == nil {
expected = pr
break
}
}
next.Unlock()
}
if expected == nil {
msg := "call to Prepare '%s' query was not expected"
if fulfilled == len(c.expected) {
msg = "all expectations were already fulfilled, " + msg
}
return nil, fmt.Errorf(msg, query)
}
defer expected.Unlock()
if err := c.queryMatcher.Match(expected.expectSQL, query); err != nil {
return nil, fmt.Errorf("Prepare: %v", err)
}
expected.triggered = true
return expected, expected.err
}
func (c *sqlmock) ExpectPrepare(expectedSQL string) *ExpectedPrepare {
e := &ExpectedPrepare{expectSQL: expectedSQL, mock: c}
c.expected = append(c.expected, e)
return e
}
func (c *sqlmock) ExpectQuery(expectedSQL string) *ExpectedQuery {
e := &ExpectedQuery{}
e.expectSQL = expectedSQL
e.converter = c.converter
c.expected = append(c.expected, e)
return e
}
func (c *sqlmock) ExpectCommit() *ExpectedCommit {
e := &ExpectedCommit{}
c.expected = append(c.expected, e)
return e
}
func (c *sqlmock) ExpectRollback() *ExpectedRollback {
e := &ExpectedRollback{}
c.expected = append(c.expected, e)
return e
}
// Commit meets http://golang.org/pkg/database/sql/driver/#Tx
func (c *sqlmock) Commit() error {
var expected *ExpectedCommit
var fulfilled int
var ok bool
for _, next := range c.expected {
next.Lock()
if next.fulfilled() {
next.Unlock()
fulfilled++
continue
}
if expected, ok = next.(*ExpectedCommit); ok {
break
}
next.Unlock()
if c.ordered {
return fmt.Errorf("call to Commit transaction, was not expected, next expectation is: %s", next)
}
}
if expected == nil {
msg := "call to Commit transaction was not expected"
if fulfilled == len(c.expected) {
msg = "all expectations were already fulfilled, " + msg
}
return fmt.Errorf(msg)
}
expected.triggered = true
expected.Unlock()
return expected.err
}
// Rollback meets http://golang.org/pkg/database/sql/driver/#Tx
func (c *sqlmock) Rollback() error {
var expected *ExpectedRollback
var fulfilled int
var ok bool
for _, next := range c.expected {
next.Lock()
if next.fulfilled() {
next.Unlock()
fulfilled++
continue
}
if expected, ok = next.(*ExpectedRollback); ok {
break
}
next.Unlock()
if c.ordered {
return fmt.Errorf("call to Rollback transaction, was not expected, next expectation is: %s", next)
}
}
if expected == nil {
msg := "call to Rollback transaction was not expected"
if fulfilled == len(c.expected) {
msg = "all expectations were already fulfilled, " + msg
}
return fmt.Errorf(msg)
}
expected.triggered = true
expected.Unlock()
return expected.err
}
// NewRows allows Rows to be created from a
// sql driver.Value slice or from the CSV string and
// to be used as sql driver.Rows.
func (c *sqlmock) NewRows(columns []string) *Rows {
r := NewRows(columns)
r.converter = c.converter
return r
}
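An illustrative sketch of `MatchExpectationsInOrder`, assuming a `mock` from `sqlmock.New()`; the queries are placeholders:

``` go
// With ordered matching disabled, these two Execs may be satisfied in any
// order, which is handy when the code under test runs queries in goroutines.
mock.MatchExpectationsInOrder(false)
mock.ExpectExec("UPDATE products").WillReturnResult(sqlmock.NewResult(0, 1))
mock.ExpectExec("UPDATE users").WillReturnResult(sqlmock.NewResult(0, 1))
```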


@ -0,0 +1,191 @@
// +build !go1.8
package sqlmock
import (
"database/sql/driver"
"fmt"
"log"
"time"
)
// Sqlmock interface for Go up to 1.7
type Sqlmock interface {
// Embed common methods
SqlmockCommon
}
type namedValue struct {
Name string
Ordinal int
Value driver.Value
}
func (c *sqlmock) ExpectPing() *ExpectedPing {
log.Println("ExpectPing has no effect on Go 1.7 or below")
return &ExpectedPing{}
}
// Query meets http://golang.org/pkg/database/sql/driver/#Queryer
func (c *sqlmock) Query(query string, args []driver.Value) (driver.Rows, error) {
namedArgs := make([]namedValue, len(args))
for i, v := range args {
namedArgs[i] = namedValue{
Ordinal: i + 1,
Value: v,
}
}
ex, err := c.query(query, namedArgs)
if ex != nil {
time.Sleep(ex.delay)
}
if err != nil {
return nil, err
}
return ex.rows, nil
}
func (c *sqlmock) query(query string, args []namedValue) (*ExpectedQuery, error) {
var expected *ExpectedQuery
var fulfilled int
var ok bool
for _, next := range c.expected {
next.Lock()
if next.fulfilled() {
next.Unlock()
fulfilled++
continue
}
if c.ordered {
if expected, ok = next.(*ExpectedQuery); ok {
break
}
next.Unlock()
return nil, fmt.Errorf("call to Query '%s' with args %+v, was not expected, next expectation is: %s", query, args, next)
}
if qr, ok := next.(*ExpectedQuery); ok {
if err := c.queryMatcher.Match(qr.expectSQL, query); err != nil {
next.Unlock()
continue
}
if err := qr.attemptArgMatch(args); err == nil {
expected = qr
break
}
}
next.Unlock()
}
if expected == nil {
msg := "call to Query '%s' with args %+v was not expected"
if fulfilled == len(c.expected) {
msg = "all expectations were already fulfilled, " + msg
}
return nil, fmt.Errorf(msg, query, args)
}
defer expected.Unlock()
if err := c.queryMatcher.Match(expected.expectSQL, query); err != nil {
return nil, fmt.Errorf("Query: %v", err)
}
if err := expected.argsMatches(args); err != nil {
return nil, fmt.Errorf("Query '%s', arguments do not match: %s", query, err)
}
expected.triggered = true
if expected.err != nil {
return expected, expected.err // mocked to return error
}
if expected.rows == nil {
return nil, fmt.Errorf("Query '%s' with args %+v, must return a database/sql/driver.Rows, but it was not set for expectation %T as %+v", query, args, expected, expected)
}
return expected, nil
}
// Exec meets http://golang.org/pkg/database/sql/driver/#Execer
func (c *sqlmock) Exec(query string, args []driver.Value) (driver.Result, error) {
namedArgs := make([]namedValue, len(args))
for i, v := range args {
namedArgs[i] = namedValue{
Ordinal: i + 1,
Value: v,
}
}
ex, err := c.exec(query, namedArgs)
if ex != nil {
time.Sleep(ex.delay)
}
if err != nil {
return nil, err
}
return ex.result, nil
}
func (c *sqlmock) exec(query string, args []namedValue) (*ExpectedExec, error) {
var expected *ExpectedExec
var fulfilled int
var ok bool
for _, next := range c.expected {
next.Lock()
if next.fulfilled() {
next.Unlock()
fulfilled++
continue
}
if c.ordered {
if expected, ok = next.(*ExpectedExec); ok {
break
}
next.Unlock()
return nil, fmt.Errorf("call to ExecQuery '%s' with args %+v, was not expected, next expectation is: %s", query, args, next)
}
if exec, ok := next.(*ExpectedExec); ok {
if err := c.queryMatcher.Match(exec.expectSQL, query); err != nil {
next.Unlock()
continue
}
if err := exec.attemptArgMatch(args); err == nil {
expected = exec
break
}
}
next.Unlock()
}
if expected == nil {
msg := "call to ExecQuery '%s' with args %+v was not expected"
if fulfilled == len(c.expected) {
msg = "all expectations were already fulfilled, " + msg
}
return nil, fmt.Errorf(msg, query, args)
}
defer expected.Unlock()
if err := c.queryMatcher.Match(expected.expectSQL, query); err != nil {
return nil, fmt.Errorf("ExecQuery: %v", err)
}
if err := expected.argsMatches(args); err != nil {
return nil, fmt.Errorf("ExecQuery '%s', arguments do not match: %s", query, err)
}
expected.triggered = true
if expected.err != nil {
return expected, expected.err // mocked to return error
}
if expected.result == nil {
return nil, fmt.Errorf("ExecQuery '%s' with args %+v, must return a database/sql/driver.Result, but it was not set for expectation %T as %+v", query, args, expected, expected)
}
return expected, nil
}

356
vendor/github.com/DATA-DOG/go-sqlmock/sqlmock_go18.go generated vendored Normal file

@ -0,0 +1,356 @@
// +build go1.8
package sqlmock
import (
"context"
"database/sql/driver"
"errors"
"fmt"
"log"
"time"
)
// Sqlmock interface for Go 1.8+
type Sqlmock interface {
// Embed common methods
SqlmockCommon
// NewRowsWithColumnDefinition allows Rows to be created from a
// sql driver.Value slice with a definition of sql metadata
NewRowsWithColumnDefinition(columns ...*Column) *Rows
// NewColumn allows creating a Column
NewColumn(name string) *Column
}
// ErrCancelled defines an error value, which can be expected in case of
// such cancellation error.
var ErrCancelled = errors.New("canceling query due to user request")
// Implement the "QueryerContext" interface
func (c *sqlmock) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
ex, err := c.query(query, args)
if ex != nil {
select {
case <-time.After(ex.delay):
if err != nil {
return nil, err
}
return ex.rows, nil
case <-ctx.Done():
return nil, ErrCancelled
}
}
return nil, err
}
// Implement the "ExecerContext" interface
func (c *sqlmock) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
ex, err := c.exec(query, args)
if ex != nil {
select {
case <-time.After(ex.delay):
if err != nil {
return nil, err
}
return ex.result, nil
case <-ctx.Done():
return nil, ErrCancelled
}
}
return nil, err
}
// Implement the "ConnBeginTx" interface
func (c *sqlmock) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
ex, err := c.begin()
if ex != nil {
select {
case <-time.After(ex.delay):
if err != nil {
return nil, err
}
return c, nil
case <-ctx.Done():
return nil, ErrCancelled
}
}
return nil, err
}
// Implement the "ConnPrepareContext" interface
func (c *sqlmock) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
ex, err := c.prepare(query)
if ex != nil {
select {
case <-time.After(ex.delay):
if err != nil {
return nil, err
}
return &statement{c, ex, query}, nil
case <-ctx.Done():
return nil, ErrCancelled
}
}
return nil, err
}
// Implement the "Pinger" interface - the explicit DB driver ping was only added to database/sql in Go 1.8
func (c *sqlmock) Ping(ctx context.Context) error {
if !c.monitorPings {
return nil
}
ex, err := c.ping()
if ex != nil {
select {
case <-ctx.Done():
return ErrCancelled
case <-time.After(ex.delay):
}
}
return err
}
func (c *sqlmock) ping() (*ExpectedPing, error) {
var expected *ExpectedPing
var fulfilled int
var ok bool
for _, next := range c.expected {
next.Lock()
if next.fulfilled() {
next.Unlock()
fulfilled++
continue
}
if expected, ok = next.(*ExpectedPing); ok {
break
}
next.Unlock()
if c.ordered {
return nil, fmt.Errorf("call to database Ping, was not expected, next expectation is: %s", next)
}
}
if expected == nil {
msg := "call to database Ping was not expected"
if fulfilled == len(c.expected) {
msg = "all expectations were already fulfilled, " + msg
}
return nil, fmt.Errorf(msg)
}
expected.triggered = true
expected.Unlock()
return expected, expected.err
}
// Implement the "StmtExecContext" interface
func (stmt *statement) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
return stmt.conn.ExecContext(ctx, stmt.query, args)
}
// Implement the "StmtQueryContext" interface
func (stmt *statement) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
return stmt.conn.QueryContext(ctx, stmt.query, args)
}
func (c *sqlmock) ExpectPing() *ExpectedPing {
if !c.monitorPings {
log.Println("ExpectPing will have no effect as monitoring pings is disabled. Use MonitorPingsOption to enable.")
return nil
}
e := &ExpectedPing{}
c.expected = append(c.expected, e)
return e
}
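// For illustration only (not part of the upstream file): a test that wants to
// assert on Ping typically enables ping monitoring when opening the mock and
// then registers the expectation, roughly:
//
//	db, mock, _ := sqlmock.New(sqlmock.MonitorPingsOption(true))
//	mock.ExpectPing()
//	_ = db.Ping()
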
// Query meets http://golang.org/pkg/database/sql/driver/#Queryer
// Deprecated: Drivers should implement QueryerContext instead.
func (c *sqlmock) Query(query string, args []driver.Value) (driver.Rows, error) {
namedArgs := make([]driver.NamedValue, len(args))
for i, v := range args {
namedArgs[i] = driver.NamedValue{
Ordinal: i + 1,
Value: v,
}
}
ex, err := c.query(query, namedArgs)
if ex != nil {
time.Sleep(ex.delay)
}
if err != nil {
return nil, err
}
return ex.rows, nil
}
func (c *sqlmock) query(query string, args []driver.NamedValue) (*ExpectedQuery, error) {
var expected *ExpectedQuery
var fulfilled int
var ok bool
for _, next := range c.expected {
next.Lock()
if next.fulfilled() {
next.Unlock()
fulfilled++
continue
}
if c.ordered {
if expected, ok = next.(*ExpectedQuery); ok {
break
}
next.Unlock()
return nil, fmt.Errorf("call to Query '%s' with args %+v, was not expected, next expectation is: %s", query, args, next)
}
if qr, ok := next.(*ExpectedQuery); ok {
if err := c.queryMatcher.Match(qr.expectSQL, query); err != nil {
next.Unlock()
continue
}
if err := qr.attemptArgMatch(args); err == nil {
expected = qr
break
}
}
next.Unlock()
}
if expected == nil {
msg := "call to Query '%s' with args %+v was not expected"
if fulfilled == len(c.expected) {
msg = "all expectations were already fulfilled, " + msg
}
return nil, fmt.Errorf(msg, query, args)
}
defer expected.Unlock()
if err := c.queryMatcher.Match(expected.expectSQL, query); err != nil {
return nil, fmt.Errorf("Query: %v", err)
}
if err := expected.argsMatches(args); err != nil {
return nil, fmt.Errorf("Query '%s', arguments do not match: %s", query, err)
}
expected.triggered = true
if expected.err != nil {
return expected, expected.err // mocked to return error
}
if expected.rows == nil {
return nil, fmt.Errorf("Query '%s' with args %+v, must return a database/sql/driver.Rows, but it was not set for expectation %T as %+v", query, args, expected, expected)
}
return expected, nil
}
// Exec meets http://golang.org/pkg/database/sql/driver/#Execer
// Deprecated: Drivers should implement ExecerContext instead.
func (c *sqlmock) Exec(query string, args []driver.Value) (driver.Result, error) {
namedArgs := make([]driver.NamedValue, len(args))
for i, v := range args {
namedArgs[i] = driver.NamedValue{
Ordinal: i + 1,
Value: v,
}
}
ex, err := c.exec(query, namedArgs)
if ex != nil {
time.Sleep(ex.delay)
}
if err != nil {
return nil, err
}
return ex.result, nil
}
func (c *sqlmock) exec(query string, args []driver.NamedValue) (*ExpectedExec, error) {
var expected *ExpectedExec
var fulfilled int
var ok bool
for _, next := range c.expected {
next.Lock()
if next.fulfilled() {
next.Unlock()
fulfilled++
continue
}
if c.ordered {
if expected, ok = next.(*ExpectedExec); ok {
break
}
next.Unlock()
return nil, fmt.Errorf("call to ExecQuery '%s' with args %+v, was not expected, next expectation is: %s", query, args, next)
}
if exec, ok := next.(*ExpectedExec); ok {
if err := c.queryMatcher.Match(exec.expectSQL, query); err != nil {
next.Unlock()
continue
}
if err := exec.attemptArgMatch(args); err == nil {
expected = exec
break
}
}
next.Unlock()
}
if expected == nil {
msg := "call to ExecQuery '%s' with args %+v was not expected"
if fulfilled == len(c.expected) {
msg = "all expectations were already fulfilled, " + msg
}
return nil, fmt.Errorf(msg, query, args)
}
defer expected.Unlock()
if err := c.queryMatcher.Match(expected.expectSQL, query); err != nil {
return nil, fmt.Errorf("ExecQuery: %v", err)
}
if err := expected.argsMatches(args); err != nil {
return nil, fmt.Errorf("ExecQuery '%s', arguments do not match: %s", query, err)
}
expected.triggered = true
if expected.err != nil {
return expected, expected.err // mocked to return error
}
if expected.result == nil {
return nil, fmt.Errorf("ExecQuery '%s' with args %+v, must return a database/sql/driver.Result, but it was not set for expectation %T as %+v", query, args, expected, expected)
}
return expected, nil
}
// @TODO maybe add ExpectedBegin.WithOptions(driver.TxOptions)
// NewRowsWithColumnDefinition allows Rows to be created from a
// sql driver.Value slice with a definition of sql metadata
func (c *sqlmock) NewRowsWithColumnDefinition(columns ...*Column) *Rows {
r := NewRowsWithColumnDefinition(columns...)
r.converter = c.converter
return r
}
// NewColumn allows creating a Column that can be enhanced with metadata
// using OfType/Nullable/WithLength/WithPrecisionAndScale methods.
func (c *sqlmock) NewColumn(name string) *Column {
return NewColumn(name)
}

View File

@ -0,0 +1,11 @@
// +build go1.8,!go1.9
package sqlmock
import "database/sql/driver"
// CheckNamedValue meets https://golang.org/pkg/database/sql/driver/#NamedValueChecker
func (c *sqlmock) CheckNamedValue(nv *driver.NamedValue) (err error) {
nv.Value, err = c.converter.ConvertValue(nv.Value)
return err
}

19
vendor/github.com/DATA-DOG/go-sqlmock/sqlmock_go19.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// +build go1.9
package sqlmock
import (
"database/sql"
"database/sql/driver"
)
// CheckNamedValue meets https://golang.org/pkg/database/sql/driver/#NamedValueChecker
func (c *sqlmock) CheckNamedValue(nv *driver.NamedValue) (err error) {
switch nv.Value.(type) {
case sql.Out:
return nil
default:
nv.Value, err = c.converter.ConvertValue(nv.Value)
return err
}
}

16
vendor/github.com/DATA-DOG/go-sqlmock/statement.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
package sqlmock
type statement struct {
conn *sqlmock
ex *ExpectedPrepare
query string
}
func (stmt *statement) Close() error {
stmt.ex.wasClosed = true
return stmt.ex.closeErr
}
func (stmt *statement) NumInput() int {
return -1
}

View File

@ -0,0 +1,17 @@
// +build !go1.8
package sqlmock
import (
"database/sql/driver"
)
// Deprecated: Drivers should implement ExecerContext instead.
func (stmt *statement) Exec(args []driver.Value) (driver.Result, error) {
return stmt.conn.Exec(stmt.query, args)
}
// Deprecated: Drivers should implement StmtQueryContext instead (or additionally).
func (stmt *statement) Query(args []driver.Value) (driver.Rows, error) {
return stmt.conn.Query(stmt.query, args)
}

View File

@ -0,0 +1,26 @@
// +build go1.8
package sqlmock
import (
"context"
"database/sql/driver"
)
// Deprecated: Drivers should implement ExecerContext instead.
func (stmt *statement) Exec(args []driver.Value) (driver.Result, error) {
return stmt.conn.ExecContext(context.Background(), stmt.query, convertValueToNamedValue(args))
}
// Deprecated: Drivers should implement StmtQueryContext instead (or additionally).
func (stmt *statement) Query(args []driver.Value) (driver.Rows, error) {
return stmt.conn.QueryContext(context.Background(), stmt.query, convertValueToNamedValue(args))
}
func convertValueToNamedValue(args []driver.Value) []driver.NamedValue {
namedArgs := make([]driver.NamedValue, len(args))
for i, v := range args {
namedArgs[i] = driver.NamedValue{Ordinal: i + 1, Value: v}
}
return namedArgs
}

4
vendor/github.com/caarlos0/env/v6/.gitignore generated vendored Normal file
View File

@ -0,0 +1,4 @@
coverage.txt
bin
card.png
dist

8
vendor/github.com/caarlos0/env/v6/.golangci.yml generated vendored Normal file
View File

@ -0,0 +1,8 @@
linters:
enable:
- thelper
- gofumpt
- tparallel
- unconvert
- unparam
- wastedassign

3
vendor/github.com/caarlos0/env/v6/.goreleaser.yml generated vendored Normal file
View File

@ -0,0 +1,3 @@
includes:
- from_url:
url: https://raw.githubusercontent.com/caarlos0/.goreleaserfiles/main/lib.yml

21
vendor/github.com/caarlos0/env/v6/LICENSE.md generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015-2022 Carlos Alexandro Becker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

37
vendor/github.com/caarlos0/env/v6/Makefile generated vendored Normal file
View File

@ -0,0 +1,37 @@
SOURCE_FILES?=./...
TEST_PATTERN?=.
export GO111MODULE := on
setup:
go mod tidy
.PHONY: setup
build:
go build
.PHONY: build
test:
go test -v -failfast -race -coverpkg=./... -covermode=atomic -coverprofile=coverage.txt $(SOURCE_FILES) -run $(TEST_PATTERN) -timeout=2m
.PHONY: test
cover: test
go tool cover -html=coverage.txt
.PHONY: cover
fmt:
gofumpt -w -l .
.PHONY: fmt
lint:
golangci-lint run ./...
.PHONY: lint
ci: build test
.PHONY: ci
card:
wget -O card.png -c "https://og.caarlos0.dev/**env**: parse envs to structs.png?theme=light&md=1&fontSize=100px&images=https://github.com/caarlos0.png"
.PHONY: card
.DEFAULT_GOAL := ci

452
vendor/github.com/caarlos0/env/v6/README.md generated vendored Normal file
View File

@ -0,0 +1,452 @@
# env
[![Build Status](https://img.shields.io/github/workflow/status/caarlos0/env/build?style=for-the-badge)](https://github.com/caarlos0/env/actions?workflow=build)
[![Coverage Status](https://img.shields.io/codecov/c/gh/caarlos0/env.svg?logo=codecov&style=for-the-badge)](https://codecov.io/gh/caarlos0/env)
[![](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=for-the-badge)](https://pkg.go.dev/github.com/caarlos0/env/v6)
A simple and zero-dependencies library to parse environment variables into structs.
## Example
Get the module with:
```sh
go get github.com/caarlos0/env/v6
```
The usage looks like this:
```go
package main
import (
"fmt"
"time"
"github.com/caarlos0/env/v6"
)
type config struct {
Home string `env:"HOME"`
Port int `env:"PORT" envDefault:"3000"`
Password string `env:"PASSWORD,unset"`
IsProduction bool `env:"PRODUCTION"`
Hosts []string `env:"HOSTS" envSeparator:":"`
Duration time.Duration `env:"DURATION"`
TempFolder string `env:"TEMP_FOLDER" envDefault:"${HOME}/tmp" envExpand:"true"`
}
func main() {
cfg := config{}
if err := env.Parse(&cfg); err != nil {
fmt.Printf("%+v\n", err)
}
fmt.Printf("%+v\n", cfg)
}
```
You can run it like this:
```sh
$ PRODUCTION=true HOSTS="host1:host2:host3" DURATION=1s go run main.go
{Home:/your/home Port:3000 IsProduction:true Hosts:[host1 host2 host3] Duration:1s}
```
⚠️⚠️⚠️ **Attention:** _unexported fields_ will be **ignored**.
## Supported types and defaults
Out of the box all built-in types are supported, plus a few others that
are commonly used.
Complete list:
- `string`
- `bool`
- `int`
- `int8`
- `int16`
- `int32`
- `int64`
- `uint`
- `uint8`
- `uint16`
- `uint32`
- `uint64`
- `float32`
- `float64`
- `time.Duration`
- `encoding.TextUnmarshaler`
- `url.URL`
Pointers, slices and slices of pointers of those types are also supported.
You can also use/define a [custom parser func](#custom-parser-funcs) for any
other type you want.
If you set the `envDefault` tag for a field, that value will be used when the
variable is absent from the environment.
By default, slice types will split the environment value on `,`; you can change
this behavior by setting the `envSeparator` tag.
If you set the `envExpand` tag, environment variables (either in `${var}` or
`$var` format) in the string will be replaced according to the actual value
of the variable.
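A small runnable sketch combining these tags (the field names, `DIRS`, and `LOG_FILE` below are invented for illustration):
```go
package main

import (
	"fmt"

	"github.com/caarlos0/env/v6"
)

type paths struct {
	// e.g. DIRS="a;b;c" is split on ";" instead of the default ","
	Dirs []string `env:"DIRS" envSeparator:";"`
	// LOG_FILE falls back to the default, with ${HOME} expanded at parse time
	Log string `env:"LOG_FILE" envDefault:"${HOME}/app.log" envExpand:"true"`
}

func main() {
	cfg := paths{}
	if err := env.Parse(&cfg); err != nil {
		fmt.Printf("%+v\n", err)
	}
	fmt.Printf("%+v\n", cfg)
}
```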
## Custom Parser Funcs
If you have a type that is not supported out of the box by the lib, you are able
to use (or define) and pass custom parsers (and their associated `reflect.Type`)
to the `env.ParseWithFuncs()` function.
In addition to accepting a struct pointer (same as `Parse()`), this function
also accepts a `map[reflect.Type]env.ParserFunc`.
If you add a custom parser for, say `Foo`, it will also be used to parse
`*Foo` and `[]Foo` types.
Check the examples in the [go doc](http://pkg.go.dev/github.com/caarlos0/env/v6)
for more info.
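As a rough sketch of `env.ParseWithFuncs` (the `Point` type, the `ORIGIN` variable, and its value are invented for illustration, not taken from the upstream docs):
```go
package main

import (
	"fmt"
	"log"
	"reflect"

	"github.com/caarlos0/env/v6"
)

// Point is a made-up type that env cannot parse out of the box.
type Point struct{ X, Y int }

type config struct {
	Origin Point `env:"ORIGIN"` // e.g. ORIGIN="3,4"
}

func main() {
	parsers := map[reflect.Type]env.ParserFunc{
		reflect.TypeOf(Point{}): func(v string) (interface{}, error) {
			var p Point
			if _, err := fmt.Sscanf(v, "%d,%d", &p.X, &p.Y); err != nil {
				return nil, fmt.Errorf("invalid point %q: %w", v, err)
			}
			return p, nil
		},
	}

	cfg := config{}
	if err := env.ParseWithFuncs(&cfg, parsers, env.Options{
		Environment: map[string]string{"ORIGIN": "3,4"},
	}); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {Origin:{X:3 Y:4}}
}
```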
### A note about `TextUnmarshaler` and `time.Time`
Env supports by default anything that implements the `TextUnmarshaler` interface.
That includes things like `time.Time` for example.
The upside is that depending on the format you need, you don't need to change anything.
The downside is that if you do need time in another format, you'll need to create your own type.
It's fairly straightforward:
```go
type MyTime time.Time
func (t *MyTime) UnmarshalText(text []byte) error {
tt, err := time.Parse("2006-01-02", string(text))
*t = MyTime(tt)
return err
}
type Config struct {
SomeTime MyTime `env:"SOME_TIME"`
}
```
And then you can parse `Config` with `env.Parse`.
## Required fields
The `env` tag option `required` (e.g., `env:"tagKey,required"`) can be added to ensure that some environment variable is set.
In the example above, an error is returned if the `config` struct is changed to:
```go
type config struct {
SecretKey string `env:"SECRET_KEY,required"`
}
```
## Not Empty fields
While `required` demands the environment variable to be set, it doesn't check its value.
If you want to make sure the environment is set and not empty, you need to use the `notEmpty` tag option instead (`env:"SOME_ENV,notEmpty"`).
Example:
```go
type config struct {
SecretKey string `env:"SECRET_KEY,notEmpty"`
}
```
## Unset environment variable after reading it
The `env` tag option `unset` (e.g., `env:"tagKey,unset"`) can be added
to ensure that some environment variable is unset after reading it.
Example:
```go
type config struct {
SecretKey string `env:"SECRET_KEY,unset"`
}
```
## From file
The `env` tag option `file` (e.g., `env:"tagKey,file"`) can be added
to indicate that the value of the variable shall be loaded from a file. The path of that file is given
by the environment variable associated with it.
Example below
```go
package main
import (
"fmt"
"time"
"github.com/caarlos0/env/v6"
)
type config struct {
Secret string `env:"SECRET,file"`
Password string `env:"PASSWORD,file" envDefault:"/tmp/password"`
Certificate string `env:"CERTIFICATE,file" envDefault:"${CERTIFICATE_FILE}" envExpand:"true"`
}
func main() {
cfg := config{}
if err := env.Parse(&cfg); err != nil {
fmt.Printf("%+v\n", err)
}
fmt.Printf("%+v\n", cfg)
}
```
```sh
$ echo qwerty > /tmp/secret
$ echo dvorak > /tmp/password
$ echo coleman > /tmp/certificate
$ SECRET=/tmp/secret \
CERTIFICATE_FILE=/tmp/certificate \
go run main.go
{Secret:qwerty Password:dvorak Certificate:coleman}
```
## Options
### Environment
By setting the `Options.Environment` map you can tell `Parse` to add those `keys` and `values`
as env vars before parsing is done. These envs are stored in the map and never actually set by `os.Setenv`.
This option effectively makes `env` ignore the OS environment variables: only the ones provided in the option are used.
This can make your testing scenarios a bit cleaner and easier to handle.
```go
package main
import (
"fmt"
"log"
"github.com/caarlos0/env/v6"
)
type Config struct {
Password string `env:"PASSWORD"`
}
func main() {
cfg := &Config{}
opts := &env.Options{Environment: map[string]string{
"PASSWORD": "MY_PASSWORD",
}}
// Load env vars.
if err := env.Parse(cfg, opts); err != nil {
log.Fatal(err)
}
// Print the loaded data.
fmt.Printf("%+v\n", cfg)
}
```
### Changing default tag name
You can change which struct tag name is used for the env vars by setting the `Options.TagName`
option.
For example
```go
package main
import (
"fmt"
"log"
"github.com/caarlos0/env/v6"
)
type Config struct {
Password string `json:"PASSWORD"`
}
func main() {
cfg := &Config{}
opts := &env.Options{TagName: "json"}
// Load env vars.
if err := env.Parse(cfg, opts); err != nil {
log.Fatal(err)
}
// Print the loaded data.
fmt.Printf("%+v\n", cfg)
}
```
### Prefixes
You can prefix sub-structs' env tags with the `envPrefix` struct tag, and prefix a whole `env.Parse` call via `Options.Prefix`.
Here's an example exercising both:
```go
package main
import (
"fmt"
"log"
"github.com/caarlos0/env/v6"
)
type Config struct {
Home string `env:"HOME"`
}
type ComplexConfig struct {
Foo Config `envPrefix:"FOO_"`
Clean Config
Bar Config `envPrefix:"BAR_"`
Blah string `env:"BLAH"`
}
func main() {
cfg := ComplexConfig{}
	if err := env.Parse(&cfg, env.Options{
		Prefix: "T_",
		Environment: map[string]string{
			"T_FOO_HOME": "/foo",
			"T_BAR_HOME": "/bar",
			"T_BLAH":     "blahhh",
			"T_HOME":     "/clean",
		},
	}); err != nil {
		log.Fatal(err)
	}
	// Print the loaded data.
	fmt.Printf("%+v\n", cfg)
}
```
### On set hooks
You might want to listen to value sets and, for example, log something or do some other kind of logic.
You can do this by passing an `OnSet` option:
```go
package main
import (
"fmt"
"log"
"github.com/caarlos0/env/v6"
)
type Config struct {
Username string `env:"USERNAME" envDefault:"admin"`
Password string `env:"PASSWORD"`
}
func main() {
cfg := &Config{}
opts := &env.Options{
OnSet: func(tag string, value interface{}, isDefault bool) {
fmt.Printf("Set %s to %v (default? %v)\n", tag, value, isDefault)
},
}
// Load env vars.
if err := env.Parse(cfg, opts); err != nil {
log.Fatal(err)
}
// Print the loaded data.
fmt.Printf("%+v\n", cfg)
}
```
## Making all fields required
You can make all fields that don't have a default value required by setting `RequiredIfNoDef: true` in the `Options`.
For example
```go
package main
import (
"fmt"
"log"
"github.com/caarlos0/env/v6"
)
type Config struct {
Username string `env:"USERNAME" envDefault:"admin"`
Password string `env:"PASSWORD"`
}
func main() {
cfg := &Config{}
opts := &env.Options{RequiredIfNoDef: true}
// Load env vars.
if err := env.Parse(cfg, opts); err != nil {
log.Fatal(err)
}
// Print the loaded data.
fmt.Printf("%+v\n", cfg)
}
```
## Defaults from code
You may also define default values in code by initialising the config struct before it's filled by `env.Parse`.
Note that default values defined as struct tags will overwrite existing values during `Parse`.
```go
package main
import (
"fmt"
"log"
"github.com/caarlos0/env/v6"
)
type Config struct {
Username string `env:"USERNAME" envDefault:"admin"`
Password string `env:"PASSWORD"`
}
func main() {
var cfg = Config{
Username: "test",
Password: "123456",
}
if err := env.Parse(&cfg); err != nil {
fmt.Println("failed:", err)
}
fmt.Printf("%+v", cfg) // {Username:admin Password:123456}
}
```
## Stargazers over time
[![Stargazers over time](https://starchart.cc/caarlos0/env.svg)](https://starchart.cc/caarlos0/env)

504
vendor/github.com/caarlos0/env/v6/env.go generated vendored Normal file
View File

@ -0,0 +1,504 @@
package env
import (
"encoding"
"errors"
"fmt"
"net/url"
"os"
"reflect"
"strconv"
"strings"
"time"
)
// nolint: gochecknoglobals
var (
// ErrNotAStructPtr is returned if you pass something that is not a pointer to a
// Struct to Parse.
ErrNotAStructPtr = errors.New("env: expected a pointer to a Struct")
defaultBuiltInParsers = map[reflect.Kind]ParserFunc{
reflect.Bool: func(v string) (interface{}, error) {
return strconv.ParseBool(v)
},
reflect.String: func(v string) (interface{}, error) {
return v, nil
},
reflect.Int: func(v string) (interface{}, error) {
i, err := strconv.ParseInt(v, 10, 32)
return int(i), err
},
reflect.Int16: func(v string) (interface{}, error) {
i, err := strconv.ParseInt(v, 10, 16)
return int16(i), err
},
reflect.Int32: func(v string) (interface{}, error) {
i, err := strconv.ParseInt(v, 10, 32)
return int32(i), err
},
reflect.Int64: func(v string) (interface{}, error) {
return strconv.ParseInt(v, 10, 64)
},
reflect.Int8: func(v string) (interface{}, error) {
i, err := strconv.ParseInt(v, 10, 8)
return int8(i), err
},
reflect.Uint: func(v string) (interface{}, error) {
i, err := strconv.ParseUint(v, 10, 32)
return uint(i), err
},
reflect.Uint16: func(v string) (interface{}, error) {
i, err := strconv.ParseUint(v, 10, 16)
return uint16(i), err
},
reflect.Uint32: func(v string) (interface{}, error) {
i, err := strconv.ParseUint(v, 10, 32)
return uint32(i), err
},
reflect.Uint64: func(v string) (interface{}, error) {
i, err := strconv.ParseUint(v, 10, 64)
return i, err
},
reflect.Uint8: func(v string) (interface{}, error) {
i, err := strconv.ParseUint(v, 10, 8)
return uint8(i), err
},
reflect.Float64: func(v string) (interface{}, error) {
return strconv.ParseFloat(v, 64)
},
reflect.Float32: func(v string) (interface{}, error) {
f, err := strconv.ParseFloat(v, 32)
return float32(f), err
},
}
)
func defaultTypeParsers() map[reflect.Type]ParserFunc {
return map[reflect.Type]ParserFunc{
reflect.TypeOf(url.URL{}): func(v string) (interface{}, error) {
u, err := url.Parse(v)
if err != nil {
return nil, fmt.Errorf("unable to parse URL: %v", err)
}
return *u, nil
},
reflect.TypeOf(time.Nanosecond): func(v string) (interface{}, error) {
s, err := time.ParseDuration(v)
if err != nil {
return nil, fmt.Errorf("unable to parse duration: %v", err)
}
return s, err
},
}
}
// ParserFunc defines the signature of a function that can be used within `CustomParsers`.
type ParserFunc func(v string) (interface{}, error)
// OnSetFn is a hook that can be run when a value is set.
type OnSetFn func(tag string, value interface{}, isDefault bool)
// Options for the parser.
type Options struct {
// Environment keys and values that will be accessible for the service.
Environment map[string]string
// TagName specifies another tagname to use rather than the default env.
TagName string
// RequiredIfNoDef automatically sets all env as required if they do not declare 'envDefault'
RequiredIfNoDef bool
// OnSet allows running a function when a value is set
OnSet OnSetFn
// Prefix defines a prefix for each key
Prefix string
// Sets to true if we have already configured once.
configured bool
}
// configure will do the basic configurations and defaults.
func configure(opts []Options) []Options {
// If we have already configured the first item
// of options will have been configured set to true.
if len(opts) > 0 && opts[0].configured {
return opts
}
// Created options with defaults.
opt := Options{
TagName: "env",
Environment: toMap(os.Environ()),
configured: true,
}
// Loop over all opts structs and set
// to opt if value is not default/empty.
for _, item := range opts {
if item.Environment != nil {
opt.Environment = item.Environment
}
if item.TagName != "" {
opt.TagName = item.TagName
}
if item.OnSet != nil {
opt.OnSet = item.OnSet
}
if item.Prefix != "" {
opt.Prefix = item.Prefix
}
opt.RequiredIfNoDef = item.RequiredIfNoDef
}
return []Options{opt}
}
func getOnSetFn(opts []Options) OnSetFn {
return opts[0].OnSet
}
// getTagName returns the tag name.
func getTagName(opts []Options) string {
return opts[0].TagName
}
// getEnvironment returns the environment map.
func getEnvironment(opts []Options) map[string]string {
return opts[0].Environment
}
// Parse parses a struct containing `env` tags and loads its values from
// environment variables.
func Parse(v interface{}, opts ...Options) error {
return ParseWithFuncs(v, map[reflect.Type]ParserFunc{}, opts...)
}
// ParseWithFuncs is the same as `Parse` except it also allows the user to pass
// in custom parsers.
func ParseWithFuncs(v interface{}, funcMap map[reflect.Type]ParserFunc, opts ...Options) error {
opts = configure(opts)
ptrRef := reflect.ValueOf(v)
if ptrRef.Kind() != reflect.Ptr {
return ErrNotAStructPtr
}
ref := ptrRef.Elem()
if ref.Kind() != reflect.Struct {
return ErrNotAStructPtr
}
parsers := defaultTypeParsers()
for k, v := range funcMap {
parsers[k] = v
}
return doParse(ref, parsers, opts)
}
func doParse(ref reflect.Value, funcMap map[reflect.Type]ParserFunc, opts []Options) error {
refType := ref.Type()
var agrErr aggregateError
for i := 0; i < refType.NumField(); i++ {
refField := ref.Field(i)
refTypeField := refType.Field(i)
if err := doParseField(refField, refTypeField, funcMap, opts); err != nil {
if val, ok := err.(aggregateError); ok {
agrErr.errors = append(agrErr.errors, val.errors...)
} else {
agrErr.errors = append(agrErr.errors, err)
}
}
}
if len(agrErr.errors) == 0 {
return nil
}
return agrErr
}
func doParseField(refField reflect.Value, refTypeField reflect.StructField, funcMap map[reflect.Type]ParserFunc, opts []Options) error {
if !refField.CanSet() {
return nil
}
if reflect.Ptr == refField.Kind() && refField.Elem().Kind() == reflect.Struct {
return ParseWithFuncs(refField.Interface(), funcMap, optsWithPrefix(refTypeField, opts)...)
}
if reflect.Struct == refField.Kind() && refField.CanAddr() && refField.Type().Name() == "" {
return ParseWithFuncs(refField.Addr().Interface(), funcMap, optsWithPrefix(refTypeField, opts)...)
}
value, err := get(refTypeField, opts)
if err != nil {
return err
}
if value != "" {
return set(refField, refTypeField, value, funcMap)
}
if reflect.Struct == refField.Kind() {
return doParse(refField, funcMap, optsWithPrefix(refTypeField, opts))
}
return nil
}
func get(field reflect.StructField, opts []Options) (val string, err error) {
var exists bool
var isDefault bool
var loadFile bool
var unset bool
var notEmpty bool
required := opts[0].RequiredIfNoDef
prefix := opts[0].Prefix
ownKey, tags := parseKeyForOption(field.Tag.Get(getTagName(opts)))
key := prefix + ownKey
for _, tag := range tags {
switch tag {
case "":
continue
case "file":
loadFile = true
case "required":
required = true
case "unset":
unset = true
case "notEmpty":
notEmpty = true
default:
return "", fmt.Errorf("tag option %q not supported", tag)
}
}
expand := strings.EqualFold(field.Tag.Get("envExpand"), "true")
defaultValue, defExists := field.Tag.Lookup("envDefault")
val, exists, isDefault = getOr(key, defaultValue, defExists, getEnvironment(opts))
if expand {
val = os.ExpandEnv(val)
}
if unset {
defer os.Unsetenv(key)
}
if required && !exists && len(ownKey) > 0 {
return "", fmt.Errorf(`required environment variable %q is not set`, key)
}
if notEmpty && val == "" {
return "", fmt.Errorf("environment variable %q should not be empty", key)
}
if loadFile && val != "" {
filename := val
val, err = getFromFile(filename)
if err != nil {
return "", fmt.Errorf(`could not load content of file "%s" from variable %s: %v`, filename, key, err)
}
}
if onSetFn := getOnSetFn(opts); onSetFn != nil {
onSetFn(key, val, isDefault)
}
return val, err
}
// split the env tag's key into the expected key and desired option, if any.
func parseKeyForOption(key string) (string, []string) {
opts := strings.Split(key, ",")
return opts[0], opts[1:]
}
func getFromFile(filename string) (value string, err error) {
b, err := os.ReadFile(filename)
return string(b), err
}
func getOr(key, defaultValue string, defExists bool, envs map[string]string) (string, bool, bool) {
value, exists := envs[key]
switch {
case (!exists || key == "") && defExists:
return defaultValue, true, true
case !exists:
return "", false, false
}
return value, true, false
}
func set(field reflect.Value, sf reflect.StructField, value string, funcMap map[reflect.Type]ParserFunc) error {
if tm := asTextUnmarshaler(field); tm != nil {
if err := tm.UnmarshalText([]byte(value)); err != nil {
return newParseError(sf, err)
}
return nil
}
typee := sf.Type
fieldee := field
if typee.Kind() == reflect.Ptr {
typee = typee.Elem()
fieldee = field.Elem()
}
parserFunc, ok := funcMap[typee]
if ok {
val, err := parserFunc(value)
if err != nil {
return newParseError(sf, err)
}
fieldee.Set(reflect.ValueOf(val))
return nil
}
parserFunc, ok = defaultBuiltInParsers[typee.Kind()]
if ok {
val, err := parserFunc(value)
if err != nil {
return newParseError(sf, err)
}
fieldee.Set(reflect.ValueOf(val).Convert(typee))
return nil
}
if field.Kind() == reflect.Slice {
return handleSlice(field, value, sf, funcMap)
}
return newNoParserError(sf)
}
func handleSlice(field reflect.Value, value string, sf reflect.StructField, funcMap map[reflect.Type]ParserFunc) error {
separator := sf.Tag.Get("envSeparator")
if separator == "" {
separator = ","
}
parts := strings.Split(value, separator)
typee := sf.Type.Elem()
if typee.Kind() == reflect.Ptr {
typee = typee.Elem()
}
if _, ok := reflect.New(typee).Interface().(encoding.TextUnmarshaler); ok {
return parseTextUnmarshalers(field, parts, sf)
}
parserFunc, ok := funcMap[typee]
if !ok {
parserFunc, ok = defaultBuiltInParsers[typee.Kind()]
if !ok {
return newNoParserError(sf)
}
}
result := reflect.MakeSlice(sf.Type, 0, len(parts))
for _, part := range parts {
r, err := parserFunc(part)
if err != nil {
return newParseError(sf, err)
}
v := reflect.ValueOf(r).Convert(typee)
if sf.Type.Elem().Kind() == reflect.Ptr {
v = reflect.New(typee)
v.Elem().Set(reflect.ValueOf(r).Convert(typee))
}
result = reflect.Append(result, v)
}
field.Set(result)
return nil
}
func asTextUnmarshaler(field reflect.Value) encoding.TextUnmarshaler {
if reflect.Ptr == field.Kind() {
if field.IsNil() {
field.Set(reflect.New(field.Type().Elem()))
}
} else if field.CanAddr() {
field = field.Addr()
}
tm, ok := field.Interface().(encoding.TextUnmarshaler)
if !ok {
return nil
}
return tm
}
func parseTextUnmarshalers(field reflect.Value, data []string, sf reflect.StructField) error {
s := len(data)
elemType := field.Type().Elem()
slice := reflect.MakeSlice(reflect.SliceOf(elemType), s, s)
for i, v := range data {
sv := slice.Index(i)
kind := sv.Kind()
if kind == reflect.Ptr {
sv = reflect.New(elemType.Elem())
} else {
sv = sv.Addr()
}
tm := sv.Interface().(encoding.TextUnmarshaler)
if err := tm.UnmarshalText([]byte(v)); err != nil {
return newParseError(sf, err)
}
if kind == reflect.Ptr {
slice.Index(i).Set(sv)
}
}
field.Set(slice)
return nil
}
func newParseError(sf reflect.StructField, err error) error {
return parseError{
sf: sf,
err: err,
}
}
type parseError struct {
sf reflect.StructField
err error
}
func (e parseError) Error() string {
return fmt.Sprintf(`parse error on field "%s" of type "%s": %v`, e.sf.Name, e.sf.Type, e.err)
}
func newNoParserError(sf reflect.StructField) error {
return fmt.Errorf(`no parser found for field "%s" of type "%s"`, sf.Name, sf.Type)
}
func optsWithPrefix(field reflect.StructField, opts []Options) []Options {
subOpts := make([]Options, len(opts))
copy(subOpts, opts)
if prefix := field.Tag.Get("envPrefix"); prefix != "" {
subOpts[0].Prefix += prefix
}
return subOpts
}
type aggregateError struct {
errors []error
}
func (e aggregateError) Error() string {
var sb strings.Builder
sb.WriteString("env:")
for _, err := range e.errors {
sb.WriteString(fmt.Sprintf(" %v;", err.Error()))
}
return strings.TrimRight(sb.String(), ";")
}

15
vendor/github.com/caarlos0/env/v6/env_unix.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package env
import "strings"
func toMap(env []string) map[string]string {
r := map[string]string{}
for _, e := range env {
p := strings.SplitN(e, "=", 2)
r[p[0]] = p[1]
}
return r
}

25
vendor/github.com/caarlos0/env/v6/env_windows.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
package env
import "strings"
func toMap(env []string) map[string]string {
r := map[string]string{}
for _, e := range env {
p := strings.SplitN(e, "=", 2)
// On Windows, environment variables can start with '='. If so, Split at next character.
// See env_windows.go in the Go source: https://github.com/golang/go/blob/master/src/syscall/env_windows.go#L58
prefixEqualSign := false
if len(e) > 0 && e[0] == '=' {
e = e[1:]
prefixEqualSign = true
}
p = strings.SplitN(e, "=", 2)
if prefixEqualSign {
p[0] = "=" + p[0]
}
r[p[0]] = p[1]
}
return r
}

1
vendor/github.com/go-logfmt/logfmt/.gitignore generated vendored Normal file
View File

@ -0,0 +1 @@
.vscode/

82
vendor/github.com/go-logfmt/logfmt/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,82 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.6.0] - 2023-01-30
[0.6.0]: https://github.com/go-logfmt/logfmt/compare/v0.5.1...v0.6.0
### Added
- NewDecoderSize by [@alexanderjophus]
## [0.5.1] - 2021-08-18
[0.5.1]: https://github.com/go-logfmt/logfmt/compare/v0.5.0...v0.5.1
### Changed
- Update the `go.mod` file for Go 1.17 as described in the [Go 1.17 release
notes](https://golang.org/doc/go1.17#go-command)
## [0.5.0] - 2020-01-03
[0.5.0]: https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0
### Changed
- Remove the dependency on github.com/kr/logfmt by [@ChrisHines]
- Move fuzz code to github.com/go-logfmt/fuzzlogfmt by [@ChrisHines]
## [0.4.0] - 2018-11-21
[0.4.0]: https://github.com/go-logfmt/logfmt/compare/v0.3.0...v0.4.0
### Added
- Go module support by [@ChrisHines]
- CHANGELOG by [@ChrisHines]
### Changed
- Drop invalid runes from keys instead of returning ErrInvalidKey by [@ChrisHines]
- On panic while printing, attempt to print panic value by [@bboreham]
## [0.3.0] - 2016-11-15
[0.3.0]: https://github.com/go-logfmt/logfmt/compare/v0.2.0...v0.3.0
### Added
- Pool buffers for quoted strings and byte slices by [@nussjustin]
### Fixed
- Fuzz fix, quote invalid UTF-8 values by [@judwhite]
## [0.2.0] - 2016-05-08
[0.2.0]: https://github.com/go-logfmt/logfmt/compare/v0.1.0...v0.2.0
### Added
- Encoder.EncodeKeyvals by [@ChrisHines]
## [0.1.0] - 2016-03-28
[0.1.0]: https://github.com/go-logfmt/logfmt/commits/v0.1.0
### Added
- Encoder by [@ChrisHines]
- Decoder by [@ChrisHines]
- MarshalKeyvals by [@ChrisHines]
[@ChrisHines]: https://github.com/ChrisHines
[@bboreham]: https://github.com/bboreham
[@judwhite]: https://github.com/judwhite
[@nussjustin]: https://github.com/nussjustin
[@alexanderjophus]: https://github.com/alexanderjophus

22
vendor/github.com/go-logfmt/logfmt/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 go-logfmt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

41
vendor/github.com/go-logfmt/logfmt/README.md generated vendored Normal file
View File

@ -0,0 +1,41 @@
# logfmt
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logfmt/logfmt.svg)](https://pkg.go.dev/github.com/go-logfmt/logfmt)
[![Go Report Card](https://goreportcard.com/badge/go-logfmt/logfmt)](https://goreportcard.com/report/go-logfmt/logfmt)
[![Github Actions](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml/badge.svg)](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml)
[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=master)](https://coveralls.io/github/go-logfmt/logfmt?branch=main)
Package logfmt implements utilities to marshal and unmarshal data in the [logfmt
format][fmt]. It provides an API similar to [encoding/json][json] and
[encoding/xml][xml].
[fmt]: https://brandur.org/logfmt
[json]: https://pkg.go.dev/encoding/json
[xml]: https://pkg.go.dev/encoding/xml
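For illustration, a minimal round trip with the `Encoder` and `Decoder` might look like this (a sketch, not part of the upstream README):
```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/go-logfmt/logfmt"
)

func main() {
	// Encode one record: space-separated key=value pairs, quoted when needed.
	enc := logfmt.NewEncoder(os.Stdout)
	_ = enc.EncodeKeyvals("level", "info", "msg", "hello world")
	_ = enc.EndRecord() // prints: level=info msg="hello world"

	// Decode records back from an input stream.
	dec := logfmt.NewDecoder(strings.NewReader(`level=info msg="hello world"`))
	for dec.ScanRecord() {
		for dec.ScanKeyval() {
			fmt.Printf("%s = %s\n", dec.Key(), dec.Value())
		}
	}
	if err := dec.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```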
The logfmt format was first documented by Brandur Leach in [this
article][origin]. The format has not been formally standardized. The most
authoritative public specification to date has been the documentation of a Go
Language [package][parser] written by Blake Mizerany and Keith Rarick.
[origin]: https://brandur.org/logfmt
[parser]: https://pkg.go.dev/github.com/kr/logfmt
## Goals
This project attempts to conform as closely as possible to the prior art, while
also removing ambiguity where necessary to provide well behaved encoder and
decoder implementations.
## Non-goals
This project does not attempt to formally standardize the logfmt format. In the
event that logfmt is standardized this project would take conforming to the
standard as a goal.
## Versioning
This project publishes releases according to the Go language guidelines for
[developing and publishing modules][pub].
[pub]: https://go.dev/doc/modules/developing

254
vendor/github.com/go-logfmt/logfmt/decode.go generated vendored Normal file
View File

@ -0,0 +1,254 @@
package logfmt
import (
"bufio"
"bytes"
"fmt"
"io"
"unicode/utf8"
)
// A Decoder reads and decodes logfmt records from an input stream.
type Decoder struct {
pos int
key []byte
value []byte
lineNum int
s *bufio.Scanner
err error
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read data from r beyond
// the logfmt records requested.
func NewDecoder(r io.Reader) *Decoder {
dec := &Decoder{
s: bufio.NewScanner(r),
}
return dec
}
// NewDecoderSize returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read data from r beyond
// the logfmt records requested.
// The size argument specifies the size of the initial buffer that the
// Decoder will use to read records from r.
// If a log line is longer than the size argument, the Decoder will return
// a bufio.ErrTooLong error.
func NewDecoderSize(r io.Reader, size int) *Decoder {
scanner := bufio.NewScanner(r)
scanner.Buffer(make([]byte, 0, size), size)
dec := &Decoder{
s: scanner,
}
return dec
}
// ScanRecord advances the Decoder to the next record, which can then be
// parsed with the ScanKeyval method. It returns false when decoding stops,
// either by reaching the end of the input or an error. After ScanRecord
// returns false, the Err method will return any error that occurred during
// decoding, except that if it was io.EOF, Err will return nil.
func (dec *Decoder) ScanRecord() bool {
if dec.err != nil {
return false
}
if !dec.s.Scan() {
dec.err = dec.s.Err()
return false
}
dec.lineNum++
dec.pos = 0
return true
}
// ScanKeyval advances the Decoder to the next key/value pair of the current
// record, which can then be retrieved with the Key and Value methods. It
// returns false when decoding stops, either by reaching the end of the
// current record or an error.
func (dec *Decoder) ScanKeyval() bool {
dec.key, dec.value = nil, nil
if dec.err != nil {
return false
}
line := dec.s.Bytes()
// garbage
for p, c := range line[dec.pos:] {
if c > ' ' {
dec.pos += p
goto key
}
}
dec.pos = len(line)
return false
key:
const invalidKeyError = "invalid key"
start, multibyte := dec.pos, false
for p, c := range line[dec.pos:] {
switch {
case c == '=':
dec.pos += p
if dec.pos > start {
dec.key = line[start:dec.pos]
if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) {
dec.syntaxError(invalidKeyError)
return false
}
}
if dec.key == nil {
dec.unexpectedByte(c)
return false
}
goto equal
case c == '"':
dec.pos += p
dec.unexpectedByte(c)
return false
case c <= ' ':
dec.pos += p
if dec.pos > start {
dec.key = line[start:dec.pos]
if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) {
dec.syntaxError(invalidKeyError)
return false
}
}
return true
case c >= utf8.RuneSelf:
multibyte = true
}
}
dec.pos = len(line)
if dec.pos > start {
dec.key = line[start:dec.pos]
if multibyte && bytes.ContainsRune(dec.key, utf8.RuneError) {
dec.syntaxError(invalidKeyError)
return false
}
}
return true
equal:
dec.pos++
if dec.pos >= len(line) {
return true
}
switch c := line[dec.pos]; {
case c <= ' ':
return true
case c == '"':
goto qvalue
}
// value
start = dec.pos
for p, c := range line[dec.pos:] {
switch {
case c == '=' || c == '"':
dec.pos += p
dec.unexpectedByte(c)
return false
case c <= ' ':
dec.pos += p
if dec.pos > start {
dec.value = line[start:dec.pos]
}
return true
}
}
dec.pos = len(line)
if dec.pos > start {
dec.value = line[start:dec.pos]
}
return true
qvalue:
const (
untermQuote = "unterminated quoted value"
invalidQuote = "invalid quoted value"
)
hasEsc, esc := false, false
start = dec.pos
for p, c := range line[dec.pos+1:] {
switch {
case esc:
esc = false
case c == '\\':
hasEsc, esc = true, true
case c == '"':
dec.pos += p + 2
if hasEsc {
v, ok := unquoteBytes(line[start:dec.pos])
if !ok {
dec.syntaxError(invalidQuote)
return false
}
dec.value = v
} else {
start++
end := dec.pos - 1
if end > start {
dec.value = line[start:end]
}
}
return true
}
}
dec.pos = len(line)
dec.syntaxError(untermQuote)
return false
}
// Key returns the most recent key found by a call to ScanKeyval. The returned
// slice may point to internal buffers and is only valid until the next call
// to ScanRecord. It does no allocation.
func (dec *Decoder) Key() []byte {
return dec.key
}
// Value returns the most recent value found by a call to ScanKeyval. The
// returned slice may point to internal buffers and is only valid until the
// next call to ScanRecord. It does no allocation when the value has no
// escape sequences.
func (dec *Decoder) Value() []byte {
return dec.value
}
// Err returns the first non-EOF error that was encountered by the Scanner.
func (dec *Decoder) Err() error {
return dec.err
}
func (dec *Decoder) syntaxError(msg string) {
dec.err = &SyntaxError{
Msg: msg,
Line: dec.lineNum,
Pos: dec.pos + 1,
}
}
func (dec *Decoder) unexpectedByte(c byte) {
dec.err = &SyntaxError{
Msg: fmt.Sprintf("unexpected %q", c),
Line: dec.lineNum,
Pos: dec.pos + 1,
}
}
// A SyntaxError represents a syntax error in the logfmt input stream.
type SyntaxError struct {
Msg string
Line int
Pos int
}
func (e *SyntaxError) Error() string {
return fmt.Sprintf("logfmt syntax error at pos %d on line %d: %s", e.Pos, e.Line, e.Msg)
}

6
vendor/github.com/go-logfmt/logfmt/doc.go generated vendored Normal file
View File

@ -0,0 +1,6 @@
// Package logfmt implements utilities to marshal and unmarshal data in the
// logfmt format. The logfmt format records key/value pairs in a way that
// balances readability for humans and simplicity of computer parsing. It is
// most commonly used as a more human friendly alternative to JSON for
// structured logging.
package logfmt

322
vendor/github.com/go-logfmt/logfmt/encode.go generated vendored Normal file
View File

@ -0,0 +1,322 @@
package logfmt
import (
"bytes"
"encoding"
"errors"
"fmt"
"io"
"reflect"
"strings"
"unicode/utf8"
)
// MarshalKeyvals returns the logfmt encoding of keyvals, a variadic sequence
// of alternating keys and values.
func MarshalKeyvals(keyvals ...interface{}) ([]byte, error) {
buf := &bytes.Buffer{}
if err := NewEncoder(buf).EncodeKeyvals(keyvals...); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// An Encoder writes logfmt data to an output stream.
type Encoder struct {
w io.Writer
scratch bytes.Buffer
needSep bool
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: w,
}
}
var (
space = []byte(" ")
equals = []byte("=")
newline = []byte("\n")
null = []byte("null")
)
// EncodeKeyval writes the logfmt encoding of key and value to the stream. A
// single space is written before the second and subsequent keys in a record.
// Nothing is written if a non-nil error is returned.
func (enc *Encoder) EncodeKeyval(key, value interface{}) error {
enc.scratch.Reset()
if enc.needSep {
if _, err := enc.scratch.Write(space); err != nil {
return err
}
}
if err := writeKey(&enc.scratch, key); err != nil {
return err
}
if _, err := enc.scratch.Write(equals); err != nil {
return err
}
if err := writeValue(&enc.scratch, value); err != nil {
return err
}
_, err := enc.w.Write(enc.scratch.Bytes())
enc.needSep = true
return err
}
// EncodeKeyvals writes the logfmt encoding of keyvals to the stream. Keyvals
// is a variadic sequence of alternating keys and values. Keys of unsupported
// type are skipped along with their corresponding value. Values of
// unsupported type or that cause a MarshalerError are replaced by their error
// but do not cause EncodeKeyvals to return an error. If a non-nil error is
// returned some key/value pairs may not have be written.
func (enc *Encoder) EncodeKeyvals(keyvals ...interface{}) error {
if len(keyvals) == 0 {
return nil
}
if len(keyvals)%2 == 1 {
keyvals = append(keyvals, nil)
}
for i := 0; i < len(keyvals); i += 2 {
k, v := keyvals[i], keyvals[i+1]
err := enc.EncodeKeyval(k, v)
if err == ErrUnsupportedKeyType {
continue
}
if _, ok := err.(*MarshalerError); ok || err == ErrUnsupportedValueType {
v = err
err = enc.EncodeKeyval(k, v)
}
if err != nil {
return err
}
}
return nil
}
// MarshalerError represents an error encountered while marshaling a value.
type MarshalerError struct {
Type reflect.Type
Err error
}
func (e *MarshalerError) Error() string {
return "error marshaling value of type " + e.Type.String() + ": " + e.Err.Error()
}
// ErrNilKey is returned by Marshal functions and Encoder methods if a key is
// a nil interface or pointer value.
var ErrNilKey = errors.New("nil key")
// ErrInvalidKey is returned by Marshal functions and Encoder methods if, after
// dropping invalid runes, a key is empty.
var ErrInvalidKey = errors.New("invalid key")
// ErrUnsupportedKeyType is returned by Encoder methods if a key has an
// unsupported type.
var ErrUnsupportedKeyType = errors.New("unsupported key type")
// ErrUnsupportedValueType is returned by Encoder methods if a value has an
// unsupported type.
var ErrUnsupportedValueType = errors.New("unsupported value type")
func writeKey(w io.Writer, key interface{}) error {
if key == nil {
return ErrNilKey
}
switch k := key.(type) {
case string:
return writeStringKey(w, k)
case []byte:
if k == nil {
return ErrNilKey
}
return writeBytesKey(w, k)
case encoding.TextMarshaler:
kb, err := safeMarshal(k)
if err != nil {
return err
}
if kb == nil {
return ErrNilKey
}
return writeBytesKey(w, kb)
case fmt.Stringer:
ks, ok := safeString(k)
if !ok {
return ErrNilKey
}
return writeStringKey(w, ks)
default:
rkey := reflect.ValueOf(key)
switch rkey.Kind() {
case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct:
return ErrUnsupportedKeyType
case reflect.Ptr:
if rkey.IsNil() {
return ErrNilKey
}
return writeKey(w, rkey.Elem().Interface())
}
return writeStringKey(w, fmt.Sprint(k))
}
}
// keyRuneFilter returns r for all valid key runes, and -1 for all invalid key
// runes. When used as the mapping function for strings.Map and bytes.Map
// functions it causes them to remove invalid key runes from strings or byte
// slices respectively.
func keyRuneFilter(r rune) rune {
if r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError {
return -1
}
return r
}
func writeStringKey(w io.Writer, key string) error {
k := strings.Map(keyRuneFilter, key)
if k == "" {
return ErrInvalidKey
}
_, err := io.WriteString(w, k)
return err
}
func writeBytesKey(w io.Writer, key []byte) error {
k := bytes.Map(keyRuneFilter, key)
if len(k) == 0 {
return ErrInvalidKey
}
_, err := w.Write(k)
return err
}
func writeValue(w io.Writer, value interface{}) error {
switch v := value.(type) {
case nil:
return writeBytesValue(w, null)
case string:
return writeStringValue(w, v, true)
case []byte:
return writeBytesValue(w, v)
case encoding.TextMarshaler:
vb, err := safeMarshal(v)
if err != nil {
return err
}
if vb == nil {
vb = null
}
return writeBytesValue(w, vb)
case error:
se, ok := safeError(v)
return writeStringValue(w, se, ok)
case fmt.Stringer:
ss, ok := safeString(v)
return writeStringValue(w, ss, ok)
default:
rvalue := reflect.ValueOf(value)
switch rvalue.Kind() {
case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct:
return ErrUnsupportedValueType
case reflect.Ptr:
if rvalue.IsNil() {
return writeBytesValue(w, null)
}
return writeValue(w, rvalue.Elem().Interface())
}
return writeStringValue(w, fmt.Sprint(v), true)
}
}
func needsQuotedValueRune(r rune) bool {
return r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError
}
func writeStringValue(w io.Writer, value string, ok bool) error {
var err error
if ok && value == "null" {
_, err = io.WriteString(w, `"null"`)
} else if strings.IndexFunc(value, needsQuotedValueRune) != -1 {
_, err = writeQuotedString(w, value)
} else {
_, err = io.WriteString(w, value)
}
return err
}
func writeBytesValue(w io.Writer, value []byte) error {
var err error
if bytes.IndexFunc(value, needsQuotedValueRune) != -1 {
_, err = writeQuotedBytes(w, value)
} else {
_, err = w.Write(value)
}
return err
}
// EndRecord writes a newline character to the stream and resets the encoder
// to the beginning of a new record.
func (enc *Encoder) EndRecord() error {
_, err := enc.w.Write(newline)
if err == nil {
enc.needSep = false
}
return err
}
// Reset resets the encoder to the beginning of a new record.
func (enc *Encoder) Reset() {
enc.needSep = false
}
func safeError(err error) (s string, ok bool) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
s, ok = "null", false
} else {
s, ok = fmt.Sprintf("PANIC:%v", panicVal), false
}
}
}()
s, ok = err.Error(), true
return
}
func safeString(str fmt.Stringer) (s string, ok bool) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
s, ok = "null", false
} else {
s, ok = fmt.Sprintf("PANIC:%v", panicVal), true
}
}
}()
s, ok = str.String(), true
return
}
func safeMarshal(tm encoding.TextMarshaler) (b []byte, err error) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(tm); v.Kind() == reflect.Ptr && v.IsNil() {
b, err = nil, nil
} else {
b, err = nil, fmt.Errorf("panic when marshalling: %s", panicVal)
}
}
}()
b, err = tm.MarshalText()
if err != nil {
return nil, &MarshalerError{
Type: reflect.TypeOf(tm),
Err: err,
}
}
return
}

277
vendor/github.com/go-logfmt/logfmt/jsonstring.go generated vendored Normal file
View File

@ -0,0 +1,277 @@
package logfmt
import (
"bytes"
"io"
"strconv"
"sync"
"unicode"
"unicode/utf16"
"unicode/utf8"
)
// Taken from Go's encoding/json and modified for use here.
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
var hex = "0123456789abcdef"
var bufferPool = sync.Pool{
New: func() interface{} {
return &bytes.Buffer{}
},
}
func getBuffer() *bytes.Buffer {
return bufferPool.Get().(*bytes.Buffer)
}
func poolBuffer(buf *bytes.Buffer) {
buf.Reset()
bufferPool.Put(buf)
}
// NOTE: keep in sync with writeQuotedBytes below.
func writeQuotedString(w io.Writer, s string) (int, error) {
buf := getBuffer()
buf.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' {
i++
continue
}
if start < i {
buf.WriteString(s[start:i])
}
switch b {
case '\\', '"':
buf.WriteByte('\\')
buf.WriteByte(b)
case '\n':
buf.WriteByte('\\')
buf.WriteByte('n')
case '\r':
buf.WriteByte('\\')
buf.WriteByte('r')
case '\t':
buf.WriteByte('\\')
buf.WriteByte('t')
default:
// This encodes bytes < 0x20 except for \n, \r, and \t.
buf.WriteString(`\u00`)
buf.WriteByte(hex[b>>4])
buf.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError {
if start < i {
buf.WriteString(s[start:i])
}
buf.WriteString(`\ufffd`)
i += size
start = i
continue
}
i += size
}
if start < len(s) {
buf.WriteString(s[start:])
}
buf.WriteByte('"')
n, err := w.Write(buf.Bytes())
poolBuffer(buf)
return n, err
}
// NOTE: keep in sync with writeQuotedString above.
func writeQuotedBytes(w io.Writer, s []byte) (int, error) {
buf := getBuffer()
buf.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' {
i++
continue
}
if start < i {
buf.Write(s[start:i])
}
switch b {
case '\\', '"':
buf.WriteByte('\\')
buf.WriteByte(b)
case '\n':
buf.WriteByte('\\')
buf.WriteByte('n')
case '\r':
buf.WriteByte('\\')
buf.WriteByte('r')
case '\t':
buf.WriteByte('\\')
buf.WriteByte('t')
default:
// This encodes bytes < 0x20 except for \n, \r, and \t.
buf.WriteString(`\u00`)
buf.WriteByte(hex[b>>4])
buf.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRune(s[i:])
if c == utf8.RuneError {
if start < i {
buf.Write(s[start:i])
}
buf.WriteString(`\ufffd`)
i += size
start = i
continue
}
i += size
}
if start < len(s) {
buf.Write(s[start:])
}
buf.WriteByte('"')
n, err := w.Write(buf.Bytes())
poolBuffer(buf)
return n, err
}
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
// or it returns -1.
func getu4(s []byte) rune {
if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
return -1
}
r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
if err != nil {
return -1
}
return rune(r)
}
func unquoteBytes(s []byte) (t []byte, ok bool) {
if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
return
}
s = s[1 : len(s)-1]
// Check for unusual characters. If there are none,
// then no unquoting is needed, so return a slice of the
// original bytes.
r := 0
for r < len(s) {
c := s[r]
if c == '\\' || c == '"' || c < ' ' {
break
}
if c < utf8.RuneSelf {
r++
continue
}
rr, size := utf8.DecodeRune(s[r:])
if rr == utf8.RuneError {
break
}
r += size
}
if r == len(s) {
return s, true
}
b := make([]byte, len(s)+2*utf8.UTFMax)
w := copy(b, s[0:r])
for r < len(s) {
// Out of room? Can only happen if s is full of
// malformed UTF-8 and we're replacing each
// byte with RuneError.
if w >= len(b)-2*utf8.UTFMax {
nb := make([]byte, (len(b)+utf8.UTFMax)*2)
copy(nb, b[0:w])
b = nb
}
switch c := s[r]; {
case c == '\\':
r++
if r >= len(s) {
return
}
switch s[r] {
default:
return
case '"', '\\', '/', '\'':
b[w] = s[r]
r++
w++
case 'b':
b[w] = '\b'
r++
w++
case 'f':
b[w] = '\f'
r++
w++
case 'n':
b[w] = '\n'
r++
w++
case 'r':
b[w] = '\r'
r++
w++
case 't':
b[w] = '\t'
r++
w++
case 'u':
r--
rr := getu4(s[r:])
if rr < 0 {
return
}
r += 6
if utf16.IsSurrogate(rr) {
rr1 := getu4(s[r:])
if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
// A valid pair; consume.
r += 6
w += utf8.EncodeRune(b[w:], dec)
break
}
// Invalid surrogate; fall back to replacement rune.
rr = unicode.ReplacementChar
}
w += utf8.EncodeRune(b[w:], rr)
}
// Quote, control characters are invalid.
case c == '"', c < ' ':
return
// ASCII
case c < utf8.RuneSelf:
b[w] = c
r++
w++
// Coerce to well-formed UTF-8.
default:
rr, size := utf8.DecodeRune(s[r:])
r += size
w += utf8.EncodeRune(b[w:], rr)
}
}
return b[0:w], true
}
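Assuming these helpers back go-logfmt/logfmt's value quoting (an assumption; the file header is not shown above), here is a minimal sketch of how they surface through the package's public encoder:

package main

import (
	"os"

	"github.com/go-logfmt/logfmt"
)

func main() {
	enc := logfmt.NewEncoder(os.Stdout)
	// A value containing spaces, quotes, or control bytes goes through the
	// JSON-style quoting path sketched above; a bare token is written verbatim.
	_ = enc.EncodeKeyval("level", "info")
	_ = enc.EncodeKeyval("msg", "said \"hello\"\nand left")
	_ = enc.EndRecord()
	// Output (roughly): level=info msg="said \"hello\"\nand left"
}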

vendor/github.com/google/go-cmp/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2017 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/google/go-cmp/cmp/compare.go generated vendored Normal file
@@ -0,0 +1,667 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cmp determines equality of values.
//
// This package is intended to be a more powerful and safer alternative to
// reflect.DeepEqual for comparing whether two values are semantically equal.
// It is intended to only be used in tests, as performance is not a goal and
// it may panic if it cannot compare the values. Its propensity towards
// panicking means that it is unsuitable for production environments where a
// spurious panic may be fatal.
//
// The primary features of cmp are:
//
// • When the default behavior of equality does not suit the needs of the test,
// custom equality functions can override the equality operation.
// For example, an equality function may report floats as equal so long as they
// are within some tolerance of each other.
//
// • Types that have an Equal method may use that method to determine equality.
// This allows package authors to determine the equality operation for the types
// that they define.
//
// • If no custom equality functions are used and no Equal method is defined,
// equality is determined by recursively comparing the primitive kinds on both
// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
// fields are not compared by default; they result in panics unless suppressed
// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly
// compared using the Exporter option.
package cmp
import (
"fmt"
"reflect"
"strings"
"github.com/google/go-cmp/cmp/internal/diff"
"github.com/google/go-cmp/cmp/internal/function"
"github.com/google/go-cmp/cmp/internal/value"
)
// TODO(≥go1.18): Use any instead of interface{}.
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
// • Let S be the set of all Ignore, Transformer, and Comparer options that
// remain after applying all path filters, value filters, and type filters.
// If at least one Ignore exists in S, then the comparison is ignored.
// If the number of Transformer and Comparer options in S is greater than one,
// then Equal panics because it is ambiguous which option to use.
// If S contains a single Transformer, then use that to transform the current
// values and recursively call Equal on the output values.
// If S contains a single Comparer, then use that to compare the current values.
// Otherwise, evaluation proceeds to the next rule.
//
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
// evaluation proceeds to the next rule.
//
// • Lastly, try to compare x and y based on their basic kinds.
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
// channels are compared using the equivalent of the == operator in Go.
// Functions are only equal if they are both nil, otherwise they are unequal.
//
// Structs are equal if recursively calling Equal on all fields reports equal.
// If a struct contains unexported fields, Equal panics unless an Ignore option
// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option
// explicitly permits comparing the unexported field.
//
// Slices are equal if they are both nil or both non-nil, where recursively
// calling Equal on all non-ignored slice or array elements reports equal.
// Empty non-nil slices and nil slices are not equal; to equate empty slices,
// consider using cmpopts.EquateEmpty.
//
// Maps are equal if they are both nil or both non-nil, where recursively
// calling Equal on all non-ignored map entries reports equal.
// Map keys are equal according to the == operator.
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
// Empty non-nil maps and nil maps are not equal; to equate empty maps,
// consider using cmpopts.EquateEmpty.
//
// Pointers and interfaces are equal if they are both nil or both non-nil,
// where they have the same underlying concrete type and recursively
// calling Equal on the underlying values reports equal.
//
// Before recursing into a pointer, slice element, or map, the current path
// is checked to detect whether the address has already been visited.
// If there is a cycle, then the pointed at values are considered equal
// only if both addresses were previously visited in the same path step.
func Equal(x, y interface{}, opts ...Option) bool {
s := newState(opts)
s.compareAny(rootStep(x, y))
return s.result.Equal()
}
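As a minimal sketch of the rules above (not part of the vendored file): rule 3 compares floats with ==, while a Comparer supplied as an option takes precedence under rule 1. The tolerance value here is illustrative.

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x, y := 0.1, 0.2
	// Rule 3 only: float64 values are compared with ==.
	fmt.Println(cmp.Equal(x+y, 0.3)) // false

	// Rule 1: a Comparer overrides the default float comparison.
	approx := cmp.Comparer(func(a, b float64) bool {
		return math.Abs(a-b) < 1e-9
	})
	fmt.Println(cmp.Equal(x+y, 0.3, approx)) // true
}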
// Diff returns a human-readable report of the differences between two values:
// y - x. It returns an empty string if and only if Equal returns true for the
// same input values and options.
//
// The output is displayed as a literal in pseudo-Go syntax.
// At the start of each line, a "-" prefix indicates an element removed from x,
// a "+" prefix to indicates an element added from y, and the lack of a prefix
// indicates an element common to both x and y. If possible, the output
// uses fmt.Stringer.String or error.Error methods to produce more humanly
// readable outputs. In such cases, the string is prefixed with either an
// 's' or 'e' character, respectively, to indicate that the method was called.
//
// Do not depend on this output being stable. If you need the ability to
// programmatically interpret the difference, consider using a custom Reporter.
func Diff(x, y interface{}, opts ...Option) string {
s := newState(opts)
// Optimization: If there are no other reporters, we can optimize for the
// common case where the result is equal (and thus no reported difference).
// This avoids the expensive construction of a difference tree.
if len(s.reporters) == 0 {
s.compareAny(rootStep(x, y))
if s.result.Equal() {
return ""
}
s.result = diff.Result{} // Reset results
}
r := new(defaultReporter)
s.reporters = append(s.reporters, reporter{r})
s.compareAny(rootStep(x, y))
d := r.String()
if (d == "") != s.result.Equal() {
panic("inconsistent difference and equality results")
}
return d
}
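A companion sketch for Diff; the report text is deliberately unstable, so it is printed rather than parsed. The point struct is illustrative.

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type point struct{ X, Y int }

func main() {
	got := []point{{1, 2}, {3, 4}}
	want := []point{{1, 2}, {3, 5}}
	if diff := cmp.Diff(want, got); diff != "" {
		// Conventionally read as (-want +got), matching the prefixes above.
		fmt.Printf("mismatch (-want +got):\n%s", diff)
	}
}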
// rootStep constructs the first path step. If x and y have differing types,
// then they are stored within an empty interface type.
func rootStep(x, y interface{}) PathStep {
vx := reflect.ValueOf(x)
vy := reflect.ValueOf(y)
// If the inputs are different types, auto-wrap them in an empty interface
// so that they have the same parent type.
var t reflect.Type
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
t = reflect.TypeOf((*interface{})(nil)).Elem()
if vx.IsValid() {
vvx := reflect.New(t).Elem()
vvx.Set(vx)
vx = vvx
}
if vy.IsValid() {
vvy := reflect.New(t).Elem()
vvy.Set(vy)
vy = vvy
}
} else {
t = vx.Type()
}
return &pathStep{t, vx, vy}
}
type state struct {
// These fields represent the "comparison state".
// Calling statelessCompare must not result in observable changes to these.
result diff.Result // The current result of comparison
curPath Path // The current path in the value tree
curPtrs pointerPath // The current set of visited pointers
reporters []reporter // Optional reporters
// recChecker checks for infinite cycles applying the same set of
// transformers upon the output of itself.
recChecker recChecker
// dynChecker triggers pseudo-random checks for option correctness.
// It is safe for statelessCompare to mutate this value.
dynChecker dynChecker
// These fields, once set by processOption, will not change.
exporters []exporter // List of exporters for structs with unexported fields
opts Options // List of all fundamental and filter options
}
func newState(opts []Option) *state {
// Always ensure a validator option exists to validate the inputs.
s := &state{opts: Options{validator{}}}
s.curPtrs.Init()
s.processOption(Options(opts))
return s
}
func (s *state) processOption(opt Option) {
switch opt := opt.(type) {
case nil:
case Options:
for _, o := range opt {
s.processOption(o)
}
case coreOption:
type filtered interface {
isFiltered() bool
}
if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
}
s.opts = append(s.opts, opt)
case exporter:
s.exporters = append(s.exporters, opt)
case reporter:
s.reporters = append(s.reporters, opt)
default:
panic(fmt.Sprintf("unknown option %T", opt))
}
}
// statelessCompare compares two values and returns the result.
// This function is stateless in that it does not alter the current result,
// or output to any registered reporters.
func (s *state) statelessCompare(step PathStep) diff.Result {
// We do not save and restore curPath and curPtrs because all of the
// compareX methods should properly push and pop from them.
// It is an implementation bug if the contents of the paths differ from
// when calling this function to when returning from it.
oldResult, oldReporters := s.result, s.reporters
s.result = diff.Result{} // Reset result
s.reporters = nil // Remove reporters to avoid spurious printouts
s.compareAny(step)
res := s.result
s.result, s.reporters = oldResult, oldReporters
return res
}
func (s *state) compareAny(step PathStep) {
// Update the path stack.
s.curPath.push(step)
defer s.curPath.pop()
for _, r := range s.reporters {
r.PushStep(step)
defer r.PopStep()
}
s.recChecker.Check(s.curPath)
// Cycle-detection for slice elements (see NOTE in compareSlice).
t := step.Type()
vx, vy := step.Values()
if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
px, py := vx.Addr(), vy.Addr()
if eq, visited := s.curPtrs.Push(px, py); visited {
s.report(eq, reportByCycle)
return
}
defer s.curPtrs.Pop(px, py)
}
// Rule 1: Check whether an option applies on this node in the value tree.
if s.tryOptions(t, vx, vy) {
return
}
// Rule 2: Check whether the type has a valid Equal method.
if s.tryMethod(t, vx, vy) {
return
}
// Rule 3: Compare based on the underlying kind.
switch t.Kind() {
case reflect.Bool:
s.report(vx.Bool() == vy.Bool(), 0)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s.report(vx.Int() == vy.Int(), 0)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
s.report(vx.Uint() == vy.Uint(), 0)
case reflect.Float32, reflect.Float64:
s.report(vx.Float() == vy.Float(), 0)
case reflect.Complex64, reflect.Complex128:
s.report(vx.Complex() == vy.Complex(), 0)
case reflect.String:
s.report(vx.String() == vy.String(), 0)
case reflect.Chan, reflect.UnsafePointer:
s.report(vx.Pointer() == vy.Pointer(), 0)
case reflect.Func:
s.report(vx.IsNil() && vy.IsNil(), 0)
case reflect.Struct:
s.compareStruct(t, vx, vy)
case reflect.Slice, reflect.Array:
s.compareSlice(t, vx, vy)
case reflect.Map:
s.compareMap(t, vx, vy)
case reflect.Ptr:
s.comparePtr(t, vx, vy)
case reflect.Interface:
s.compareInterface(t, vx, vy)
default:
panic(fmt.Sprintf("%v kind not handled", t.Kind()))
}
}
func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
// Evaluate all filters and apply the remaining options.
if opt := s.opts.filter(s, t, vx, vy); opt != nil {
opt.apply(s, vx, vy)
return true
}
return false
}
func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
// Check if this type even has an Equal method.
m, ok := t.MethodByName("Equal")
if !ok || !function.IsType(m.Type, function.EqualAssignable) {
return false
}
eq := s.callTTBFunc(m.Func, vx, vy)
s.report(eq, reportByMethod)
return true
}
func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{v})[0]
}
// Run the function twice and ensure that we get the same results back.
// We run in goroutines so that the race detector (if enabled) can detect
// unsafe mutations to the input.
c := make(chan reflect.Value)
go detectRaces(c, f, v)
got := <-c
want := f.Call([]reflect.Value{v})[0]
if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
// To avoid false-positives with non-reflexive equality operations,
// we sanity check whether a value is equal to itself.
if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
return want
}
panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
}
return want
}
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{x, y})[0].Bool()
}
// Swapping the input arguments is sufficient to check that
// f is symmetric and deterministic.
// We run in goroutines so that the race detector (if enabled) can detect
// unsafe mutations to the input.
c := make(chan reflect.Value)
go detectRaces(c, f, y, x)
got := <-c
want := f.Call([]reflect.Value{x, y})[0].Bool()
if !got.IsValid() || got.Bool() != want {
panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
}
return want
}
func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
var ret reflect.Value
defer func() {
recover() // Ignore panics, let the other call to f panic instead
c <- ret
}()
ret = f.Call(vs)[0]
}
func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
var addr bool
var vax, vay reflect.Value // Addressable versions of vx and vy
var mayForce, mayForceInit bool
step := StructField{&structField{}}
for i := 0; i < t.NumField(); i++ {
step.typ = t.Field(i).Type
step.vx = vx.Field(i)
step.vy = vy.Field(i)
step.name = t.Field(i).Name
step.idx = i
step.unexported = !isExported(step.name)
if step.unexported {
if step.name == "_" {
continue
}
// Defer checking of unexported fields until later to give an
// Ignore a chance to ignore the field.
if !vax.IsValid() || !vay.IsValid() {
// For retrieveUnexportedField to work, the parent struct must
// be addressable. Create a new copy of the values if
// necessary to make them addressable.
addr = vx.CanAddr() || vy.CanAddr()
vax = makeAddressable(vx)
vay = makeAddressable(vy)
}
if !mayForceInit {
for _, xf := range s.exporters {
mayForce = mayForce || xf(t)
}
mayForceInit = true
}
step.mayForce = mayForce
step.paddr = addr
step.pvx = vax
step.pvy = vay
step.field = t.Field(i)
}
s.compareAny(step)
}
}
func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
isSlice := t.Kind() == reflect.Slice
if isSlice && (vx.IsNil() || vy.IsNil()) {
s.report(vx.IsNil() && vy.IsNil(), 0)
return
}
// NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
// since a slice represents a list of pointers, rather than a single pointer.
// The pointer checking logic must be handled on a per-element basis
// in compareAny.
//
// A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
// pointer P, a length N, and a capacity C. Supposing each slice element has
// a memory size of M, then the slice is equivalent to the list of pointers:
// [P+i*M for i in range(N)]
//
// For example, v[:0] and v[:1] are slices with the same starting pointer,
// but they are clearly different values. Using the slice pointer alone
// violates the assumption that equal pointers implies equal values.
step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
withIndexes := func(ix, iy int) SliceIndex {
if ix >= 0 {
step.vx, step.xkey = vx.Index(ix), ix
} else {
step.vx, step.xkey = reflect.Value{}, -1
}
if iy >= 0 {
step.vy, step.ykey = vy.Index(iy), iy
} else {
step.vy, step.ykey = reflect.Value{}, -1
}
return step
}
// Ignore options are able to ignore missing elements in a slice.
// However, detecting these reliably requires an optimal differencing
// algorithm, which diff.Difference is not.
//
// Instead, we first iterate through both slices to detect which elements
// would be ignored if standing alone. The index of non-discarded elements
// are stored in a separate slice, which diffing is then performed on.
var indexesX, indexesY []int
var ignoredX, ignoredY []bool
for ix := 0; ix < vx.Len(); ix++ {
ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
if !ignored {
indexesX = append(indexesX, ix)
}
ignoredX = append(ignoredX, ignored)
}
for iy := 0; iy < vy.Len(); iy++ {
ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
if !ignored {
indexesY = append(indexesY, iy)
}
ignoredY = append(ignoredY, ignored)
}
// Compute an edit-script for slices vx and vy (excluding ignored elements).
edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
})
// Replay the ignore-scripts and the edit-script.
var ix, iy int
for ix < vx.Len() || iy < vy.Len() {
var e diff.EditType
switch {
case ix < len(ignoredX) && ignoredX[ix]:
e = diff.UniqueX
case iy < len(ignoredY) && ignoredY[iy]:
e = diff.UniqueY
default:
e, edits = edits[0], edits[1:]
}
switch e {
case diff.UniqueX:
s.compareAny(withIndexes(ix, -1))
ix++
case diff.UniqueY:
s.compareAny(withIndexes(-1, iy))
iy++
default:
s.compareAny(withIndexes(ix, iy))
ix++
iy++
}
}
}
func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), 0)
return
}
// Cycle-detection for maps.
if eq, visited := s.curPtrs.Push(vx, vy); visited {
s.report(eq, reportByCycle)
return
}
defer s.curPtrs.Pop(vx, vy)
// We combine and sort the two map keys so that we can perform the
// comparisons in a deterministic order.
step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
step.vx = vx.MapIndex(k)
step.vy = vy.MapIndex(k)
step.key = k
if !step.vx.IsValid() && !step.vy.IsValid() {
// It is possible for both vx and vy to be invalid if the
// key contained a NaN value in it.
//
// Even with the ability to retrieve NaN keys in Go 1.12,
// there still isn't a sensible way to compare the values since
// a NaN key may map to multiple unordered values.
// The most reasonable way to compare NaNs would be to compare the
// set of values. However, this is impossible to do efficiently
// since set equality is provably an O(n^2) operation given only
// an Equal function. If we had a Less function or Hash function,
// this could be done in O(n*log(n)) or O(n), respectively.
//
// Rather than adding complex logic to deal with NaNs, make it
// the user's responsibility to compare such obscure maps.
const help = "consider providing a Comparer to compare the map"
panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
}
s.compareAny(step)
}
}
func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), 0)
return
}
// Cycle-detection for pointers.
if eq, visited := s.curPtrs.Push(vx, vy); visited {
s.report(eq, reportByCycle)
return
}
defer s.curPtrs.Pop(vx, vy)
vx, vy = vx.Elem(), vy.Elem()
s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
}
func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), 0)
return
}
vx, vy = vx.Elem(), vy.Elem()
if vx.Type() != vy.Type() {
s.report(false, 0)
return
}
s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
}
func (s *state) report(eq bool, rf resultFlags) {
if rf&reportByIgnore == 0 {
if eq {
s.result.NumSame++
rf |= reportEqual
} else {
s.result.NumDiff++
rf |= reportUnequal
}
}
for _, r := range s.reporters {
r.Report(Result{flags: rf})
}
}
// recChecker tracks the state needed to periodically perform checks that
// user provided transformers are not stuck in an infinitely recursive cycle.
type recChecker struct{ next int }
// Check scans the Path for any recursive transformers and panics when any
// recursive transformers are detected. Note that the presence of a
// recursive Transformer does not necessarily imply an infinite cycle.
// As such, this check only activates after some minimal number of path steps.
func (rc *recChecker) Check(p Path) {
const minLen = 1 << 16
if rc.next == 0 {
rc.next = minLen
}
if len(p) < rc.next {
return
}
rc.next <<= 1
// Check whether the same transformer has appeared at least twice.
var ss []string
m := map[Option]int{}
for _, ps := range p {
if t, ok := ps.(Transform); ok {
t := t.Option()
if m[t] == 1 { // Transformer was used exactly once before
tf := t.(*transformer).fnc.Type()
ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
}
m[t]++
}
}
if len(ss) > 0 {
const warning = "recursive set of Transformers detected"
const help = "consider using cmpopts.AcyclicTransformer"
set := strings.Join(ss, "\n\t")
panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
}
}
// dynChecker tracks the state needed to periodically perform checks that
// user provided functions are symmetric and deterministic.
// The zero value is safe for immediate use.
type dynChecker struct{ curr, next int }
// Next increments the state and reports whether a check should be performed.
//
// Checks occur every Nth function call, where N is a triangular number:
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
// See https://en.wikipedia.org/wiki/Triangular_number
//
// This sequence ensures that the cost of checks drops significantly as
// the number of functions calls grows larger.
func (dc *dynChecker) Next() bool {
ok := dc.curr == dc.next
if ok {
dc.curr = 0
dc.next++
}
dc.curr++
return ok
}
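A small standalone sketch (illustrative only) that reproduces the cadence described above, to make the triangular-number schedule concrete:

package main

import "fmt"

func main() {
	// Mirrors dynChecker.Next: a check fires on the 0th, 1st, 3rd, 6th,
	// 10th, ... call, i.e. at the triangular numbers.
	var curr, next int
	var checks []int
	for call := 0; call < 25; call++ {
		ok := curr == next
		if ok {
			curr = 0
			next++
			checks = append(checks, call)
		}
		curr++
	}
	fmt.Println(checks) // [0 1 3 6 10 15 21]
}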
// makeAddressable returns a value that is always addressable.
// It returns the input verbatim if it is already addressable,
// otherwise it creates a new value and returns an addressable copy.
func makeAddressable(v reflect.Value) reflect.Value {
if v.CanAddr() {
return v
}
vc := reflect.New(v.Type()).Elem()
vc.Set(v)
return vc
}

vendor/github.com/google/go-cmp/cmp/export_panic.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego
// +build purego
package cmp
import "reflect"
const supportExporters = false
func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
panic("no support for forcibly accessing unexported fields")
}

vendor/github.com/google/go-cmp/cmp/export_unsafe.go generated vendored Normal file
@@ -0,0 +1,36 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
package cmp
import (
"reflect"
"unsafe"
)
const supportExporters = true
// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
// a struct such that the value has read-write permissions.
//
// The parent struct, v, must be addressable, while f must be a StructField
// describing the field to retrieve. If addr is false,
// then the returned value will be shallow copied to be non-addressable.
func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
if !addr {
// A field is addressable if and only if the struct is addressable.
// If the original parent value was not addressable, shallow copy the
// value to make it non-addressable to avoid leaking an implementation
// detail of how forcibly exporting a field works.
if ve.Kind() == reflect.Interface && ve.IsNil() {
return reflect.Zero(f.Type)
}
return reflect.ValueOf(ve.Interface()).Convert(f.Type)
}
return ve
}
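From the caller's side, forcing comparison of unexported fields is an explicit opt-in via cmp.Exporter (or they can be skipped with cmpopts.IgnoreUnexported). A minimal sketch with an illustrative struct:

package main

import (
	"fmt"
	"reflect"

	"github.com/google/go-cmp/cmp"
)

type ticket struct{ id string } // unexported field

func main() {
	// Without an option, comparing ticket would panic on the unexported field.
	allowTicket := cmp.Exporter(func(t reflect.Type) bool {
		return t == reflect.TypeOf(ticket{})
	})
	fmt.Println(cmp.Equal(ticket{"a"}, ticket{"a"}, allowTicket)) // true
}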

@@ -0,0 +1,18 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !cmp_debug
// +build !cmp_debug
package diff
var debug debugger
type debugger struct{}
func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
return f
}
func (debugger) Update() {}
func (debugger) Finish() {}

@@ -0,0 +1,123 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cmp_debug
// +build cmp_debug
package diff
import (
"fmt"
"strings"
"sync"
"time"
)
// The algorithm can be seen running in real-time by enabling debugging:
// go test -tags=cmp_debug -v
//
// Example output:
// === RUN TestDifference/#34
// ┌───────────────────────────────┐
// │ \ · · · · · · · · · · · · · · │
// │ · # · · · · · · · · · · · · · │
// │ · \ · · · · · · · · · · · · · │
// │ · · \ · · · · · · · · · · · · │
// │ · · · X # · · · · · · · · · · │
// │ · · · # \ · · · · · · · · · · │
// │ · · · · · # # · · · · · · · · │
// │ · · · · · # \ · · · · · · · · │
// │ · · · · · · · \ · · · · · · · │
// │ · · · · · · · · \ · · · · · · │
// │ · · · · · · · · · \ · · · · · │
// │ · · · · · · · · · · \ · · # · │
// │ · · · · · · · · · · · \ # # · │
// │ · · · · · · · · · · · # # # · │
// │ · · · · · · · · · · # # # # · │
// │ · · · · · · · · · # # # # # · │
// │ · · · · · · · · · · · · · · \ │
// └───────────────────────────────┘
// [.Y..M.XY......YXYXY.|]
//
// The grid represents the edit-graph where the horizontal axis represents
// list X and the vertical axis represents list Y. The start of the two lists
// is the top-left, while the ends are the bottom-right. The '·' represents
// an unexplored node in the graph. The '\' indicates that the two symbols
// from list X and Y are equal. The 'X' indicates that two symbols are similar
// (but not exactly equal) to each other. The '#' indicates that the two symbols
// are different (and not similar). The algorithm traverses this graph trying to
// make the paths starting in the top-left and the bottom-right connect.
//
// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
// the currently established path from the forward and reverse searches,
// separated by a '|' character.
const (
updateDelay = 100 * time.Millisecond
finishDelay = 500 * time.Millisecond
ansiTerminal = true // ANSI escape codes used to move terminal cursor
)
var debug debugger
type debugger struct {
sync.Mutex
p1, p2 EditScript
fwdPath, revPath *EditScript
grid []byte
lines int
}
func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
dbg.Lock()
dbg.fwdPath, dbg.revPath = p1, p2
top := "┌─" + strings.Repeat("──", nx) + "┐\n"
row := "│ " + strings.Repeat("· ", nx) + "│\n"
btm := "└─" + strings.Repeat("──", nx) + "┘\n"
dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
dbg.lines = strings.Count(dbg.String(), "\n")
fmt.Print(dbg)
// Wrap the EqualFunc so that we can intercept each result.
return func(ix, iy int) (r Result) {
cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
for i := range cell {
cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
}
switch r = f(ix, iy); {
case r.Equal():
cell[0] = '\\'
case r.Similar():
cell[0] = 'X'
default:
cell[0] = '#'
}
return
}
}
func (dbg *debugger) Update() {
dbg.print(updateDelay)
}
func (dbg *debugger) Finish() {
dbg.print(finishDelay)
dbg.Unlock()
}
func (dbg *debugger) String() string {
dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
for i := len(*dbg.revPath) - 1; i >= 0; i-- {
dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
}
return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
}
func (dbg *debugger) print(d time.Duration) {
if ansiTerminal {
fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
}
fmt.Print(dbg)
time.Sleep(d)
}

@@ -0,0 +1,398 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package diff implements an algorithm for producing edit-scripts.
// The edit-script is a sequence of operations needed to transform one list
// of symbols into another (or vice-versa). The edits allowed are insertions,
// deletions, and modifications. The summation of all edits is called the
// Levenshtein distance as this problem is well-known in computer science.
//
// This package prioritizes performance over accuracy. That is, the run time
// is more important than obtaining a minimal Levenshtein distance.
package diff
import (
"math/rand"
"time"
"github.com/google/go-cmp/cmp/internal/flags"
)
// EditType represents a single operation within an edit-script.
type EditType uint8
const (
// Identity indicates that a symbol pair is identical in both list X and Y.
Identity EditType = iota
// UniqueX indicates that a symbol only exists in X and not Y.
UniqueX
// UniqueY indicates that a symbol only exists in Y and not X.
UniqueY
// Modified indicates that a symbol pair is a modification of each other.
Modified
)
// EditScript represents the series of differences between two lists.
type EditScript []EditType
// String returns a human-readable string representing the edit-script where
// Identity, UniqueX, UniqueY, and Modified are represented by the
// '.', 'X', 'Y', and 'M' characters, respectively.
func (es EditScript) String() string {
b := make([]byte, len(es))
for i, e := range es {
switch e {
case Identity:
b[i] = '.'
case UniqueX:
b[i] = 'X'
case UniqueY:
b[i] = 'Y'
case Modified:
b[i] = 'M'
default:
panic("invalid edit-type")
}
}
return string(b)
}
// stats returns a histogram of the number of each type of edit operation.
func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
for _, e := range es {
switch e {
case Identity:
s.NI++
case UniqueX:
s.NX++
case UniqueY:
s.NY++
case Modified:
s.NM++
default:
panic("invalid edit-type")
}
}
return
}
// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
// lists X and Y are equal.
func (es EditScript) Dist() int { return len(es) - es.stats().NI }
// LenX is the length of the X list.
func (es EditScript) LenX() int { return len(es) - es.stats().NY }
// LenY is the length of the Y list.
func (es EditScript) LenY() int { return len(es) - es.stats().NX }
// EqualFunc reports whether the symbols at indexes ix and iy are equal.
// When called by Difference, the index is guaranteed to be within nx and ny.
type EqualFunc func(ix int, iy int) Result
// Result is the result of comparison.
// NumSame is the number of sub-elements that are equal.
// NumDiff is the number of sub-elements that are not equal.
type Result struct{ NumSame, NumDiff int }
// BoolResult returns a Result that is either Equal or not Equal.
func BoolResult(b bool) Result {
if b {
return Result{NumSame: 1} // Equal, Similar
} else {
return Result{NumDiff: 2} // Not Equal, not Similar
}
}
// Equal indicates whether the symbols are equal. Two symbols are equal
// if and only if NumDiff == 0. If Equal, then they are also Similar.
func (r Result) Equal() bool { return r.NumDiff == 0 }
// Similar indicates whether two symbols are similar and may be represented
// by using the Modified type. As a special case, we consider binary comparisons
// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
//
// The exact ratio of NumSame to NumDiff to determine similarity may change.
func (r Result) Similar() bool {
// Use NumSame+1 to offset NumSame so that binary comparisons are similar.
return r.NumSame+1 >= r.NumDiff
}
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
// Difference reports whether two lists of lengths nx and ny are equal
// given the definition of equality provided as f.
//
// This function returns an edit-script, which is a sequence of operations
// needed to convert one list into the other. The following invariants for
// the edit-script are maintained:
// • eq == (es.Dist()==0)
// • nx == es.LenX()
// • ny == es.LenY()
//
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
// produces an edit-script with a minimal Levenshtein distance). This algorithm
// favors performance over optimality. The exact output is not guaranteed to
// be stable and may change over time.
func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// This algorithm is based on traversing what is known as an "edit-graph".
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
// by Eugene W. Myers. Since D can be as large as N itself, this is
// effectively O(N^2). Unlike the algorithm from that paper, we are not
// interested in the optimal path, but at least some "decent" path.
//
// For example, let X and Y be lists of symbols:
// X = [A B C A B B A]
// Y = [C B A B A C]
//
// The edit-graph can be drawn as the following:
// A B C A B B A
// ┌─────────────┐
// C │_|_|\|_|_|_|_│ 0
// B │_|\|_|_|\|\|_│ 1
// A │\|_|_|\|_|_|\│ 2
// B │_|\|_|_|\|\|_│ 3
// A │\|_|_|\|_|_|\│ 4
// C │ | |\| | | | │ 5
// └─────────────┘ 6
// 0 1 2 3 4 5 6 7
//
// List X is written along the horizontal axis, while list Y is written
// along the vertical axis. At any point on this grid, if the symbol in
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
// The goal of any minimal edit-script algorithm is to find a path from the
// top-left corner to the bottom-right corner, while traveling through the
// fewest horizontal or vertical edges.
// A horizontal edge is equivalent to inserting a symbol from list X.
// A vertical edge is equivalent to inserting a symbol from list Y.
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
// • fwdFrontier.X < revFrontier.X
// • fwdFrontier.Y < revFrontier.Y
// Unless it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
fwdFrontier := fwdPath.point // Forward search frontier
revFrontier := revPath.point // Reverse search frontier
// Search budget bounds the cost of searching for better paths.
// The longest sequence of non-matching symbols that can be tolerated is
// approximately the square-root of the search budget.
searchBudget := 4 * (nx + ny) // O(n)
// Running the tests with the "cmp_debug" build tag prints a visualization
// of the algorithm running in real-time. This is educational for
// understanding how the algorithm works. See debug_enable.go.
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
// The algorithm below is a greedy, meet-in-the-middle algorithm for
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
// • Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner). The goal of
// the search is connect with the search from the opposite corner.
// • As we search, we build a path in a greedy manner, where the first
// match seen is added to the path (this is sub-optimal, but provides a
// decent result in practice). When matches are found, we try the next pair
// of symbols in the lists and follow all matches as far as possible.
// • When searching for matches, we search along a diagonal going through
// the "frontier" point. If no matches are found, we advance the
// frontier towards the opposite corner.
// • This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
// that two lists differ because elements were added to the front
// or end of the other list.
//
// Non-deterministically start with either the forward or reverse direction
// to introduce some deliberate instability so that we have the flexibility
// to change this algorithm in the future.
if flags.Deterministic || randBool {
goto forwardSearch
} else {
goto reverseSearch
}
forwardSearch:
{
// Forward search from the beginning.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
switch {
case p.X >= revPath.X || p.Y < fwdPath.Y:
stop1 = true // Hit top-right corner
case p.Y >= revPath.Y || p.X < fwdPath.X:
stop2 = true // Hit bottom-left corner
case f(p.X, p.Y).Equal():
// Match found, so connect the path to this point.
fwdPath.connect(p, f)
fwdPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(fwdPath.X, fwdPath.Y).Equal() {
break
}
fwdPath.append(Identity)
}
fwdFrontier = fwdPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards reverse point.
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
fwdFrontier.X++
} else {
fwdFrontier.Y++
}
goto reverseSearch
}
reverseSearch:
{
// Reverse search from the end.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{revFrontier.X - z, revFrontier.Y + z}
switch {
case fwdPath.X >= p.X || revPath.Y < p.Y:
stop1 = true // Hit bottom-left corner
case fwdPath.Y >= p.Y || revPath.X < p.X:
stop2 = true // Hit top-right corner
case f(p.X-1, p.Y-1).Equal():
// Match found, so connect the path to this point.
revPath.connect(p, f)
revPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(revPath.X-1, revPath.Y-1).Equal() {
break
}
revPath.append(Identity)
}
revFrontier = revPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards forward point.
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
revFrontier.X--
} else {
revFrontier.Y--
}
goto forwardSearch
}
finishSearch:
// Join the forward and reverse paths and then append the reverse path.
fwdPath.connect(revPath.point, f)
for i := len(revPath.es) - 1; i >= 0; i-- {
t := revPath.es[i]
revPath.es = revPath.es[:i]
fwdPath.append(t)
}
debug.Finish()
return fwdPath.es
}
type path struct {
dir int // +1 if forward, -1 if reverse
point // Leading point of the EditScript path
es EditScript
}
// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
// to the edit-script to connect p.point to dst.
func (p *path) connect(dst point, f EqualFunc) {
if p.dir > 0 {
// Connect in forward direction.
for dst.X > p.X && dst.Y > p.Y {
switch r := f(p.X, p.Y); {
case r.Equal():
p.append(Identity)
case r.Similar():
p.append(Modified)
case dst.X-p.X >= dst.Y-p.Y:
p.append(UniqueX)
default:
p.append(UniqueY)
}
}
for dst.X > p.X {
p.append(UniqueX)
}
for dst.Y > p.Y {
p.append(UniqueY)
}
} else {
// Connect in reverse direction.
for p.X > dst.X && p.Y > dst.Y {
switch r := f(p.X-1, p.Y-1); {
case r.Equal():
p.append(Identity)
case r.Similar():
p.append(Modified)
case p.Y-dst.Y >= p.X-dst.X:
p.append(UniqueY)
default:
p.append(UniqueX)
}
}
for p.X > dst.X {
p.append(UniqueX)
}
for p.Y > dst.Y {
p.append(UniqueY)
}
}
}
func (p *path) append(t EditType) {
p.es = append(p.es, t)
switch t {
case Identity, Modified:
p.add(p.dir, p.dir)
case UniqueX:
p.add(p.dir, 0)
case UniqueY:
p.add(0, p.dir)
}
debug.Update()
}
type point struct{ X, Y int }
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
func zigzag(x int) int {
if x&1 != 0 {
x = ^x
}
return x >> 1
}
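Because this diff package is internal to go-cmp, it can only be exercised from within the module; a hedged sketch, written as an in-package test, that checks the documented invariants rather than the (unstable) edit-script itself:

package diff

import "testing"

func TestDifferenceSketch(t *testing.T) {
	// The example lists from the edit-graph comment above.
	x, y := "ABCABBA", "CBABAC"
	es := Difference(len(x), len(y), func(ix, iy int) Result {
		return BoolResult(x[ix] == y[iy])
	})
	// Invariants stated in the Difference documentation.
	if es.LenX() != len(x) || es.LenY() != len(y) {
		t.Fatalf("edit-script lengths do not match inputs: %v", es)
	}
	t.Logf("edit-script %s, distance %d", es, es.Dist())
}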

@@ -0,0 +1,9 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flags
// Deterministic controls whether the output of Diff should be deterministic.
// This is only used for testing.
var Deterministic bool

@@ -0,0 +1,99 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package function provides functionality for identifying function types.
package function
import (
"reflect"
"regexp"
"runtime"
"strings"
)
type funcType int
const (
_ funcType = iota
tbFunc // func(T) bool
ttbFunc // func(T, T) bool
trbFunc // func(T, R) bool
tibFunc // func(T, I) bool
trFunc // func(T) R
Equal = ttbFunc // func(T, T) bool
EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
Transformer = trFunc // func(T) R
ValueFilter = ttbFunc // func(T, T) bool
Less = ttbFunc // func(T, T) bool
ValuePredicate = tbFunc // func(T) bool
KeyValuePredicate = trbFunc // func(T, R) bool
)
var boolType = reflect.TypeOf(true)
// IsType reports whether the reflect.Type is of the specified function type.
func IsType(t reflect.Type, ft funcType) bool {
if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
return false
}
ni, no := t.NumIn(), t.NumOut()
switch ft {
case tbFunc: // func(T) bool
if ni == 1 && no == 1 && t.Out(0) == boolType {
return true
}
case ttbFunc: // func(T, T) bool
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
return true
}
case trbFunc: // func(T, R) bool
if ni == 2 && no == 1 && t.Out(0) == boolType {
return true
}
case tibFunc: // func(T, I) bool
if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
return true
}
case trFunc: // func(T) R
if ni == 1 && no == 1 {
return true
}
}
return false
}
var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
// NameOf returns the name of the function value.
func NameOf(v reflect.Value) string {
fnc := runtime.FuncForPC(v.Pointer())
if fnc == nil {
return "<unknown>"
}
fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
// Method closures have a "-fm" suffix.
fullName = strings.TrimSuffix(fullName, "-fm")
var name string
for len(fullName) > 0 {
inParen := strings.HasSuffix(fullName, ")")
fullName = strings.TrimSuffix(fullName, ")")
s := lastIdentRx.FindString(fullName)
if s == "" {
break
}
name = s + "." + name
fullName = strings.TrimSuffix(fullName, s)
if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
fullName = fullName[:i]
}
fullName = strings.TrimSuffix(fullName, ".")
}
return strings.TrimSuffix(name, ".")
}
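A hedged sketch of NameOf, again framed as an in-package test since the function package is internal:

package function

import (
	"reflect"
	"strings"
	"testing"
)

func TestNameOfSketch(t *testing.T) {
	// A plain top-level function reduces to its package-qualified name.
	got := NameOf(reflect.ValueOf(strings.TrimSpace))
	if got != "strings.TrimSpace" {
		t.Errorf("NameOf() = %q, want %q", got, "strings.TrimSpace")
	}
}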

@@ -0,0 +1,164 @@
// Copyright 2020, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package value
import (
"reflect"
"strconv"
)
var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
// TypeString is nearly identical to reflect.Type.String,
// but has an additional option to specify that full type names be used.
func TypeString(t reflect.Type, qualified bool) string {
return string(appendTypeName(nil, t, qualified, false))
}
func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
// BUG: Go reflection provides no way to disambiguate two named types
// of the same name and within the same package,
// but declared within the namespace of different functions.
// Use the "any" alias instead of "interface{}" for better readability.
if t == anyType {
return append(b, "any"...)
}
// Named type.
if t.Name() != "" {
if qualified && t.PkgPath() != "" {
b = append(b, '"')
b = append(b, t.PkgPath()...)
b = append(b, '"')
b = append(b, '.')
b = append(b, t.Name()...)
} else {
b = append(b, t.String()...)
}
return b
}
// Unnamed type.
switch k := t.Kind(); k {
case reflect.Bool, reflect.String, reflect.UnsafePointer,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
b = append(b, k.String()...)
case reflect.Chan:
if t.ChanDir() == reflect.RecvDir {
b = append(b, "<-"...)
}
b = append(b, "chan"...)
if t.ChanDir() == reflect.SendDir {
b = append(b, "<-"...)
}
b = append(b, ' ')
b = appendTypeName(b, t.Elem(), qualified, false)
case reflect.Func:
if !elideFunc {
b = append(b, "func"...)
}
b = append(b, '(')
for i := 0; i < t.NumIn(); i++ {
if i > 0 {
b = append(b, ", "...)
}
if i == t.NumIn()-1 && t.IsVariadic() {
b = append(b, "..."...)
b = appendTypeName(b, t.In(i).Elem(), qualified, false)
} else {
b = appendTypeName(b, t.In(i), qualified, false)
}
}
b = append(b, ')')
switch t.NumOut() {
case 0:
// Do nothing
case 1:
b = append(b, ' ')
b = appendTypeName(b, t.Out(0), qualified, false)
default:
b = append(b, " ("...)
for i := 0; i < t.NumOut(); i++ {
if i > 0 {
b = append(b, ", "...)
}
b = appendTypeName(b, t.Out(i), qualified, false)
}
b = append(b, ')')
}
case reflect.Struct:
b = append(b, "struct{ "...)
for i := 0; i < t.NumField(); i++ {
if i > 0 {
b = append(b, "; "...)
}
sf := t.Field(i)
if !sf.Anonymous {
if qualified && sf.PkgPath != "" {
b = append(b, '"')
b = append(b, sf.PkgPath...)
b = append(b, '"')
b = append(b, '.')
}
b = append(b, sf.Name...)
b = append(b, ' ')
}
b = appendTypeName(b, sf.Type, qualified, false)
if sf.Tag != "" {
b = append(b, ' ')
b = strconv.AppendQuote(b, string(sf.Tag))
}
}
if b[len(b)-1] == ' ' {
b = b[:len(b)-1]
} else {
b = append(b, ' ')
}
b = append(b, '}')
case reflect.Slice, reflect.Array:
b = append(b, '[')
if k == reflect.Array {
b = strconv.AppendUint(b, uint64(t.Len()), 10)
}
b = append(b, ']')
b = appendTypeName(b, t.Elem(), qualified, false)
case reflect.Map:
b = append(b, "map["...)
b = appendTypeName(b, t.Key(), qualified, false)
b = append(b, ']')
b = appendTypeName(b, t.Elem(), qualified, false)
case reflect.Ptr:
b = append(b, '*')
b = appendTypeName(b, t.Elem(), qualified, false)
case reflect.Interface:
b = append(b, "interface{ "...)
for i := 0; i < t.NumMethod(); i++ {
if i > 0 {
b = append(b, "; "...)
}
m := t.Method(i)
if qualified && m.PkgPath != "" {
b = append(b, '"')
b = append(b, m.PkgPath...)
b = append(b, '"')
b = append(b, '.')
}
b = append(b, m.Name...)
b = appendTypeName(b, m.Type, qualified, true)
}
if b[len(b)-1] == ' ' {
b = b[:len(b)-1]
} else {
b = append(b, ' ')
}
b = append(b, '}')
default:
panic("invalid kind: " + k.String())
}
return b
}
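A hedged sketch of TypeString as an in-package test (the value package is internal as well):

package value

import (
	"reflect"
	"testing"
)

func TestTypeStringSketch(t *testing.T) {
	// An unqualified, unnamed composite type is rendered structurally.
	typ := reflect.TypeOf(map[string][]int(nil))
	if got := TypeString(typ, false); got != "map[string][]int" {
		t.Errorf("TypeString() = %q, want %q", got, "map[string][]int")
	}
}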

@@ -0,0 +1,34 @@
// Copyright 2018, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego
// +build purego
package value
import "reflect"
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
type Pointer struct {
p uintptr
t reflect.Type
}
// PointerOf returns a Pointer from v, which must be a
// reflect.Ptr, reflect.Slice, or reflect.Map.
func PointerOf(v reflect.Value) Pointer {
// NOTE: Storing a pointer as an uintptr is technically incorrect as it
// assumes that the GC implementation does not use a moving collector.
return Pointer{v.Pointer(), v.Type()}
}
// IsNil reports whether the pointer is nil.
func (p Pointer) IsNil() bool {
return p.p == 0
}
// Uintptr returns the pointer as a uintptr.
func (p Pointer) Uintptr() uintptr {
return p.p
}

@@ -0,0 +1,37 @@
// Copyright 2018, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
package value
import (
"reflect"
"unsafe"
)
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
type Pointer struct {
p unsafe.Pointer
t reflect.Type
}
// PointerOf returns a Pointer from v, which must be a
// reflect.Ptr, reflect.Slice, or reflect.Map.
func PointerOf(v reflect.Value) Pointer {
// The proper representation of a pointer is unsafe.Pointer,
// which is necessary if the GC ever uses a moving collector.
return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
}
// IsNil reports whether the pointer is nil.
func (p Pointer) IsNil() bool {
return p.p == nil
}
// Uintptr returns the pointer as a uintptr.
func (p Pointer) Uintptr() uintptr {
return uintptr(p.p)
}

@@ -0,0 +1,106 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package value
import (
"fmt"
"math"
"reflect"
"sort"
)
// SortKeys sorts a list of map keys, deduplicating keys if necessary.
// The type of each value must be comparable.
func SortKeys(vs []reflect.Value) []reflect.Value {
if len(vs) == 0 {
return vs
}
// Sort the map keys.
sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
// Deduplicate keys (fails for NaNs).
vs2 := vs[:1]
for _, v := range vs[1:] {
if isLess(vs2[len(vs2)-1], v) {
vs2 = append(vs2, v)
}
}
return vs2
}
// isLess is a generic function for sorting arbitrary map keys.
// The inputs must be of the same type and must be comparable.
func isLess(x, y reflect.Value) bool {
switch x.Type().Kind() {
case reflect.Bool:
return !x.Bool() && y.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return x.Int() < y.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return x.Uint() < y.Uint()
case reflect.Float32, reflect.Float64:
// NOTE: This does not sort -0 as less than +0
// since Go maps treat -0 and +0 as equal keys.
fx, fy := x.Float(), y.Float()
return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
case reflect.Complex64, reflect.Complex128:
cx, cy := x.Complex(), y.Complex()
rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
}
return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
return x.Pointer() < y.Pointer()
case reflect.String:
return x.String() < y.String()
case reflect.Array:
for i := 0; i < x.Len(); i++ {
if isLess(x.Index(i), y.Index(i)) {
return true
}
if isLess(y.Index(i), x.Index(i)) {
return false
}
}
return false
case reflect.Struct:
for i := 0; i < x.NumField(); i++ {
if isLess(x.Field(i), y.Field(i)) {
return true
}
if isLess(y.Field(i), x.Field(i)) {
return false
}
}
return false
case reflect.Interface:
vx, vy := x.Elem(), y.Elem()
if !vx.IsValid() || !vy.IsValid() {
return !vx.IsValid() && vy.IsValid()
}
tx, ty := vx.Type(), vy.Type()
if tx == ty {
return isLess(x.Elem(), y.Elem())
}
if tx.Kind() != ty.Kind() {
return vx.Kind() < vy.Kind()
}
if tx.String() != ty.String() {
return tx.String() < ty.String()
}
if tx.PkgPath() != ty.PkgPath() {
return tx.PkgPath() < ty.PkgPath()
}
// This can happen in rare situations, so we fallback to just comparing
// the unique pointer for a reflect.Type. This guarantees deterministic
// ordering within a program, but it is obviously not stable.
return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
default:
// Must be Func, Map, or Slice; which are not comparable.
panic(fmt.Sprintf("%T is not comparable", x.Type()))
}
}

@@ -0,0 +1,48 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package value
import (
"math"
"reflect"
)
// IsZero reports whether v is the zero value.
// This does not rely on Interface and so can be used on unexported fields.
func IsZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return v.Bool() == false
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return math.Float64bits(v.Float()) == 0
case reflect.Complex64, reflect.Complex128:
return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0
case reflect.String:
return v.String() == ""
case reflect.UnsafePointer:
return v.Pointer() == 0
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
return v.IsNil()
case reflect.Array:
for i := 0; i < v.Len(); i++ {
if !IsZero(v.Index(i)) {
return false
}
}
return true
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if !IsZero(v.Field(i)) {
return false
}
}
return true
}
return false
}

vendor/github.com/google/go-cmp/cmp/options.go generated vendored Normal file
@@ -0,0 +1,552 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmp
import (
"fmt"
"reflect"
"regexp"
"strings"
"github.com/google/go-cmp/cmp/internal/function"
)
// Option configures for specific behavior of Equal and Diff. In particular,
// the fundamental Option functions (Ignore, Transformer, and Comparer),
// configure how equality is determined.
//
// The fundamental options may be composed with filters (FilterPath and
// FilterValues) to control the scope over which they are applied.
//
// The cmp/cmpopts package provides helper functions for creating options that
// may be used with Equal and Diff.
type Option interface {
// filter applies all filters and returns the option that remains.
// Each option may only read s.curPath and call s.callTTBFunc.
//
// An Options is returned only if multiple comparers or transformers
// can apply simultaneously and will only contain values of those types
// or sub-Options containing values of those types.
filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
}
// applicableOption represents the following types:
// Fundamental: ignore | validator | *comparer | *transformer
// Grouping: Options
type applicableOption interface {
Option
// apply executes the option, which may mutate s or panic.
apply(s *state, vx, vy reflect.Value)
}
// coreOption represents the following types:
// Fundamental: ignore | validator | *comparer | *transformer
// Filters: *pathFilter | *valuesFilter
type coreOption interface {
Option
isCore()
}
type core struct{}
func (core) isCore() {}
// Options is a list of Option values that also satisfies the Option interface.
// Helper comparison packages may return an Options value when packing multiple
// Option values into a single Option. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
// on all individual options held within.
type Options []Option
func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
for _, opt := range opts {
switch opt := opt.filter(s, t, vx, vy); opt.(type) {
case ignore:
return ignore{} // Only ignore can short-circuit evaluation
case validator:
out = validator{} // Takes precedence over comparer or transformer
case *comparer, *transformer, Options:
switch out.(type) {
case nil:
out = opt
case validator:
// Keep validator
case *comparer, *transformer, Options:
out = Options{out, opt} // Conflicting comparers or transformers
}
}
}
return out
}
func (opts Options) apply(s *state, _, _ reflect.Value) {
const warning = "ambiguous set of applicable options"
const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
var ss []string
for _, opt := range flattenOptions(nil, opts) {
ss = append(ss, fmt.Sprint(opt))
}
set := strings.Join(ss, "\n\t")
panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
}
func (opts Options) String() string {
var ss []string
for _, opt := range opts {
ss = append(ss, fmt.Sprint(opt))
}
return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
}
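As a usage sketch (not part of this commit's code): several options can be bundled into a single cmp.Options value, which itself satisfies Option. The cmpopts helpers referenced here live in the same go-cmp module and are assumed to be available:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// Related settings grouped into one value that can be passed around.
	opts := cmp.Options{
		cmpopts.EquateEmpty(),         // nil and empty slices/maps compare equal
		cmpopts.EquateApprox(0, 1e-9), // absolute tolerance for float64 values
	}
	fmt.Println(cmp.Equal([]float64(nil), []float64{}, opts)) // true
}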
// FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
//
// This filter is called even if a slice element or map entry is missing and
// provides an opportunity to ignore such cases. The filter function must be
// symmetric such that the filter result is identical regardless of whether the
// missing value is from x or y.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterPath(f func(Path) bool, opt Option) Option {
if f == nil {
panic("invalid path filter function")
}
if opt := normalizeOption(opt); opt != nil {
return &pathFilter{fnc: f, opt: opt}
}
return nil
}
type pathFilter struct {
core
fnc func(Path) bool
opt Option
}
func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
if f.fnc(s.curPath) {
return f.opt.filter(s, t, vx, vy)
}
return nil
}
func (f pathFilter) String() string {
return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
}
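A minimal sketch of FilterPath paired with Ignore (defined further down in this file); the User type and the choice of field to ignore are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type User struct {
	Name     string
	LastSeen int64
}

func main() {
	// Ignore the LastSeen field by matching its path step (".LastSeen").
	ignoreLastSeen := cmp.FilterPath(func(p cmp.Path) bool {
		return p.Last().String() == ".LastSeen"
	}, cmp.Ignore())

	x := User{Name: "alice", LastSeen: 1}
	y := User{Name: "alice", LastSeen: 2}
	fmt.Println(cmp.Equal(x, y, ignoreLastSeen)) // true: only LastSeen differs
}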
// FilterValues returns a new Option where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If either value is invalid or
// the type of the values is not assignable to T, then this filter implicitly
// returns false.
//
// The filter function must be
// symmetric (i.e., agnostic to the order of the inputs) and
// deterministic (i.e., produces the same result when given the same inputs).
// If T is an interface, it is possible that f is called with two values with
// different concrete types that both implement T.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterValues(f interface{}, opt Option) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
panic(fmt.Sprintf("invalid values filter function: %T", f))
}
if opt := normalizeOption(opt); opt != nil {
vf := &valuesFilter{fnc: v, opt: opt}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
vf.typ = ti
}
return vf
}
return nil
}
type valuesFilter struct {
core
typ reflect.Type // T
fnc reflect.Value // func(T, T) bool
opt Option
}
func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
return nil
}
if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
return f.opt.filter(s, t, vx, vy)
}
return nil
}
func (f valuesFilter) String() string {
return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
}
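A sketch of FilterValues restricting a Comparer to pairs of NaN values (essentially how cmpopts.EquateNaNs is built); all other float64 pairs fall through to the default comparison:

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// The filter admits only pairs where both values are NaN; the inner
	// Comparer then declares such pairs equal.
	equateNaNs := cmp.FilterValues(func(x, y float64) bool {
		return math.IsNaN(x) && math.IsNaN(y)
	}, cmp.Comparer(func(x, y float64) bool { return true }))

	x := []float64{1, math.NaN()}
	y := []float64{1, math.NaN()}
	fmt.Println(cmp.Equal(x, y, equateNaNs)) // true
}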
// Ignore is an Option that causes all comparisons to be ignored.
// This value is intended to be combined with FilterPath or FilterValues.
// It is an error to pass an unfiltered Ignore option to Equal.
func Ignore() Option { return ignore{} }
type ignore struct{ core }
func (ignore) isFiltered() bool { return false }
func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
func (ignore) String() string { return "Ignore()" }
// validator is a sentinel Option type to indicate that some options could not
// be evaluated due to unexported fields, missing slice elements, or
// missing map entries. Both values are validator only for unexported fields.
type validator struct{ core }
func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
if !vx.IsValid() || !vy.IsValid() {
return validator{}
}
if !vx.CanInterface() || !vy.CanInterface() {
return validator{}
}
return nil
}
func (validator) apply(s *state, vx, vy reflect.Value) {
// Implies missing slice element or map entry.
if !vx.IsValid() || !vy.IsValid() {
s.report(vx.IsValid() == vy.IsValid(), 0)
return
}
// Unable to Interface implies unexported field without visibility access.
if !vx.CanInterface() || !vy.CanInterface() {
help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
var name string
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
// Named type with unexported fields.
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
if _, ok := reflect.New(t).Interface().(error); ok {
help = "consider using cmpopts.EquateErrors to compare error values"
}
} else {
// Unnamed type with unexported fields. Derive PkgPath from field.
var pkgPath string
for i := 0; i < t.NumField() && pkgPath == ""; i++ {
pkgPath = t.Field(i).PkgPath
}
name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
}
panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
}
panic("not reachable")
}
// identRx represents a valid identifier according to the Go specification.
const identRx = `[_\p{L}][_\p{L}\p{N}]*`
var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
// Transformer returns an Option that applies a transformation function that
// converts values of a certain type into that of another.
//
// The transformer f must be a function "func(T) R" that converts values of
// type T to those of type R and is implicitly filtered to input values
// assignable to T. The transformer must not mutate T in any way.
//
// To help prevent some cases of infinite recursive cycles applying the
// same transform to the output of itself (e.g., in the case where the
// input and output types are the same), an implicit filter is added such that
// a transformer is applicable only if that exact transformer is not already
// in the tail of the Path since the last non-Transform step.
// For situations where the implicit filter is still insufficient,
// consider using cmpopts.AcyclicTransformer, which adds a filter
// to prevent the transformer from being recursively applied upon itself.
//
// The name is a user provided label that is used as the Transform.Name in the
// transformation PathStep (and eventually shown in the Diff output).
// The name must be a valid identifier or qualified identifier in Go syntax.
// If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
panic(fmt.Sprintf("invalid transformer function: %T", f))
}
if name == "" {
name = function.NameOf(v)
if !identsRx.MatchString(name) {
name = "λ" // Lambda-symbol as placeholder name
}
} else if !identsRx.MatchString(name) {
panic(fmt.Sprintf("invalid name: %q", name))
}
tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
tr.typ = ti
}
return tr
}
type transformer struct {
core
name string
typ reflect.Type // T
fnc reflect.Value // func(T) R
}
func (tr *transformer) isFiltered() bool { return tr.typ != nil }
func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
for i := len(s.curPath) - 1; i >= 0; i-- {
if t, ok := s.curPath[i].(Transform); !ok {
break // Hit most recent non-Transform step
} else if tr == t.trans {
return nil // Cannot directly use same Transform
}
}
if tr.typ == nil || t.AssignableTo(tr.typ) {
return tr
}
return nil
}
func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
vvx := s.callTRFunc(tr.fnc, vx, step)
vvy := s.callTRFunc(tr.fnc, vy, step)
step.vx, step.vy = vvx, vvy
s.compareAny(step)
}
func (tr transformer) String() string {
return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
}
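A sketch of a Transformer that compares integer slices as unordered sets by sorting a copy of each input before comparison; the option name "SortInts" is an arbitrary label:

package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Sort a copy of each []int; the transformer must not mutate its input,
	// hence the copy before sorting.
	sortInts := cmp.Transformer("SortInts", func(in []int) []int {
		out := append([]int(nil), in...)
		sort.Ints(out)
		return out
	})

	fmt.Println(cmp.Equal([]int{1, 2, 3}, []int{3, 1, 2}, sortInts)) // true
}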
// Comparer returns an Option that determines whether two values are equal
// to each other.
//
// The comparer f must be a function "func(T, T) bool" and is implicitly
// filtered to input values assignable to T. If T is an interface, it is
// possible that f is called with two values of different concrete types that
// both implement T.
//
// The equality function must be:
// • Symmetric: equal(x, y) == equal(y, x)
// • Deterministic: equal(x, y) == equal(x, y)
// • Pure: equal(x, y) does not modify x or y
func Comparer(f interface{}) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
panic(fmt.Sprintf("invalid comparer function: %T", f))
}
cm := &comparer{fnc: v}
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
cm.typ = ti
}
return cm
}
type comparer struct {
core
typ reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
func (cm *comparer) isFiltered() bool { return cm.typ != nil }
func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
if cm.typ == nil || t.AssignableTo(cm.typ) {
return cm
}
return nil
}
func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
eq := s.callTTBFunc(cm.fnc, vx, vy)
s.report(eq, reportByFunc)
}
func (cm comparer) String() string {
return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
}
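A sketch of the *regexp.Regexp comparer mentioned in the Exporter documentation below: two compiled patterns are treated as equal when their source strings match:

package main

import (
	"fmt"
	"regexp"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Without this option, Equal panics on regexp.Regexp's unexported fields;
	// the Comparer short-circuits the comparison at the pointer level.
	eqRegexp := cmp.Comparer(func(x, y *regexp.Regexp) bool {
		return x.String() == y.String()
	})

	fmt.Println(cmp.Equal(regexp.MustCompile(`a+b`), regexp.MustCompile(`a+b`), eqRegexp)) // true
}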
// Exporter returns an Option that specifies whether Equal is allowed to
// introspect into the unexported fields of certain struct types.
//
// Users of this option must understand that comparing on unexported fields
// from external packages is not safe since changes in the internal
// implementation of some external package may cause the result of Equal
// to unexpectedly change. However, it may be valid to use this option on types
// defined in an internal package where the semantic meaning of an unexported
// field is in the control of the user.
//
// In many cases, a custom Comparer should be used instead that defines
// equality as a function of the public API of a type rather than the underlying
// unexported implementation.
//
// For example, the reflect.Type documentation defines equality to be determined
// by the == operator on the interface (essentially performing a shallow pointer
// comparison) and most attempts to compare *regexp.Regexp types are interested
// in only checking that the regular expression strings are equal.
// Both of these are accomplished using Comparers:
//
// Comparer(func(x, y reflect.Type) bool { return x == y })
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
//
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
// all unexported fields on specified struct types.
func Exporter(f func(reflect.Type) bool) Option {
if !supportExporters {
panic("Exporter is not supported on purego builds")
}
return exporter(f)
}
type exporter func(reflect.Type) bool
func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
panic("not implemented")
}
// AllowUnexported returns an Options that allows Equal to forcibly introspect
// unexported fields of the specified struct types.
//
// See Exporter for the proper use of this option.
func AllowUnexported(types ...interface{}) Option {
m := make(map[reflect.Type]bool)
for _, typ := range types {
t := reflect.TypeOf(typ)
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
m[t] = true
}
return exporter(func(t reflect.Type) bool { return m[t] })
}
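A sketch of AllowUnexported for a type assumed to be under the caller's control; the position type here is hypothetical:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// position has only unexported fields, so Equal needs explicit permission
// to inspect them.
type position struct {
	x, y int
}

func main() {
	opt := cmp.AllowUnexported(position{})
	fmt.Println(cmp.Equal(position{1, 2}, position{1, 2}, opt))      // true
	fmt.Println(cmp.Diff(position{1, 2}, position{1, 3}, opt) != "") // true: y differs
}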
// Result represents the comparison result for a single node and
// is provided by cmp when calling Report (see Reporter).
type Result struct {
_ [0]func() // Make Result incomparable
flags resultFlags
}
// Equal reports whether the node was determined to be equal or not.
// As a special case, ignored nodes are considered equal.
func (r Result) Equal() bool {
return r.flags&(reportEqual|reportByIgnore) != 0
}
// ByIgnore reports whether the node is equal because it was ignored.
// This never reports true if Equal reports false.
func (r Result) ByIgnore() bool {
return r.flags&reportByIgnore != 0
}
// ByMethod reports whether the Equal method determined equality.
func (r Result) ByMethod() bool {
return r.flags&reportByMethod != 0
}
// ByFunc reports whether a Comparer function determined equality.
func (r Result) ByFunc() bool {
return r.flags&reportByFunc != 0
}
// ByCycle reports whether a reference cycle was detected.
func (r Result) ByCycle() bool {
return r.flags&reportByCycle != 0
}
type resultFlags uint
const (
_ resultFlags = (1 << iota) / 2
reportEqual
reportUnequal
reportByIgnore
reportByMethod
reportByFunc
reportByCycle
)
// Reporter is an Option that can be passed to Equal. When Equal traverses
// the value trees, it calls PushStep as it descends into each node in the
// tree and PopStep as it ascends out of the node. The leaves of the tree are
// either compared (determined to be equal or not equal) or ignored and reported
// as such by calling the Report method.
func Reporter(r interface {
// PushStep is called when a tree-traversal operation is performed.
// The PathStep itself is only valid until the step is popped.
// The PathStep.Values are valid for the duration of the entire traversal
// and must not be mutated.
//
// Equal always calls PushStep at the start to provide an operation-less
// PathStep used to report the root values.
//
// Within a slice, the exact set of inserted, removed, or modified elements
// is unspecified and may change in future implementations.
// The entries of a map are iterated through in an unspecified order.
PushStep(PathStep)
// Report is called exactly once on leaf nodes to report whether the
// comparison identified the node as equal, unequal, or ignored.
// A leaf node is one that is immediately preceded by and followed by
// a pair of PushStep and PopStep calls.
Report(Result)
// PopStep ascends back up the value tree.
// There is always a matching pop call for every push call.
PopStep()
}) Option {
return reporter{r}
}
type reporter struct{ reporterIface }
type reporterIface interface {
PushStep(PathStep)
Report(Result)
PopStep()
}
func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
panic("not implemented")
}
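A sketch of a custom Reporter, similar to the example in go-cmp's own documentation, that records the path and values of every unequal leaf:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffReporter collects one line per unequal leaf node.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }

func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v: %v != %v", r.path, vx, vy))
	}
}

func (r *diffReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

func main() {
	var r diffReporter
	cmp.Equal([]int{1, 2, 3}, []int{1, 0, 3}, cmp.Reporter(&r))
	for _, d := range r.diffs {
		fmt.Println(d) // expect one entry, for index 1
	}
}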
// normalizeOption normalizes the input options such that all Options groups
// are flattened and groups with a single element are reduced to that element.
// Only coreOptions and Options containing coreOptions are allowed.
func normalizeOption(src Option) Option {
switch opts := flattenOptions(nil, Options{src}); len(opts) {
case 0:
return nil
case 1:
return opts[0]
default:
return opts
}
}
// flattenOptions copies all options in src to dst as a flat list.
// Only coreOptions and Options containing coreOptions are allowed.
func flattenOptions(dst, src Options) Options {
for _, opt := range src {
switch opt := opt.(type) {
case nil:
continue
case Options:
dst = flattenOptions(dst, opt)
case coreOption:
dst = append(dst, opt)
default:
panic(fmt.Sprintf("invalid option type: %T", opt))
}
}
return dst
}

378
vendor/github.com/google/go-cmp/cmp/path.go generated vendored Normal file

@ -0,0 +1,378 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmp
import (
"fmt"
"reflect"
"strings"
"unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp/internal/value"
)
// Path is a list of PathSteps describing the sequence of operations to get
// from some root type to the current position in the value tree.
// The first Path element is always an operation-less PathStep that exists
// simply to identify the initial type.
//
// When traversing structs with embedded structs, the embedded struct will
// always be accessed as a field before traversing the fields of the
// embedded struct themselves. That is, an exported field from the
// embedded struct will never be accessed directly from the parent struct.
type Path []PathStep
// PathStep is a union-type for specific operations to traverse
// a value's tree structure. Users of this package never need to implement
// these types as values of this type will be returned by this package.
//
// Implementations of this interface are
// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
type PathStep interface {
String() string
// Type is the resulting type after performing the path step.
Type() reflect.Type
// Values is the resulting values after performing the path step.
// The type of each valid value is guaranteed to be identical to Type.
//
// In some cases, one or both may be invalid or have restrictions:
// • For StructField, both are not interface-able if the current field
// is unexported and the struct type is not explicitly permitted by
// an Exporter to traverse unexported fields.
// • For SliceIndex, one may be invalid if an element is missing from
// either the x or y slice.
// • For MapIndex, one may be invalid if an entry is missing from
// either the x or y map.
//
// The provided values must not be mutated.
Values() (vx, vy reflect.Value)
}
var (
_ PathStep = StructField{}
_ PathStep = SliceIndex{}
_ PathStep = MapIndex{}
_ PathStep = Indirect{}
_ PathStep = TypeAssertion{}
_ PathStep = Transform{}
)
func (pa *Path) push(s PathStep) {
*pa = append(*pa, s)
}
func (pa *Path) pop() {
*pa = (*pa)[:len(*pa)-1]
}
// Last returns the last PathStep in the Path.
// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
func (pa Path) Last() PathStep {
return pa.Index(-1)
}
// Index returns the ith step in the Path and supports negative indexing.
// A negative index starts counting from the tail of the Path such that -1
// refers to the last step, -2 refers to the second-to-last step, and so on.
// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
func (pa Path) Index(i int) PathStep {
if i < 0 {
i = len(pa) + i
}
if i < 0 || i >= len(pa) {
return pathStep{}
}
return pa[i]
}
// String returns the simplified path to a node.
// The simplified path only contains struct field accesses.
//
// For example:
// MyMap.MySlices.MyField
func (pa Path) String() string {
var ss []string
for _, s := range pa {
if _, ok := s.(StructField); ok {
ss = append(ss, s.String())
}
}
return strings.TrimPrefix(strings.Join(ss, ""), ".")
}
// GoString returns the path to a specific node using Go syntax.
//
// For example:
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
func (pa Path) GoString() string {
var ssPre, ssPost []string
var numIndirect int
for i, s := range pa {
var nextStep PathStep
if i+1 < len(pa) {
nextStep = pa[i+1]
}
switch s := s.(type) {
case Indirect:
numIndirect++
pPre, pPost := "(", ")"
switch nextStep.(type) {
case Indirect:
continue // Next step is indirection, so let them batch up
case StructField:
numIndirect-- // Automatic indirection on struct fields
case nil:
pPre, pPost = "", "" // Last step; no need for parenthesis
}
if numIndirect > 0 {
ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
ssPost = append(ssPost, pPost)
}
numIndirect = 0
continue
case Transform:
ssPre = append(ssPre, s.trans.name+"(")
ssPost = append(ssPost, ")")
continue
}
ssPost = append(ssPost, s.String())
}
for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
}
return strings.Join(ssPre, "") + strings.Join(ssPost, "")
}
type pathStep struct {
typ reflect.Type
vx, vy reflect.Value
}
func (ps pathStep) Type() reflect.Type { return ps.typ }
func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
func (ps pathStep) String() string {
if ps.typ == nil {
return "<nil>"
}
s := ps.typ.String()
if s == "" || strings.ContainsAny(s, "{}\n") {
return "root" // Type too simple or complex to print
}
return fmt.Sprintf("{%s}", s)
}
// StructField represents a struct field access on a field called Name.
type StructField struct{ *structField }
type structField struct {
pathStep
name string
idx int
// These fields are used for forcibly accessing an unexported field.
// pvx, pvy, and field are only valid if unexported is true.
unexported bool
mayForce bool // Forcibly allow visibility
paddr bool // Was parent addressable?
pvx, pvy reflect.Value // Parent values (always addressable)
field reflect.StructField // Field information
}
func (sf StructField) Type() reflect.Type { return sf.typ }
func (sf StructField) Values() (vx, vy reflect.Value) {
if !sf.unexported {
return sf.vx, sf.vy // CanInterface reports true
}
// Forcibly obtain read-write access to an unexported struct field.
if sf.mayForce {
vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
return vx, vy // CanInterface reports true
}
return sf.vx, sf.vy // CanInterface reports false
}
func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
// Name is the field name.
func (sf StructField) Name() string { return sf.name }
// Index is the index of the field in the parent struct type.
// See reflect.Type.Field.
func (sf StructField) Index() int { return sf.idx }
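Path steps can be inspected inside a filter. A sketch that ignores every struct field named "Note", at any depth; the inner and outer types are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type inner struct{ ID, Note string }
type outer struct{ A, B inner }

func main() {
	// Match any StructField step whose field name is "Note", regardless of depth.
	ignoreNotes := cmp.FilterPath(func(p cmp.Path) bool {
		sf, ok := p.Last().(cmp.StructField)
		return ok && sf.Name() == "Note"
	}, cmp.Ignore())

	x := outer{A: inner{ID: "1", Note: "draft"}, B: inner{ID: "2", Note: "old"}}
	y := outer{A: inner{ID: "1", Note: "final"}, B: inner{ID: "2", Note: "new"}}
	fmt.Println(cmp.Equal(x, y, ignoreNotes)) // true: only the Note fields differ
}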
// SliceIndex is an index operation on a slice or array at some index Key.
type SliceIndex struct{ *sliceIndex }
type sliceIndex struct {
pathStep
xkey, ykey int
isSlice bool // False for reflect.Array
}
func (si SliceIndex) Type() reflect.Type { return si.typ }
func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
func (si SliceIndex) String() string {
switch {
case si.xkey == si.ykey:
return fmt.Sprintf("[%d]", si.xkey)
case si.ykey == -1:
// [5->?] means "I don't know where X[5] went"
return fmt.Sprintf("[%d->?]", si.xkey)
case si.xkey == -1:
// [?->3] means "I don't know where Y[3] came from"
return fmt.Sprintf("[?->%d]", si.ykey)
default:
// [5->3] means "X[5] moved to Y[3]"
return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
}
}
// Key is the index key; it may return -1 if in a split state.
func (si SliceIndex) Key() int {
if si.xkey != si.ykey {
return -1
}
return si.xkey
}
// SplitKeys are the indexes for indexing into slices in the
// x and y values, respectively. These indexes may differ due to the
// insertion or removal of an element in one of the slices, causing
// all of the indexes to be shifted. If an index is -1, then that
// indicates that the element does not exist in the associated slice.
//
// Key is guaranteed to return -1 if and only if the indexes returned
// by SplitKeys are not the same. SplitKeys will never return -1 for
// both indexes.
func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
// MapIndex is an index operation on a map at some index Key.
type MapIndex struct{ *mapIndex }
type mapIndex struct {
pathStep
key reflect.Value
}
func (mi MapIndex) Type() reflect.Type { return mi.typ }
func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
// Key is the value of the map key.
func (mi MapIndex) Key() reflect.Value { return mi.key }
// Indirect represents pointer indirection on the parent type.
type Indirect struct{ *indirect }
type indirect struct {
pathStep
}
func (in Indirect) Type() reflect.Type { return in.typ }
func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
func (in Indirect) String() string { return "*" }
// TypeAssertion represents a type assertion on an interface.
type TypeAssertion struct{ *typeAssertion }
type typeAssertion struct {
pathStep
}
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
// Transform is a transformation from the parent type to the current type.
type Transform struct{ *transform }
type transform struct {
pathStep
trans *transformer
}
func (tf Transform) Type() reflect.Type { return tf.typ }
func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
// Name is the name of the Transformer.
func (tf Transform) Name() string { return tf.trans.name }
// Func is the function pointer to the transformer function.
func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
// Option returns the originally constructed Transformer option.
// The == operator can be used to detect the exact option used.
func (tf Transform) Option() Option { return tf.trans }
// pointerPath represents a dual-stack of pointers encountered when
// recursively traversing the x and y values. This data structure supports
// detection of cycles and determining whether the cycles are equal.
// In Go, cycles can occur via pointers, slices, and maps.
//
// The pointerPath uses a map to represent a stack; where descension into a
// pointer pushes the address onto the stack, and ascension from a pointer
// pops the address from the stack. Thus, when traversing into a pointer from
// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
// by checking whether the pointer has already been visited. The cycle detection
// uses a separate stack for the x and y values.
//
// If a cycle is detected we need to determine whether the two pointers
// should be considered equal. The definition of equality chosen by Equal
// requires two graphs to have the same structure. To determine this, both the
// x and y values must have a cycle where the previous pointers were also
// encountered together as a pair.
//
// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
// MapIndex with pointer information for the x and y values.
// Suppose px and py are two pointers to compare, we then search the
// Path for whether px was ever encountered in the Path history of x, and
// similarly so with py. If either side has a cycle, the comparison is only
// equal if both px and py have a cycle resulting from the same PathStep.
//
// Using a map as a stack is more performant as we can perform cycle detection
// in O(1) instead of O(N) where N is len(Path).
type pointerPath struct {
// mx is keyed by x pointers, where the value is the associated y pointer.
mx map[value.Pointer]value.Pointer
// my is keyed by y pointers, where the value is the associated x pointer.
my map[value.Pointer]value.Pointer
}
func (p *pointerPath) Init() {
p.mx = make(map[value.Pointer]value.Pointer)
p.my = make(map[value.Pointer]value.Pointer)
}
// Push indicates intent to descend into pointers vx and vy where
// visited reports whether either has been seen before. If visited before,
// equal reports whether both pointers were encountered together.
// Pop must be called if and only if the pointers were never visited.
//
// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
// and be non-nil.
func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
px := value.PointerOf(vx)
py := value.PointerOf(vy)
_, ok1 := p.mx[px]
_, ok2 := p.my[py]
if ok1 || ok2 {
equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
return equal, true
}
p.mx[px] = py
p.my[py] = px
return false, false
}
// Pop ascends from pointers vx and vy.
func (p pointerPath) Pop(vx, vy reflect.Value) {
delete(p.mx, value.PointerOf(vx))
delete(p.my, value.PointerOf(vy))
}
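The effect of this cycle tracking, sketched with a hypothetical circular linked list: Equal terminates on cyclic data and compares graph structure rather than recursing forever:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type node struct {
	Value int
	Next  *node
}

// ring builds a circular list v1 -> v2 -> ... -> vn -> v1.
func ring(vals ...int) *node {
	head := &node{Value: vals[0]}
	curr := head
	for _, v := range vals[1:] {
		curr.Next = &node{Value: v}
		curr = curr.Next
	}
	curr.Next = head
	return head
}

func main() {
	fmt.Println(cmp.Equal(ring(1, 2, 3), ring(1, 2, 3))) // true: same structure and values
	fmt.Println(cmp.Equal(ring(1, 2, 3), ring(1, 2, 4))) // false: last element differs
}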
// isExported reports whether the identifier is exported.
func isExported(id string) bool {
r, _ := utf8.DecodeRuneInString(id)
return unicode.IsUpper(r)
}

54
vendor/github.com/google/go-cmp/cmp/report.go generated vendored Normal file

@ -0,0 +1,54 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmp
// defaultReporter implements the reporter interface.
//
// As Equal serially calls the PushStep, Report, and PopStep methods, the
// defaultReporter constructs a tree-based representation of the compared value
// and the result of each comparison (see valueNode).
//
// When the String method is called, the FormatDiff method transforms the
// valueNode tree into a textNode tree, which is a tree-based representation
// of the textual output (see textNode).
//
// Lastly, the textNode.String method produces the final report as a string.
type defaultReporter struct {
root *valueNode
curr *valueNode
}
func (r *defaultReporter) PushStep(ps PathStep) {
r.curr = r.curr.PushStep(ps)
if r.root == nil {
r.root = r.curr
}
}
func (r *defaultReporter) Report(rs Result) {
r.curr.Report(rs)
}
func (r *defaultReporter) PopStep() {
r.curr = r.curr.PopStep()
}
// String provides a full report of the differences detected as a structured
// literal in pseudo-Go syntax. String may only be called after the entire tree
// has been traversed.
func (r *defaultReporter) String() string {
assert(r.root != nil && r.curr == nil)
if r.root.NumDiff == 0 {
return ""
}
ptrs := new(pointerReferences)
text := formatOptions{}.FormatDiff(r.root, ptrs)
resolveReferences(text)
return text.String()
}
func assert(ok bool) {
if !ok {
panic("assertion failure")
}
}
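The defaultReporter above is what backs cmp.Diff. Typical usage, with the exact report text treated as unspecified:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := map[string]int{"a": 1, "b": 2}
	y := map[string]int{"a": 1, "b": 3}
	// Diff returns "" when the values are equal, otherwise a human-readable
	// report whose exact formatting is not part of the API.
	if diff := cmp.Diff(x, y); diff != "" {
		fmt.Printf("mismatch (-x +y):\n%s", diff)
	}
}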

435
vendor/github.com/google/go-cmp/cmp/report_compare.go generated vendored Normal file

@ -0,0 +1,435 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmp
import (
"fmt"
"reflect"
"github.com/google/go-cmp/cmp/internal/value"
)
// numContextRecords is the number of surrounding equal records to print.
const numContextRecords = 2
type diffMode byte
const (
diffUnknown diffMode = 0
diffIdentical diffMode = ' '
diffRemoved diffMode = '-'
diffInserted diffMode = '+'
)
type typeMode int
const (
// emitType always prints the type.
emitType typeMode = iota
// elideType never prints the type.
elideType
// autoType prints the type only for composite kinds
// (i.e., structs, slices, arrays, and maps).
autoType
)
type formatOptions struct {
// DiffMode controls the output mode of FormatDiff.
//
// If diffUnknown, then produce a diff of the x and y values.
// If diffIdentical, then emit values as if they were equal.
// If diffRemoved, then only emit x values (ignoring y values).
// If diffInserted, then only emit y values (ignoring x values).
DiffMode diffMode
// TypeMode controls whether to print the type for the current node.
//
// As a general rule of thumb, we always print the type of the next node
// after an interface, and always elide the type of the next node after
// a slice or map node.
TypeMode typeMode
// formatValueOptions are options specific to printing reflect.Values.
formatValueOptions
}
func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
opts.DiffMode = d
return opts
}
func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
opts.TypeMode = t
return opts
}
func (opts formatOptions) WithVerbosity(level int) formatOptions {
opts.VerbosityLevel = level
opts.LimitVerbosity = true
return opts
}
func (opts formatOptions) verbosity() uint {
switch {
case opts.VerbosityLevel < 0:
return 0
case opts.VerbosityLevel > 16:
return 16 // some reasonable maximum to avoid shift overflow
default:
return uint(opts.VerbosityLevel)
}
}
const maxVerbosityPreset = 6
// verbosityPreset modifies the verbosity settings given an index
// between 0 and maxVerbosityPreset, inclusive.
func verbosityPreset(opts formatOptions, i int) formatOptions {
opts.VerbosityLevel = int(opts.verbosity()) + 2*i
if i > 0 {
opts.AvoidStringer = true
}
if i >= maxVerbosityPreset {
opts.PrintAddresses = true
opts.QualifiedNames = true
}
return opts
}
// FormatDiff converts a valueNode tree into a textNode tree, where the latter
// is a textual representation of the differences detected in the former.
func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
if opts.DiffMode == diffIdentical {
opts = opts.WithVerbosity(1)
} else if opts.verbosity() < 3 {
opts = opts.WithVerbosity(3)
}
// Check whether we have specialized formatting for this node.
// This is not necessary, but helpful for producing more readable outputs.
if opts.CanFormatDiffSlice(v) {
return opts.FormatDiffSlice(v)
}
var parentKind reflect.Kind
if v.parent != nil && v.parent.TransformerName == "" {
parentKind = v.parent.Type.Kind()
}
// For leaf nodes, format the value based on the reflect.Values alone.
// As a special case, treat equal []byte as a leaf node.
isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0))
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
if v.MaxDepth == 0 || isEqualBytes {
switch opts.DiffMode {
case diffUnknown, diffIdentical:
// Format Equal.
if v.NumDiff == 0 {
outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
if v.NumIgnored > 0 && v.NumSame == 0 {
return textEllipsis
} else if outx.Len() < outy.Len() {
return outx
} else {
return outy
}
}
// Format unequal.
assert(opts.DiffMode == diffUnknown)
var list textList
outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
}
if outx != nil {
list = append(list, textRecord{Diff: '-', Value: outx})
}
if outy != nil {
list = append(list, textRecord{Diff: '+', Value: outy})
}
return opts.WithTypeMode(emitType).FormatType(v.Type, list)
case diffRemoved:
return opts.FormatValue(v.ValueX, parentKind, ptrs)
case diffInserted:
return opts.FormatValue(v.ValueY, parentKind, ptrs)
default:
panic("invalid diff mode")
}
}
// Register slice element to support cycle detection.
if parentKind == reflect.Slice {
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
defer ptrs.Pop()
defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
}
// Descend into the child value node.
if v.TransformerName != "" {
out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
return opts.FormatType(v.Type, out)
} else {
switch k := v.Type.Kind(); k {
case reflect.Struct, reflect.Array, reflect.Slice:
out = opts.formatDiffList(v.Records, k, ptrs)
out = opts.FormatType(v.Type, out)
case reflect.Map:
// Register map to support cycle detection.
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
defer ptrs.Pop()
out = opts.formatDiffList(v.Records, k, ptrs)
out = wrapTrunkReferences(ptrRefs, out)
out = opts.FormatType(v.Type, out)
case reflect.Ptr:
// Register pointer to support cycle detection.
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
defer ptrs.Pop()
out = opts.FormatDiff(v.Value, ptrs)
out = wrapTrunkReferences(ptrRefs, out)
out = &textWrap{Prefix: "&", Value: out}
case reflect.Interface:
out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
default:
panic(fmt.Sprintf("%v cannot have children", k))
}
return out
}
}
func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
// Derive record name based on the data structure kind.
var name string
var formatKey func(reflect.Value) string
switch k {
case reflect.Struct:
name = "field"
opts = opts.WithTypeMode(autoType)
formatKey = func(v reflect.Value) string { return v.String() }
case reflect.Slice, reflect.Array:
name = "element"
opts = opts.WithTypeMode(elideType)
formatKey = func(reflect.Value) string { return "" }
case reflect.Map:
name = "entry"
opts = opts.WithTypeMode(elideType)
formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
}
maxLen := -1
if opts.LimitVerbosity {
if opts.DiffMode == diffIdentical {
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
} else {
maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
}
opts.VerbosityLevel--
}
// Handle unification.
switch opts.DiffMode {
case diffIdentical, diffRemoved, diffInserted:
var list textList
var deferredEllipsis bool // Add final "..." to indicate records were dropped
for _, r := range recs {
if len(list) == maxLen {
deferredEllipsis = true
break
}
// Elide struct fields that are zero value.
if k == reflect.Struct {
var isZero bool
switch opts.DiffMode {
case diffIdentical:
isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
case diffRemoved:
isZero = value.IsZero(r.Value.ValueX)
case diffInserted:
isZero = value.IsZero(r.Value.ValueY)
}
if isZero {
continue
}
}
// Elide ignored nodes.
if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
if !deferredEllipsis {
list.AppendEllipsis(diffStats{})
}
continue
}
if out := opts.FormatDiff(r.Value, ptrs); out != nil {
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
}
}
if deferredEllipsis {
list.AppendEllipsis(diffStats{})
}
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
case diffUnknown:
default:
panic("invalid diff mode")
}
// Handle differencing.
var numDiffs int
var list textList
var keys []reflect.Value // invariant: len(list) == len(keys)
groups := coalesceAdjacentRecords(name, recs)
maxGroup := diffStats{Name: name}
for i, ds := range groups {
if maxLen >= 0 && numDiffs >= maxLen {
maxGroup = maxGroup.Append(ds)
continue
}
// Handle equal records.
if ds.NumDiff() == 0 {
// Compute the number of leading and trailing records to print.
var numLo, numHi int
numEqual := ds.NumIgnored + ds.NumIdentical
for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
break
}
numLo++
}
for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
break
}
numHi++
}
if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
numHi++ // Avoid pointless coalescing of a single equal record
}
// Format the equal values.
for _, r := range recs[:numLo] {
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
keys = append(keys, r.Key)
}
if numEqual > numLo+numHi {
ds.NumIdentical -= numLo + numHi
list.AppendEllipsis(ds)
for len(keys) < len(list) {
keys = append(keys, reflect.Value{})
}
}
for _, r := range recs[numEqual-numHi : numEqual] {
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
keys = append(keys, r.Key)
}
recs = recs[numEqual:]
continue
}
// Handle unequal records.
for _, r := range recs[:ds.NumDiff()] {
switch {
case opts.CanFormatDiffSlice(r.Value):
out := opts.FormatDiffSlice(r.Value)
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
keys = append(keys, r.Key)
case r.Value.NumChildren == r.Value.MaxDepth:
outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
opts2 := verbosityPreset(opts, i)
outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
}
if outx != nil {
list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
keys = append(keys, r.Key)
}
if outy != nil {
list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
keys = append(keys, r.Key)
}
default:
out := opts.FormatDiff(r.Value, ptrs)
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
keys = append(keys, r.Key)
}
}
recs = recs[ds.NumDiff():]
numDiffs += ds.NumDiff()
}
if maxGroup.IsZero() {
assert(len(recs) == 0)
} else {
list.AppendEllipsis(maxGroup)
for len(keys) < len(list) {
keys = append(keys, reflect.Value{})
}
}
assert(len(list) == len(keys))
// For maps, the default formatting logic uses fmt.Stringer which may
// produce ambiguous output. Avoid calling String to disambiguate.
if k == reflect.Map {
var ambiguous bool
seenKeys := map[string]reflect.Value{}
for i, currKey := range keys {
if currKey.IsValid() {
strKey := list[i].Key
prevKey, seen := seenKeys[strKey]
if seen && prevKey.CanInterface() && currKey.CanInterface() {
ambiguous = prevKey.Interface() != currKey.Interface()
if ambiguous {
break
}
}
seenKeys[strKey] = currKey
}
}
if ambiguous {
for i, k := range keys {
if k.IsValid() {
list[i].Key = formatMapKey(k, true, ptrs)
}
}
}
}
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
}
// coalesceAdjacentRecords coalesces the list of records into groups of
// adjacent equal, or unequal counts.
func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
var prevCase int // Arbitrary index into which case last occurred
lastStats := func(i int) *diffStats {
if prevCase != i {
groups = append(groups, diffStats{Name: name})
prevCase = i
}
return &groups[len(groups)-1]
}
for _, r := range recs {
switch rv := r.Value; {
case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
lastStats(1).NumIgnored++
case rv.NumDiff == 0:
lastStats(1).NumIdentical++
case rv.NumDiff > 0 && !rv.ValueY.IsValid():
lastStats(2).NumRemoved++
case rv.NumDiff > 0 && !rv.ValueX.IsValid():
lastStats(2).NumInserted++
default:
lastStats(2).NumModified++
}
}
return groups
}
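A sketch showing the effect of numContextRecords and the grouping above on a long, mostly-equal slice; the exact report text is unspecified, but long equal runs are expected to be elided:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type item struct{ N int }

func main() {
	x := make([]item, 10)
	y := make([]item, 10)
	for i := range x {
		x[i] = item{N: i}
		y[i] = item{N: i}
	}
	y[9].N = 99 // single difference at the tail
	// Only a couple of equal "context" elements around the change should be
	// printed in full; the long equal prefix is summarized instead.
	fmt.Println(cmp.Diff(x, y))
}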


@ -0,0 +1,264 @@
// Copyright 2020, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmp
import (
"fmt"
"reflect"
"strings"
"github.com/google/go-cmp/cmp/internal/flags"
"github.com/google/go-cmp/cmp/internal/value"
)
const (
pointerDelimPrefix = "⟪"
pointerDelimSuffix = "⟫"
)
// formatPointer prints the address of the pointer.
func formatPointer(p value.Pointer, withDelims bool) string {
v := p.Uintptr()
if flags.Deterministic {
v = 0xdeadf00f // Only used for stable testing purposes
}
if withDelims {
return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
}
return formatHex(uint64(v))
}
// pointerReferences is a stack of pointers visited so far.
type pointerReferences [][2]value.Pointer
func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
if deref && vx.IsValid() {
vx = vx.Addr()
}
if deref && vy.IsValid() {
vy = vy.Addr()
}
switch d {
case diffUnknown, diffIdentical:
pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
case diffRemoved:
pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
case diffInserted:
pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
}
*ps = append(*ps, pp)
return pp
}
func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
p = value.PointerOf(v)
for _, pp := range *ps {
if p == pp[0] || p == pp[1] {
return p, true
}
}
*ps = append(*ps, [2]value.Pointer{p, p})
return p, false
}
func (ps *pointerReferences) Pop() {
*ps = (*ps)[:len(*ps)-1]
}
// trunkReferences is metadata for a textNode indicating that the sub-tree
// represents the value for either pointer in a pair of references.
type trunkReferences struct{ pp [2]value.Pointer }
// trunkReference is metadata for a textNode indicating that the sub-tree
// represents the value for the given pointer reference.
type trunkReference struct{ p value.Pointer }
// leafReference is metadata for a textNode indicating that the value is
// truncated as it refers to another part of the tree (i.e., a trunk).
type leafReference struct{ p value.Pointer }
func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
switch {
case pp[0].IsNil():
return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
case pp[1].IsNil():
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
case pp[0] == pp[1]:
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
default:
return &textWrap{Value: s, Metadata: trunkReferences{pp}}
}
}
func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
var prefix string
if printAddress {
prefix = formatPointer(p, true)
}
return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
}
func makeLeafReference(p value.Pointer, printAddress bool) textNode {
out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
var prefix string
if printAddress {
prefix = formatPointer(p, true)
}
return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
}
// resolveReferences walks the textNode tree searching for any leaf reference
// metadata and resolves each against the corresponding trunk references.
// Since pointer addresses in memory are not particularly readable to the user,
// it replaces each pointer value with an arbitrary and unique reference ID.
func resolveReferences(s textNode) {
var walkNodes func(textNode, func(textNode))
walkNodes = func(s textNode, f func(textNode)) {
f(s)
switch s := s.(type) {
case *textWrap:
walkNodes(s.Value, f)
case textList:
for _, r := range s {
walkNodes(r.Value, f)
}
}
}
// Collect all trunks and leaves with reference metadata.
var trunks, leaves []*textWrap
walkNodes(s, func(s textNode) {
if s, ok := s.(*textWrap); ok {
switch s.Metadata.(type) {
case leafReference:
leaves = append(leaves, s)
case trunkReference, trunkReferences:
trunks = append(trunks, s)
}
}
})
// No leaf references to resolve.
if len(leaves) == 0 {
return
}
// Collect the set of all leaf references to resolve.
leafPtrs := make(map[value.Pointer]bool)
for _, leaf := range leaves {
leafPtrs[leaf.Metadata.(leafReference).p] = true
}
// Collect the set of trunk pointers that are always paired together.
// This allows us to assign a single ID to both pointers for brevity.
// If a pointer in a pair ever occurs by itself or as a different pair,
// then the pair is broken.
pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
unpair := func(p value.Pointer) {
if !pairedTrunkPtrs[p].IsNil() {
pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
}
pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
}
for _, trunk := range trunks {
switch p := trunk.Metadata.(type) {
case trunkReference:
unpair(p.p) // standalone pointer cannot be part of a pair
case trunkReferences:
p0, ok0 := pairedTrunkPtrs[p.pp[0]]
p1, ok1 := pairedTrunkPtrs[p.pp[1]]
switch {
case !ok0 && !ok1:
// Register the newly seen pair.
pairedTrunkPtrs[p.pp[0]] = p.pp[1]
pairedTrunkPtrs[p.pp[1]] = p.pp[0]
case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
// Exact pair already seen; do nothing.
default:
// Pair conflicts with some other pair; break all pairs.
unpair(p.pp[0])
unpair(p.pp[1])
}
}
}
// Correlate each pointer referenced by leaves to a unique identifier,
// and print the IDs for each trunk that matches those pointers.
var nextID uint
ptrIDs := make(map[value.Pointer]uint)
newID := func() uint {
id := nextID
nextID++
return id
}
for _, trunk := range trunks {
switch p := trunk.Metadata.(type) {
case trunkReference:
if print := leafPtrs[p.p]; print {
id, ok := ptrIDs[p.p]
if !ok {
id = newID()
ptrIDs[p.p] = id
}
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
}
case trunkReferences:
print0 := leafPtrs[p.pp[0]]
print1 := leafPtrs[p.pp[1]]
if print0 || print1 {
id0, ok0 := ptrIDs[p.pp[0]]
id1, ok1 := ptrIDs[p.pp[1]]
isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
if isPair {
var id uint
assert(ok0 == ok1) // must be seen together or not at all
if ok0 {
assert(id0 == id1) // must have the same ID
id = id0
} else {
id = newID()
ptrIDs[p.pp[0]] = id
ptrIDs[p.pp[1]] = id
}
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
} else {
if print0 && !ok0 {
id0 = newID()
ptrIDs[p.pp[0]] = id0
}
if print1 && !ok1 {
id1 = newID()
ptrIDs[p.pp[1]] = id1
}
switch {
case print0 && print1:
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
case print0:
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
case print1:
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
}
}
}
}
}
// Update all leaf references with the unique identifier.
for _, leaf := range leaves {
if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
}
}
}
func formatReference(id uint) string {
return fmt.Sprintf("ref#%d", id)
}
func updateReferencePrefix(prefix, ref string) string {
if prefix == "" {
return pointerDelimPrefix + ref + pointerDelimSuffix
}
suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
return pointerDelimPrefix + ref + ": " + suffix
}

407
vendor/github.com/google/go-cmp/cmp/report_reflect.go generated vendored Normal file

@ -0,0 +1,407 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmp
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp/internal/value"
)
type formatValueOptions struct {
// AvoidStringer controls whether to avoid calling custom stringer
// methods like error.Error or fmt.Stringer.String.
AvoidStringer bool
// PrintAddresses controls whether to print the address of all pointers,
// slice elements, and maps.
PrintAddresses bool
// QualifiedNames controls whether FormatType uses the fully qualified name
// (including the full package path as opposed to just the package name).
QualifiedNames bool
// VerbosityLevel controls the amount of output to produce.
// A higher value produces more output. A value of zero or lower produces
// no output (represented using an ellipsis).
// If LimitVerbosity is false, then the level is treated as infinite.
VerbosityLevel int
// LimitVerbosity specifies that formatting should respect VerbosityLevel.
LimitVerbosity bool
}
// FormatType prints the type as if it were wrapping s.
// This may return s as-is depending on the current type and TypeMode mode.
func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
// Check whether to emit the type or not.
switch opts.TypeMode {
case autoType:
switch t.Kind() {
case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
if s.Equal(textNil) {
return s
}
default:
return s
}
if opts.DiffMode == diffIdentical {
return s // elide type for identical nodes
}
case elideType:
return s
}
// Determine the type label, applying special handling for unnamed types.
typeName := value.TypeString(t, opts.QualifiedNames)
if t.Name() == "" {
// According to Go grammar, certain type literals contain symbols that
// do not strongly bind to the next lexicographical token (e.g., *T).
switch t.Kind() {
case reflect.Chan, reflect.Func, reflect.Ptr:
typeName = "(" + typeName + ")"
}
}
return &textWrap{Prefix: typeName, Value: wrapParens(s)}
}
// wrapParens wraps s with a set of parenthesis, but avoids it if the
// wrapped node itself is already surrounded by a pair of parenthesis or braces.
// It handles unwrapping one level of pointer-reference nodes.
func wrapParens(s textNode) textNode {
var refNode *textWrap
if s2, ok := s.(*textWrap); ok {
// Unwrap a single pointer reference node.
switch s2.Metadata.(type) {
case leafReference, trunkReference, trunkReferences:
refNode = s2
if s3, ok := refNode.Value.(*textWrap); ok {
s2 = s3
}
}
// Already has delimiters that make parenthesis unnecessary.
hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
if hasParens || hasBraces {
return s
}
}
if refNode != nil {
refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
return s
}
return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
}
// FormatValue prints the reflect.Value, taking extra care to avoid descending
// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
if !v.IsValid() {
return nil
}
t := v.Type()
// Check slice element for cycles.
if parentKind == reflect.Slice {
ptrRef, visited := ptrs.Push(v.Addr())
if visited {
return makeLeafReference(ptrRef, false)
}
defer ptrs.Pop()
defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
}
// Check whether there is an Error or String method to call.
if !opts.AvoidStringer && v.CanInterface() {
// Avoid calling Error or String methods on nil receivers since many
// implementations crash when doing so.
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
var prefix, strVal string
func() {
// Swallow and ignore any panics from String or Error.
defer func() { recover() }()
switch v := v.Interface().(type) {
case error:
strVal = v.Error()
prefix = "e"
case fmt.Stringer:
strVal = v.String()
prefix = "s"
}
}()
if prefix != "" {
return opts.formatString(prefix, strVal)
}
}
}
// Check whether to explicitly wrap the result with the type.
var skipType bool
defer func() {
if !skipType {
out = opts.FormatType(t, out)
}
}()
switch t.Kind() {
case reflect.Bool:
return textLine(fmt.Sprint(v.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return textLine(fmt.Sprint(v.Int()))
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return textLine(fmt.Sprint(v.Uint()))
case reflect.Uint8:
if parentKind == reflect.Slice || parentKind == reflect.Array {
return textLine(formatHex(v.Uint()))
}
return textLine(fmt.Sprint(v.Uint()))
case reflect.Uintptr:
return textLine(formatHex(v.Uint()))
case reflect.Float32, reflect.Float64:
return textLine(fmt.Sprint(v.Float()))
case reflect.Complex64, reflect.Complex128:
return textLine(fmt.Sprint(v.Complex()))
case reflect.String:
return opts.formatString("", v.String())
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
return textLine(formatPointer(value.PointerOf(v), true))
case reflect.Struct:
var list textList
v := makeAddressable(v) // needed for retrieveUnexportedField
maxLen := v.NumField()
if opts.LimitVerbosity {
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
opts.VerbosityLevel--
}
for i := 0; i < v.NumField(); i++ {
vv := v.Field(i)
if value.IsZero(vv) {
continue // Elide fields with zero values
}
if len(list) == maxLen {
list.AppendEllipsis(diffStats{})
break
}
sf := t.Field(i)
if supportExporters && !isExported(sf.Name) {
vv = retrieveUnexportedField(v, sf, true)
}
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
list = append(list, textRecord{Key: sf.Name, Value: s})
}
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
case reflect.Slice:
if v.IsNil() {
return textNil
}
// Check whether this is a []byte of text data.
if t.Elem() == reflect.TypeOf(byte(0)) {
b := v.Bytes()
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
out = opts.formatString("", string(b))
skipType = true
return opts.FormatType(t, out)
}
}
fallthrough
case reflect.Array:
maxLen := v.Len()
if opts.LimitVerbosity {
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
opts.VerbosityLevel--
}
var list textList
for i := 0; i < v.Len(); i++ {
if len(list) == maxLen {
list.AppendEllipsis(diffStats{})
break
}
s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
list = append(list, textRecord{Value: s})
}
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
if t.Kind() == reflect.Slice && opts.PrintAddresses {
header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
}
return out
case reflect.Map:
if v.IsNil() {
return textNil
}
// Check pointer for cycles.
ptrRef, visited := ptrs.Push(v)
if visited {
return makeLeafReference(ptrRef, opts.PrintAddresses)
}
defer ptrs.Pop()
maxLen := v.Len()
if opts.LimitVerbosity {
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
opts.VerbosityLevel--
}
var list textList
for _, k := range value.SortKeys(v.MapKeys()) {
if len(list) == maxLen {
list.AppendEllipsis(diffStats{})
break
}
sk := formatMapKey(k, false, ptrs)
sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
list = append(list, textRecord{Key: sk, Value: sv})
}
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
return out
case reflect.Ptr:
if v.IsNil() {
return textNil
}
// Check pointer for cycles.
ptrRef, visited := ptrs.Push(v)
if visited {
out = makeLeafReference(ptrRef, opts.PrintAddresses)
return &textWrap{Prefix: "&", Value: out}
}
defer ptrs.Pop()
// Skip the name only if this is an unnamed pointer type.
// Otherwise taking the address of a value does not reproduce
// the named pointer type.
if v.Type().Name() == "" {
skipType = true // Let the underlying value print the type instead
}
out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
out = &textWrap{Prefix: "&", Value: out}
return out
case reflect.Interface:
if v.IsNil() {
return textNil
}
// Interfaces accept different concrete types,
// so configure the underlying value to explicitly print the type.
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
default:
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
}
}
func (opts formatOptions) formatString(prefix, s string) textNode {
maxLen := len(s)
maxLines := strings.Count(s, "\n") + 1
if opts.LimitVerbosity {
maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
}
// For multiline strings, use the triple-quote syntax,
// but only use it when printing removed or inserted nodes since
// we only want the extra verbosity for those cases.
lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
for i := 0; i < len(lines) && isTripleQuoted; i++ {
lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
isPrintable := func(r rune) bool {
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
}
line := lines[i]
isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
}
if isTripleQuoted {
var list textList
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
for i, line := range lines {
if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
comment := commentString(fmt.Sprintf("%d elided lines", numElided))
list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
break
}
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
}
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
}
// Format the string as a single-line quoted string.
if len(s) > maxLen+len(textEllipsis) {
return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
}
return textLine(prefix + formatString(s))
}
// formatMapKey formats v as if it were a map key.
// The result is guaranteed to be a single line.
func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
var opts formatOptions
opts.DiffMode = diffIdentical
opts.TypeMode = elideType
opts.PrintAddresses = disambiguate
opts.AvoidStringer = disambiguate
opts.QualifiedNames = disambiguate
opts.VerbosityLevel = maxVerbosityPreset
opts.LimitVerbosity = true
s := opts.FormatValue(v, reflect.Map, ptrs).String()
return strings.TrimSpace(s)
}
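// Editorial note, not part of the upstream go-cmp source: a rough illustration
// of the fixed options above, assuming ptrs is an initialized *pointerReferences:
//
//	formatMapKey(reflect.ValueOf("id"), false, ptrs) // `"id"`
//	formatMapKey(reflect.ValueOf(42), false, ptrs)   // `42`
//
// With disambiguate set, addresses are printed and Stringer methods are bypassed
// so that distinct keys cannot collapse to the same text.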
// formatString prints s as a double-quoted or backtick-quoted string.
func formatString(s string) string {
// Use the quoted string form if it is the same length as a raw string literal.
// Otherwise, attempt to use the raw string form.
qs := strconv.Quote(s)
if len(qs) == 1+len(s)+1 {
return qs
}
// Disallow newlines to ensure output is a single line.
// Only allow printable runes for readability purposes.
rawInvalid := func(r rune) bool {
return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
}
if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
return "`" + s + "`"
}
return qs
}
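// Editorial note, not part of the upstream go-cmp source: a rough sketch of the
// selection rule above:
//
//	formatString("hello")   // `"hello"` - the quoted form adds no escaping
//	formatString(`C:\path`) // "`C:\path`" - the raw form avoids escaping the backslash
//	formatString("a\nb")    // `"a\nb"` - newlines always force the quoted form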
// formatHex prints u as a hexadecimal integer in Go notation.
func formatHex(u uint64) string {
var f string
switch {
case u <= 0xff:
f = "0x%02x"
case u <= 0xffff:
f = "0x%04x"
case u <= 0xffffff:
f = "0x%06x"
case u <= 0xffffffff:
f = "0x%08x"
case u <= 0xffffffffff:
f = "0x%010x"
case u <= 0xffffffffffff:
f = "0x%012x"
case u <= 0xffffffffffffff:
f = "0x%014x"
case u <= 0xffffffffffffffff:
f = "0x%016x"
}
return fmt.Sprintf(f, u)
}
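// Editorial note, not part of the upstream go-cmp source: the width grows in
// two-digit steps with the magnitude of u, for example:
//
//	formatHex(0x0)      // "0x00"
//	formatHex(0x1f)     // "0x1f"
//	formatHex(0x1234)   // "0x1234"
//	formatHex(0x123456) // "0x123456"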

613
vendor/github.com/google/go-cmp/cmp/report_slices.go generated vendored Normal file

@ -0,0 +1,613 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmp
import (
"bytes"
"fmt"
"math"
"reflect"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp/internal/diff"
)
// CanFormatDiffSlice reports whether we support custom formatting for nodes
// that are slices of primitive kinds or strings.
func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
switch {
case opts.DiffMode != diffUnknown:
return false // Must be formatting in diff mode
case v.NumDiff == 0:
return false // No differences detected
case !v.ValueX.IsValid() || !v.ValueY.IsValid():
return false // Both values must be valid
case v.NumIgnored > 0:
return false // Some ignore option was used
case v.NumTransformed > 0:
return false // Some transform option was used
case v.NumCompared > 1:
return false // More than one comparison was used
case v.NumCompared == 1 && v.Type.Name() != "":
// The need for cmp to check applicability of options on every element
// in a slice is a significant performance detriment for large []byte.
// The workaround is to specify Comparer(bytes.Equal),
// which enables cmp to compare []byte more efficiently.
// If they differ, we still want to provide batched diffing.
// The logic disallows named types since they tend to have their own
// String method, with nicer formatting than what this provides.
return false
}
// Check whether this is an interface with the same concrete types.
t := v.Type
vx, vy := v.ValueX, v.ValueY
if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
vx, vy = vx.Elem(), vy.Elem()
t = vx.Type()
}
// Check whether we provide specialized diffing for this type.
switch t.Kind() {
case reflect.String:
case reflect.Array, reflect.Slice:
// Only slices of primitive types have specialized handling.
switch t.Elem().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
default:
return false
}
// Both slice values have to be non-empty.
if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
return false
}
// If a sufficient number of elements already differ,
// use specialized formatting even if length requirement is not met.
if v.NumDiff > v.NumSame {
return true
}
default:
return false
}
// Use specialized string diffing for longer slices or strings.
const minLength = 32
return vx.Len() >= minLength && vy.Len() >= minLength
}
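// Editorial note, not part of the upstream go-cmp source: as a rough example of
// the rules above, two unnamed strings of at least 32 bytes that differ in a few
// characters, compared without Ignore or Transformer options, would likely take
// this specialized path:
//
//	x := strings.Repeat("a", 40)
//	y := strings.Repeat("a", 39) + "b"
//	// The report node for comparing x and y satisfies CanFormatDiffSlice.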
// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
// This provides custom-tailored logic to make printing of differences in
// textual strings and slices of primitive kinds more readable.
func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
assert(opts.DiffMode == diffUnknown)
t, vx, vy := v.Type, v.ValueX, v.ValueY
if t.Kind() == reflect.Interface {
vx, vy = vx.Elem(), vy.Elem()
t = vx.Type()
opts = opts.WithTypeMode(emitType)
}
// Auto-detect the type of the data.
var sx, sy string
var ssx, ssy []string
var isString, isMostlyText, isPureLinedText, isBinary bool
switch {
case t.Kind() == reflect.String:
sx, sy = vx.String(), vy.String()
isString = true
case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
sx, sy = string(vx.Bytes()), string(vy.Bytes())
isString = true
case t.Kind() == reflect.Array:
// Arrays need to be addressable for slice operations to work.
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
vx2.Set(vx)
vy2.Set(vy)
vx, vy = vx2, vy2
}
if isString {
var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
for i, r := range sx + sy {
numTotalRunes++
if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
numValidRunes++
}
if r == '\n' {
if maxLineLen < i-lastLineIdx {
maxLineLen = i - lastLineIdx
}
lastLineIdx = i + 1
numLines++
}
}
isPureText := numValidRunes == numTotalRunes
isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
isBinary = !isMostlyText
// Avoid diffing by lines if it produces a significantly more complex
// edit script than diffing by bytes.
if isPureLinedText {
ssx = strings.Split(sx, "\n")
ssy = strings.Split(sy, "\n")
esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
return diff.BoolResult(ssx[ix] == ssy[iy])
})
esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
return diff.BoolResult(sx[ix] == sy[iy])
})
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
isPureLinedText = efficiencyLines < 4*efficiencyBytes
}
}
// Format the string into printable records.
var list textList
var delim string
switch {
// If the text appears to be multi-lined text,
// then perform differencing across individual lines.
case isPureLinedText:
list = opts.formatDiffSlice(
reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
func(v reflect.Value, d diffMode) textRecord {
s := formatString(v.Index(0).String())
return textRecord{Diff: d, Value: textLine(s)}
},
)
delim = "\n"
// If possible, use a custom triple-quote (""") syntax for printing
// differences in a string literal. This format is more readable,
// but has edge-cases where differences are visually indistinguishable.
// This format is avoided under the following conditions:
// • A line starts with `"""`
// • A line starts with "..."
// • A line contains non-printable characters
// • Adjacent different lines differ only by whitespace
//
// For example:
// """
// ... // 3 identical lines
// foo
// bar
// - baz
// + BAZ
// """
isTripleQuoted := true
prevRemoveLines := map[string]bool{}
prevInsertLines := map[string]bool{}
var list2 textList
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
for _, r := range list {
if !r.Value.Equal(textEllipsis) {
line, _ := strconv.Unquote(string(r.Value.(textLine)))
line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
normLine := strings.Map(func(r rune) rune {
if unicode.IsSpace(r) {
return -1 // drop whitespace to avoid visually indistinguishable output
}
return r
}, line)
isPrintable := func(r rune) bool {
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
}
isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
switch r.Diff {
case diffRemoved:
isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
prevRemoveLines[normLine] = true
case diffInserted:
isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
prevInsertLines[normLine] = true
}
if !isTripleQuoted {
break
}
r.Value = textLine(line)
r.ElideComma = true
}
if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
prevRemoveLines = map[string]bool{}
prevInsertLines = map[string]bool{}
}
list2 = append(list2, r)
}
if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
list2 = list2[:len(list2)-1] // elide single empty line at the end
}
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
if isTripleQuoted {
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
switch t.Kind() {
case reflect.String:
if t != reflect.TypeOf(string("")) {
out = opts.FormatType(t, out)
}
case reflect.Slice:
// Always emit type for slices since the triple-quote syntax
// looks like a string (not a slice).
opts = opts.WithTypeMode(emitType)
out = opts.FormatType(t, out)
}
return out
}
// If the text appears to be single-lined text,
// then perform differencing in approximately fixed-sized chunks.
// The output is printed as quoted strings.
case isMostlyText:
list = opts.formatDiffSlice(
reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
func(v reflect.Value, d diffMode) textRecord {
s := formatString(v.String())
return textRecord{Diff: d, Value: textLine(s)}
},
)
// If the text appears to be binary data,
// then perform differencing in approximately fixed-sized chunks.
// The output is inspired by hexdump.
case isBinary:
list = opts.formatDiffSlice(
reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
func(v reflect.Value, d diffMode) textRecord {
var ss []string
for i := 0; i < v.Len(); i++ {
ss = append(ss, formatHex(v.Index(i).Uint()))
}
s := strings.Join(ss, ", ")
comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
return textRecord{Diff: d, Value: textLine(s), Comment: comment}
},
)
// For all other slices of primitive types,
// then perform differencing in approximately fixed-sized chunks.
// The size of each chunk depends on the width of the element kind.
default:
var chunkSize int
if t.Elem().Kind() == reflect.Bool {
chunkSize = 16
} else {
switch t.Elem().Bits() {
case 8:
chunkSize = 16
case 16:
chunkSize = 12
case 32:
chunkSize = 8
default:
chunkSize = 8
}
}
list = opts.formatDiffSlice(
vx, vy, chunkSize, t.Elem().Kind().String(),
func(v reflect.Value, d diffMode) textRecord {
var ss []string
for i := 0; i < v.Len(); i++ {
switch t.Elem().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
ss = append(ss, fmt.Sprint(v.Index(i).Int()))
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
case reflect.Uint8, reflect.Uintptr:
ss = append(ss, formatHex(v.Index(i).Uint()))
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
}
}
s := strings.Join(ss, ", ")
return textRecord{Diff: d, Value: textLine(s)}
},
)
}
// Wrap the output with appropriate type information.
var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
if !isMostlyText {
// The "{...}" byte-sequence literal is not valid Go syntax for strings.
// Emit the type for extra clarity (e.g. "string{...}").
if t.Kind() == reflect.String {
opts = opts.WithTypeMode(emitType)
}
return opts.FormatType(t, out)
}
switch t.Kind() {
case reflect.String:
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
if t != reflect.TypeOf(string("")) {
out = opts.FormatType(t, out)
}
case reflect.Slice:
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
if t != reflect.TypeOf([]byte(nil)) {
out = opts.FormatType(t, out)
}
}
return out
}
// formatASCII formats s as an ASCII string.
// This is useful for printing binary strings in a semi-legible way.
func formatASCII(s string) string {
b := bytes.Repeat([]byte{'.'}, len(s))
for i := 0; i < len(s); i++ {
if ' ' <= s[i] && s[i] <= '~' {
b[i] = s[i]
}
}
return string(b)
}
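// Editorial note, not part of the upstream go-cmp source: bytes outside the
// printable ASCII range come out as '.', for example:
//
//	formatASCII("GIF89a\x01\x00") // "GIF89a.."
//	formatASCII("héllo")          // "h..llo" (each non-ASCII UTF-8 byte becomes a dot)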
func (opts formatOptions) formatDiffSlice(
vx, vy reflect.Value, chunkSize int, name string,
makeRec func(reflect.Value, diffMode) textRecord,
) (list textList) {
eq := func(ix, iy int) bool {
return vx.Index(ix).Interface() == vy.Index(iy).Interface()
}
es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
return diff.BoolResult(eq(ix, iy))
})
appendChunks := func(v reflect.Value, d diffMode) int {
n0 := v.Len()
for v.Len() > 0 {
n := chunkSize
if n > v.Len() {
n = v.Len()
}
list = append(list, makeRec(v.Slice(0, n), d))
v = v.Slice(n, v.Len())
}
return n0 - v.Len()
}
var numDiffs int
maxLen := -1
if opts.LimitVerbosity {
maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
opts.VerbosityLevel--
}
groups := coalesceAdjacentEdits(name, es)
groups = coalesceInterveningIdentical(groups, chunkSize/4)
groups = cleanupSurroundingIdentical(groups, eq)
maxGroup := diffStats{Name: name}
for i, ds := range groups {
if maxLen >= 0 && numDiffs >= maxLen {
maxGroup = maxGroup.Append(ds)
continue
}
// Print equal.
if ds.NumDiff() == 0 {
// Compute the number of leading and trailing equal bytes to print.
var numLo, numHi int
numEqual := ds.NumIgnored + ds.NumIdentical
for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
numLo++
}
for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
numHi++
}
if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
}
// Print the equal bytes.
appendChunks(vx.Slice(0, numLo), diffIdentical)
if numEqual > numLo+numHi {
ds.NumIdentical -= numLo + numHi
list.AppendEllipsis(ds)
}
appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
vx = vx.Slice(numEqual, vx.Len())
vy = vy.Slice(numEqual, vy.Len())
continue
}
// Print unequal.
len0 := len(list)
nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
vx = vx.Slice(nx, vx.Len())
ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
vy = vy.Slice(ny, vy.Len())
numDiffs += len(list) - len0
}
if maxGroup.IsZero() {
assert(vx.Len() == 0 && vy.Len() == 0)
} else {
list.AppendEllipsis(maxGroup)
}
return list
}
// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
// equal or unequal counts.
//
// Example:
//
// Input: "..XXY...Y"
// Output: [
// {NumIdentical: 2},
// {NumRemoved: 2, NumInserted: 1},
// {NumIdentical: 3},
// {NumInserted: 1},
// ]
//
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
var prevMode byte
lastStats := func(mode byte) *diffStats {
if prevMode != mode {
groups = append(groups, diffStats{Name: name})
prevMode = mode
}
return &groups[len(groups)-1]
}
for _, e := range es {
switch e {
case diff.Identity:
lastStats('=').NumIdentical++
case diff.UniqueX:
lastStats('!').NumRemoved++
case diff.UniqueY:
lastStats('!').NumInserted++
case diff.Modified:
lastStats('!').NumModified++
}
}
return groups
}
// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
// equal groups into adjacent unequal groups that currently result in a
// dual inserted/removed printout. This acts as a high-pass filter to smooth
// out high-frequency changes within the windowSize.
//
// Example:
//
// WindowSize: 16,
// Input: [
// {NumIdentical: 61}, // group 0
// {NumRemoved: 3, NumInserted: 1}, // group 1
// {NumIdentical: 6}, // ├── coalesce
// {NumInserted: 2}, // ├── coalesce
// {NumIdentical: 1}, // ├── coalesce
// {NumRemoved: 9}, // └── coalesce
// {NumIdentical: 64}, // group 2
// {NumRemoved: 3, NumInserted: 1}, // group 3
// {NumIdentical: 6}, // ├── coalesce
// {NumInserted: 2}, // ├── coalesce
// {NumIdentical: 1}, // ├── coalesce
// {NumRemoved: 7}, // ├── coalesce
// {NumIdentical: 1}, // ├── coalesce
// {NumRemoved: 2}, // └── coalesce
// {NumIdentical: 63}, // group 4
// ]
// Output: [
// {NumIdentical: 61},
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
// {NumIdentical: 64},
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
// {NumIdentical: 63},
// ]
//
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
groups, groupsOrig := groups[:0], groups
for i, ds := range groupsOrig {
if len(groups) >= 2 && ds.NumDiff() > 0 {
prev := &groups[len(groups)-2] // Unequal group
curr := &groups[len(groups)-1] // Equal group
next := &groupsOrig[i] // Unequal group
hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
*prev = prev.Append(*curr).Append(*next)
groups = groups[:len(groups)-1] // Truncate off equal group
continue
}
}
groups = append(groups, ds)
}
return groups
}
// cleanupSurroundingIdentical scans through all unequal groups, and
// moves any leading sequence of equal elements to the preceding equal group and
// moves any trailing sequence of equal elements to the succeeding equal group.
//
// This is necessary since coalesceInterveningIdentical may coalesce edit groups
// together such that leading/trailing spans of equal elements become possible.
// Note that this can occur even with an optimal diffing algorithm.
//
// Example:
//
// Input: [
// {NumIdentical: 61},
// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
// {NumIdentical: 67},
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
// {NumIdentical: 54},
// ]
// Output: [
// {NumIdentical: 64}, // incremented by 3
// {NumRemoved: 9},
// {NumIdentical: 67},
// {NumRemoved: 9},
// {NumIdentical: 64}, // incremented by 10
// ]
//
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
var ix, iy int // indexes into sequence x and y
for i, ds := range groups {
// Handle equal group.
if ds.NumDiff() == 0 {
ix += ds.NumIdentical
iy += ds.NumIdentical
continue
}
// Handle unequal group.
nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
var numLeadingIdentical, numTrailingIdentical int
for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
numLeadingIdentical++
}
for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
numTrailingIdentical++
}
if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
if numLeadingIdentical > 0 {
// Remove leading identical span from this group and
// insert it into the preceding group.
if i-1 >= 0 {
groups[i-1].NumIdentical += numLeadingIdentical
} else {
// No preceding group exists, so prepend a new group,
// but do so after we finish iterating over all groups.
defer func() {
groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
}()
}
// Increment indexes since the preceding group would have handled this.
ix += numLeadingIdentical
iy += numLeadingIdentical
}
if numTrailingIdentical > 0 {
// Remove trailing identical span from this group and
// insert it into the succeeding group.
if i+1 < len(groups) {
groups[i+1].NumIdentical += numTrailingIdentical
} else {
// No succeeding group exists, so append a new group,
// but do so after we finish iterating over all groups.
defer func() {
groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
}()
}
// Do not increment indexes since the succeeding group will handle this.
}
// Update this group since some identical elements were removed.
nx -= numIdentical
ny -= numIdentical
groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
}
ix += nx
iy += ny
}
return groups
}

431
vendor/github.com/google/go-cmp/cmp/report_text.go generated vendored Normal file

@ -0,0 +1,431 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmp
import (
"bytes"
"fmt"
"math/rand"
"strings"
"time"
"unicode/utf8"
"github.com/google/go-cmp/cmp/internal/flags"
)
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
const maxColumnLength = 80
type indentMode int
func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
// The output of Diff is documented as being unstable to provide future
// flexibility in changing the output for more humanly readable reports.
// This logic intentionally introduces instability to the exact output
// so that users can detect accidental reliance on stability early on,
// rather than much later when an actual change to the format occurs.
if flags.Deterministic || randBool {
// Use regular spaces (U+0020).
switch d {
case diffUnknown, diffIdentical:
b = append(b, " "...)
case diffRemoved:
b = append(b, "- "...)
case diffInserted:
b = append(b, "+ "...)
}
} else {
// Use non-breaking spaces (U+00a0).
switch d {
case diffUnknown, diffIdentical:
b = append(b, "  "...)
case diffRemoved:
b = append(b, "- "...)
case diffInserted:
b = append(b, "+ "...)
}
}
return repeatCount(n).appendChar(b, '\t')
}
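// Editorial note, not part of the upstream go-cmp source: on the regular-space
// branch above (taken when flags.Deterministic is set or the coin flip lands
// that way), the prefix is the diff marker followed by one tab per indent level:
//
//	indentMode(2).appendIndent(nil, diffRemoved)   // []byte("- \t\t")
//	indentMode(0).appendIndent(nil, diffIdentical) // []byte("  ")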
type repeatCount int
func (n repeatCount) appendChar(b []byte, c byte) []byte {
for ; n > 0; n-- {
b = append(b, c)
}
return b
}
// textNode is a simplified tree-based representation of structured text.
// Possible node types are textWrap, textList, or textLine.
type textNode interface {
// Len reports the length in bytes of a single-line version of the tree.
// Nested textRecord.Diff and textRecord.Comment fields are ignored.
Len() int
// Equal reports whether the two trees are structurally identical.
// Nested textRecord.Diff and textRecord.Comment fields are compared.
Equal(textNode) bool
// String returns the string representation of the text tree.
// It is not guaranteed that len(x.String()) == x.Len(),
// nor that x.String() == y.String() implies that x.Equal(y).
String() string
// formatCompactTo formats the contents of the tree as a single-line string
// to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
// fields are ignored.
//
// However, not all nodes in the tree should be collapsed into a single line.
// If a node can be collapsed into a single line, it is replaced by a textLine
// node. Since the top-level node cannot replace itself, this also returns
// the current node itself.
//
// This does not mutate the receiver.
formatCompactTo([]byte, diffMode) ([]byte, textNode)
// formatExpandedTo formats the contents of the tree as a multi-line string
// to the provided buffer. In order for column alignment to operate well,
// formatCompactTo must be called before calling formatExpandedTo.
formatExpandedTo([]byte, diffMode, indentMode) []byte
}
// textWrap is a wrapper that concatenates a prefix and/or a suffix
// to the underlying node.
type textWrap struct {
Prefix string // e.g., "bytes.Buffer{"
Value textNode // textWrap | textList | textLine
Suffix string // e.g., "}"
Metadata interface{} // arbitrary metadata; has no effect on formatting
}
func (s *textWrap) Len() int {
return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
}
func (s1 *textWrap) Equal(s2 textNode) bool {
if s2, ok := s2.(*textWrap); ok {
return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
}
return false
}
func (s *textWrap) String() string {
var d diffMode
var n indentMode
_, s2 := s.formatCompactTo(nil, d)
b := n.appendIndent(nil, d) // Leading indent
b = s2.formatExpandedTo(b, d, n) // Main body
b = append(b, '\n') // Trailing newline
return string(b)
}
func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
n0 := len(b) // Original buffer length
b = append(b, s.Prefix...)
b, s.Value = s.Value.formatCompactTo(b, d)
b = append(b, s.Suffix...)
if _, ok := s.Value.(textLine); ok {
return b, textLine(b[n0:])
}
return b, s
}
func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
b = append(b, s.Prefix...)
b = s.Value.formatExpandedTo(b, d, n)
b = append(b, s.Suffix...)
return b
}
// textList is a comma-separated list of textWrap or textLine nodes.
// The list may be formatted across multiple lines or on a single line,
// at the discretion of the textList.formatCompactTo method.
type textList []textRecord
type textRecord struct {
Diff diffMode // e.g., 0 or '-' or '+'
Key string // e.g., "MyField"
Value textNode // textWrap | textLine
ElideComma bool // avoid trailing comma
Comment fmt.Stringer // e.g., "6 identical fields"
}
// AppendEllipsis appends a new ellipsis node to the list if none already
// exists at the end. If ds is non-zero, it coalesces the statistics with the
// previous diffStats.
func (s *textList) AppendEllipsis(ds diffStats) {
hasStats := !ds.IsZero()
if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
if hasStats {
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
} else {
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
}
return
}
if hasStats {
(*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
}
}
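// Editorial note, not part of the upstream go-cmp source: calling AppendEllipsis
// repeatedly folds the statistics into a single trailing ellipsis record:
//
//	var l textList
//	l.AppendEllipsis(diffStats{Name: "line", NumIdentical: 2})
//	l.AppendEllipsis(diffStats{Name: "line", NumIdentical: 3})
//	// len(l) == 1 and its comment reads "5 identical lines"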
func (s textList) Len() (n int) {
for i, r := range s {
n += len(r.Key)
if r.Key != "" {
n += len(": ")
}
n += r.Value.Len()
if i < len(s)-1 {
n += len(", ")
}
}
return n
}
func (s1 textList) Equal(s2 textNode) bool {
if s2, ok := s2.(textList); ok {
if len(s1) != len(s2) {
return false
}
for i := range s1 {
r1, r2 := s1[i], s2[i]
if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
return false
}
}
return true
}
return false
}
func (s textList) String() string {
return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
}
func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
s = append(textList(nil), s...) // Avoid mutating original
// Determine whether we can collapse this list as a single line.
n0 := len(b) // Original buffer length
var multiLine bool
for i, r := range s {
if r.Diff == diffInserted || r.Diff == diffRemoved {
multiLine = true
}
b = append(b, r.Key...)
if r.Key != "" {
b = append(b, ": "...)
}
b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
if _, ok := s[i].Value.(textLine); !ok {
multiLine = true
}
if r.Comment != nil {
multiLine = true
}
if i < len(s)-1 {
b = append(b, ", "...)
}
}
// Force multi-lined output when printing a removed/inserted node that
// is sufficiently long.
if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
multiLine = true
}
if !multiLine {
return b, textLine(b[n0:])
}
return b, s
}
func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
alignKeyLens := s.alignLens(
func(r textRecord) bool {
_, isLine := r.Value.(textLine)
return r.Key == "" || !isLine
},
func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
)
alignValueLens := s.alignLens(
func(r textRecord) bool {
_, isLine := r.Value.(textLine)
return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
},
func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
)
// Format lists of simple lists in a batched form.
// If the list is a sequence of only textLine values,
// then batch multiple values on a single line.
var isSimple bool
for _, r := range s {
_, isLine := r.Value.(textLine)
isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
if !isSimple {
break
}
}
if isSimple {
n++
var batch []byte
emitBatch := func() {
if len(batch) > 0 {
b = n.appendIndent(append(b, '\n'), d)
b = append(b, bytes.TrimRight(batch, " ")...)
batch = batch[:0]
}
}
for _, r := range s {
line := r.Value.(textLine)
if len(batch)+len(line)+len(", ") > maxColumnLength {
emitBatch()
}
batch = append(batch, line...)
batch = append(batch, ", "...)
}
emitBatch()
n--
return n.appendIndent(append(b, '\n'), d)
}
// Format the list as a multi-lined output.
n++
for i, r := range s {
b = n.appendIndent(append(b, '\n'), d|r.Diff)
if r.Key != "" {
b = append(b, r.Key+": "...)
}
b = alignKeyLens[i].appendChar(b, ' ')
b = r.Value.formatExpandedTo(b, d|r.Diff, n)
if !r.ElideComma {
b = append(b, ',')
}
b = alignValueLens[i].appendChar(b, ' ')
if r.Comment != nil {
b = append(b, " // "+r.Comment.String()...)
}
}
n--
return n.appendIndent(append(b, '\n'), d)
}
func (s textList) alignLens(
skipFunc func(textRecord) bool,
lenFunc func(textRecord) int,
) []repeatCount {
var startIdx, endIdx, maxLen int
lens := make([]repeatCount, len(s))
for i, r := range s {
if skipFunc(r) {
for j := startIdx; j < endIdx && j < len(s); j++ {
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
}
startIdx, endIdx, maxLen = i+1, i+1, 0
} else {
if maxLen < lenFunc(r) {
maxLen = lenFunc(r)
}
endIdx = i + 1
}
}
for j := startIdx; j < endIdx && j < len(s); j++ {
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
}
return lens
}
// textLine is a single-line segment of text and is always a leaf node
// in the textNode tree.
type textLine []byte
var (
textNil = textLine("nil")
textEllipsis = textLine("...")
)
func (s textLine) Len() int {
return len(s)
}
func (s1 textLine) Equal(s2 textNode) bool {
if s2, ok := s2.(textLine); ok {
return bytes.Equal([]byte(s1), []byte(s2))
}
return false
}
func (s textLine) String() string {
return string(s)
}
func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
return append(b, s...), s
}
func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
return append(b, s...)
}
type diffStats struct {
Name string
NumIgnored int
NumIdentical int
NumRemoved int
NumInserted int
NumModified int
}
func (s diffStats) IsZero() bool {
s.Name = ""
return s == diffStats{}
}
func (s diffStats) NumDiff() int {
return s.NumRemoved + s.NumInserted + s.NumModified
}
func (s diffStats) Append(ds diffStats) diffStats {
assert(s.Name == ds.Name)
s.NumIgnored += ds.NumIgnored
s.NumIdentical += ds.NumIdentical
s.NumRemoved += ds.NumRemoved
s.NumInserted += ds.NumInserted
s.NumModified += ds.NumModified
return s
}
// String prints a humanly-readable summary of coalesced records.
//
// Example:
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
func (s diffStats) String() string {
var ss []string
var sum int
labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
for i, n := range counts {
if n > 0 {
ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
}
sum += n
}
// Pluralize the name (adjusting for some obscure English grammar rules).
name := s.Name
if sum > 1 {
name += "s"
if strings.HasSuffix(name, "ys") {
name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
}
}
// Format the list according to English grammar (with Oxford comma).
switch n := len(ss); n {
case 0:
return ""
case 1, 2:
return strings.Join(ss, " and ") + " " + name
default:
return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
}
}
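// Editorial note, not part of the upstream go-cmp source: further examples of
// the joining and pluralization rules above:
//
//	diffStats{Name: "entry", NumRemoved: 2, NumInserted: 1}.String()
//	// "2 removed and 1 inserted entries"
//	diffStats{Name: "line", NumIdentical: 3, NumRemoved: 1, NumModified: 1}.String()
//	// "3 identical, 1 removed, and 1 modified lines"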
type commentString string
func (s commentString) String() string { return string(s) }

121
vendor/github.com/google/go-cmp/cmp/report_value.go generated vendored Normal file

@ -0,0 +1,121 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmp
import "reflect"
// valueNode represents a single node within a report, which is a
// structured representation of the value tree, containing information
// regarding which nodes are equal or not.
type valueNode struct {
parent *valueNode
Type reflect.Type
ValueX reflect.Value
ValueY reflect.Value
// NumSame is the number of leaf nodes that are equal.
// All descendants are equal only if NumDiff is 0.
NumSame int
// NumDiff is the number of leaf nodes that are not equal.
NumDiff int
// NumIgnored is the number of leaf nodes that are ignored.
NumIgnored int
// NumCompared is the number of leaf nodes that were compared
// using an Equal method or Comparer function.
NumCompared int
// NumTransformed is the number of non-leaf nodes that were transformed.
NumTransformed int
// NumChildren is the number of transitive descendants of this node.
// This counts from zero; thus, leaf nodes have no descendants.
NumChildren int
// MaxDepth is the maximum depth of the tree. This counts from zero;
// thus, leaf nodes have a depth of zero.
MaxDepth int
// Records is a list of struct fields, slice elements, or map entries.
Records []reportRecord // If populated, implies Value is not populated
// Value is the result of a transformation, pointer indirection, or
// type assertion.
Value *valueNode // If populated, implies Records is not populated
// TransformerName is the name of the transformer.
TransformerName string // If non-empty, implies Value is populated
}
type reportRecord struct {
Key reflect.Value // Invalid for slice element
Value *valueNode
}
func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
vx, vy := ps.Values()
child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
switch s := ps.(type) {
case StructField:
assert(parent.Value == nil)
parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
case SliceIndex:
assert(parent.Value == nil)
parent.Records = append(parent.Records, reportRecord{Value: child})
case MapIndex:
assert(parent.Value == nil)
parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
case Indirect:
assert(parent.Value == nil && parent.Records == nil)
parent.Value = child
case TypeAssertion:
assert(parent.Value == nil && parent.Records == nil)
parent.Value = child
case Transform:
assert(parent.Value == nil && parent.Records == nil)
parent.Value = child
parent.TransformerName = s.Name()
parent.NumTransformed++
default:
assert(parent == nil) // Must be the root step
}
return child
}
func (r *valueNode) Report(rs Result) {
assert(r.MaxDepth == 0) // May only be called on leaf nodes
if rs.ByIgnore() {
r.NumIgnored++
} else {
if rs.Equal() {
r.NumSame++
} else {
r.NumDiff++
}
}
assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
if rs.ByMethod() {
r.NumCompared++
}
if rs.ByFunc() {
r.NumCompared++
}
assert(r.NumCompared <= 1)
}
func (child *valueNode) PopStep() (parent *valueNode) {
if child.parent == nil {
return nil
}
parent = child.parent
parent.NumSame += child.NumSame
parent.NumDiff += child.NumDiff
parent.NumIgnored += child.NumIgnored
parent.NumCompared += child.NumCompared
parent.NumTransformed += child.NumTransformed
parent.NumChildren += child.NumChildren + 1
if parent.MaxDepth < child.MaxDepth+1 {
parent.MaxDepth = child.MaxDepth + 1
}
return parent
}

9
vendor/github.com/google/uuid/.travis.yml generated vendored Normal file

@ -0,0 +1,9 @@
language: go
go:
- 1.4.3
- 1.5.3
- tip
script:
- go test -v ./...

10
vendor/github.com/google/uuid/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,10 @@
# How to contribute
We definitely welcome patches and contributions to this project!
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
You may have already signed it for other Google projects.

9
vendor/github.com/google/uuid/CONTRIBUTORS generated vendored Normal file

@ -0,0 +1,9 @@
Paul Borman <borman@google.com>
bmatsuo
shawnps
theory
jboverfelt
dsymonds
cd1
wallclockbuilder
dansouza

27
vendor/github.com/google/uuid/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Some files were not shown because too many files have changed in this diff.