mirror of https://github.com/alice-lg/birdwatcher.git synced 2025-03-09 00:00:05 +01:00

Merge branch 'DECIX-develop'

This commit is contained in:
Matthias Hannig 2019-03-20 11:23:13 +01:00
commit 6fe4505bde
101 changed files with 6244 additions and 456 deletions

102
CHANGELOG
View file

@ -1,5 +1,105 @@
1.12.4
* Add the ability to switch between redis and the classic
memory cache backend
1.12.3
* Fix community value missing when parsing multiline communities
1.12.2
* Remove unused OrigTTL attribute from cache info
1.12.1
* Parser: ignore whitespace at the end of the protocol line
* Fix parser tests failing
1.12.0
* Parser: Fix a bug when parsing 'BGP.*community' output spanning multiple lines
* Fix the default config path to '/etc/birdwatcher/birdwatcher.conf'
1.11.5
This version brings a major redesign of the cache feature. The cache is more
efficient, leading to fewer executions of birdc commands in high-load scenarios.
Other improvements are:
* Fix testcase for /protocols/bgp
* New birdc query 'RoutesFilteredCount()'
* Queue for birdc commands, prevents running the same birdc command multiple
times in parallel on concurrent API requests.
* Cache: redesign cache structure, separation of Parsed and Meta cache
* allows independent cache access
* implement convenience methods for interaction with the cache
1.11.4
* Fix race condition between main() and InstallRateLimitReset()
* Fix endpoint /routes/count/table now returns integer instead of routes
* Fix endpoint /routes/count/protocol now returns integer instead of routes
* Fix endpoint /routes/prefixed ignoring the URL parameter
* Fix endpoint /symbols
* Fix TestParseProtocolBgp()
* Fix some filtered routes missing in /routes/dump
* Fix parser to return correct type on error
* New endpoint for total number of best-paths '/routes/count/primary'
* Parser: support extended communities and test-cases
* Introduce new data structure for extended communities replacing the Parsed type
* Cache: avoid duplicate cache writes in Status()
1.11.3
* Move code to Alice-LG GitHub organization
* Include caching information alongside ttl
* Repair endpoint 'protocols'
* Documentation on interpretation of route count numbers
* Improve example configuration
* Caching of parsed responses from birdc.
Eliminates an additional parser run for cache hits.
* Fix quotes in arguments of birdc commands in Routes{Table,Lookup}* methods.
This eliminates the "IP address expected" error in BIRD.
1.11.2
* Parser: protocol parser can now parse all types of protocols
* Parser: improved parsing of route change statistics
* Add TLS support for the HTTP listener
* Configuration: add new option for TLS 'enable_tls', 'crt' and 'key'
* Improved logging of API requests, similar to a webserver
* Execute birdc in restricted mode 'birdc -r'
1.11.1
* Fix detection of BIRD v2.x.y
* Fix birdc command in RoutesFiltered
* Use worker-threads to parse in parallel. This speeds up parsing of large responses e.g. BGP full-table.
* Add flag "worker-pool-size" to control number of threads while parsing
* Configuration: add setting for ttl value to control caching of bird responses
* Configuration: change default location to /etc/birdwatcher
1.11.0
* Parser: support BIRD v2.x with multiprotocol BGP and channels
* Parser: major refactoring
* Parser: decrease memory footprint
* use io.Reader interface
* use line-iterator for birdc output
* Detection of BIRD v1.6.x or BIRD v2.x
* Extend test coverage for IPv6
* Tests for BIRD v2.x and IPv4
* Dependencies now managed by dep
1.10.2
* Workaround for interface conversion issue
1.10.1
* Fix import/export naming for routes in API
1.10.0
* Added support for dumping all routes

24
Gopkg.lock generated
View file

@ -7,11 +7,17 @@
revision = "b26d9c308763d68093482582cea63d69be07a0f0"
version = "v0.3.0"
[[projects]]
name = "github.com/gorilla/handlers"
packages = ["."]
revision = "90663712d74cb411cbef281bc1e08c19d1a76145"
version = "v1.3.0"
[[projects]]
name = "github.com/imdario/mergo"
packages = ["."]
revision = "f1ac5984e69fed03e0574a92f70c59f132616ea2"
version = "0.3.0"
revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58"
version = "v0.3.5"
[[projects]]
name = "github.com/julienschmidt/httprouter"
@ -19,9 +25,21 @@
revision = "8c199fb6259ffc1af525cc3ad52ee60ba8359669"
version = "v1.1"
[[projects]]
name = "github.com/kr/text"
packages = ["."]
revision = "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f"
version = "v0.1.0"
[[projects]]
branch = "master"
name = "github.com/tonnerre/golang-pretty"
packages = ["."]
revision = "e7fccc03e91bad289b96c21aa3312a220689bdd7"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "0284f4182faee0cc68c76356a89e5a12150bb3fe97b47751f319bfa741170fed"
inputs-digest = "cbd7e903341c5f91e2fca727fa706866a0a62457f7b73b68bec5088a3f029242"
solver-name = "gps-cdcl"
solver-version = 1

View file

@ -1,6 +1,6 @@
BSD 3-Clause License
Copyright (c) 2016, ECIX
Copyright (c) 2016, ECIX, DE-CIX
All rights reserved.
Redistribution and use in source and binary forms, with or without

View file

@ -1,5 +1,5 @@
#
# Ecix Birdseye Makefile
# Birdseye Makefile
#
PROG=birdwatcher
@ -44,8 +44,8 @@ endif
dist: clean linux
mkdir -p $(DIST)opt/ecix/birdwatcher/bin
mkdir -p $(DIST)etc/ecix
mkdir -p $(DIST)opt/birdwatcher/birdwatcher/bin
mkdir -p $(DIST)etc/birdwatcher
ifeq ($(SYSTEM_INIT), systemd)
# Installing systemd services
@ -59,11 +59,11 @@ endif
# Copy config and startup script
cp etc/ecix/* $(DIST)etc/ecix/.
rm -f $(DIST)etc/ecix/*.local.*
cp etc/birdwatcher/* $(DIST)etc/birdwatcher/.
rm -f $(DIST)etc/birdwatcher/*.local.*
# Copy bin
cp $(PROG)-linux-$(ARCH) $(DIST)opt/ecix/birdwatcher/bin/.
cp $(PROG)-linux-$(ARCH) $(DIST)opt/birdwatcher/birdwatcher/bin/.
release: linux
@ -71,7 +71,7 @@ release: linux
mkdir -p ../birdseye-static/birdwatcher-builds/$(APP_VERSION)/
cp birdwatcher-linux-amd64 ../birdseye-static/birdwatcher-builds/$(APP_VERSION)/
rm -f ../birdseye-static/birdwatcher-builds/latest
cd ../birdseye-static/birdwatcher-builds && ln -s $(APP_VERSION) latest
cd ../birdseye-static/birdwatcher-builds && ln -s $(APP_VERSION) latest
rpm: dist
@ -81,7 +81,7 @@ rpm: dist
# Create RPM from dist
fpm -s dir -t rpm -n $(PROG) -v $(VERSION) -C $(DIST) \
--config-files /etc/ecix/birdwatcher.conf \
--config-files /etc/birdwatcher/birdwatcher.conf \
opt/ etc/
mv $(RPM) $(LOCAL_RPMS)
@ -95,14 +95,21 @@ remote_rpm: build_server dist
ssh $(BUILD_SERVER) -- rm -rf $(REMOTE_DIST)
scp -r $(DIST) $(BUILD_SERVER):$(REMOTE_DIST)
ssh $(BUILD_SERVER) -- fpm -s dir -t rpm -n $(PROG) -v $(VERSION) -C $(REMOTE_DIST) \
--config-files /etc/ecix/birdwatcher.conf \
--config-files /etc/birdwatcher/birdwatcher.conf \
opt/ etc/
# Get rpm from server
scp $(BUILD_SERVER):$(RPM) $(LOCAL_RPMS)/.
.PHONY: test clean
test:
go test -v
cd endpoints/ && go test -v
cd bird/ && go test -v
clean:
rm -f $(PROG)-osx-$(ARCH)
rm -f $(PROG)-linux-$(ARCH)
rm -rf $(DIST)

View file

@ -14,7 +14,7 @@ regular binaries, which means deployment and maintenance might be
more convenient.
Our version also has a few more capabilities, as you will
discover when looking at [the modules section](https://github.com/alice-lg/birdwatcher/blob/master/etc/ecix/birdwatcher.conf)
discover when looking at [the modules section](https://github.com/alice-lg/birdwatcher/blob/master/etc/birdwatcher/birdwatcher.conf)
of the config.
## Installation
@ -42,8 +42,8 @@ the time format to be `iso long`. You need to configure
in your `/etc/bird[6].conf` for birdwatcher to work.
#### BIRD keep filtered routes
To also see the filtered routes in BIRD you need to make sure that you
have enabled the 'import keep filtered on' option for your BGP peers.
To also see the filtered routes in BIRD you need to make sure that you
have enabled the 'import keep filtered on' option for your BGP peers.
protocol bgp 'peerX' {
...
@ -54,10 +54,10 @@ have enabled the 'import keep filtered on' option for your BGP peers.
Now you should be able to do a 'show route filtered' in BIRD.
Do note that 'import keep filtered on' does NOT work for BIRD's pipe protocol
which is used when you have per peer tables, often used with Route Servers. If
your BIRD configuration has its import filters set on the BIRD pipe protocols
themselves then you will not be able to show the filtered routes.
However, you could move the import filters from the pipes to the BGP protocols
which is used when you have per peer tables, often used with Route Servers. If
your BIRD configuration has its import filters set on the BIRD pipe protocols
themselves then you will not be able to show the filtered routes.
However, you could move the import filters from the pipes to the BGP protocols
directly. For example:
table master;
@ -135,7 +135,7 @@ We do not currently support other deployment methods.
## Configuration
An example config with sane defaults is provided in
[etc/ecix/birdwatcher.conf](https://github.com/alice-lg/birdwatcher/blob/master/etc/ecix/birdwatcher.conf).
[etc/birdwatcher/birdwatcher.conf](https://github.com/alice-lg/birdwatcher/blob/master/etc/birdwatcher/birdwatcher.conf).
You should be able to use it out of the box. If you need
to change it, it is well-commented and hopefully intuitive.
If you do not know how to configure it, please consider opening

View file

@ -1 +1 @@
1.11.0
1.12.4

View file

@ -13,93 +13,92 @@ import (
"os/exec"
)
type Cache interface {
Set(key string, val Parsed, ttl int) error
Get(key string) (Parsed, error)
}
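// Both the MemoryCache (bird/memory_cache.go) and the RedisCache (bird/redis_cache.go)
// implement this interface; InitializeCache() below installs the selected backend.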
var ClientConf BirdConfig
var StatusConf StatusConfig
var IPVersion = "4"
var cache Cache // stores parsed birdc output
var RateLimitConf struct {
sync.RWMutex
Conf RateLimitConfig
}
var CacheMap = struct {
sync.RWMutex
m map[string]Parsed
}{m: make(map[string]Parsed)}
var RunQueue sync.Map // queue birdc commands before execution
var CacheRedis *RedisCache
var NilParse Parsed = (Parsed)(nil)
var NilParse Parsed = (Parsed)(nil) // special Parsed values
var BirdError Parsed = Parsed{"error": "bird unreachable"}
func isSpecial(ret Parsed) bool {
func IsSpecial(ret Parsed) bool { // test for special Parsed values
return reflect.DeepEqual(ret, NilParse) || reflect.DeepEqual(ret, BirdError)
}
func fromCacheMemory(key string) (Parsed, bool) {
CacheMap.RLock()
val, ok := CacheMap.m[key]
CacheMap.RUnlock()
if !ok {
return NilParse, false
}
ttl, correct := val["ttl"].(time.Time)
if !correct || ttl.Before(time.Now()) {
return NilParse, false
}
return val, ok
// initialize the Cache once during setup with either a MemoryCache or
// RedisCache implementation.
// TODO implement singleton pattern
func InitializeCache(c Cache) {
cache = c
}
func fromCacheRedis(key string) (Parsed, bool) {
key = "B" + IPVersion + "_" + key
val, err := CacheRedis.Get(key)
if err != nil {
return NilParse, false
}
ttl, correct := val["ttl"].(time.Time)
if !correct || ttl.Before(time.Now()) {
return NilParse, false
}
return val, true
}
func fromCache(key string) (Parsed, bool) {
if CacheRedis == nil {
return fromCacheMemory(key)
}
return fromCacheRedis(key)
}
func toCacheMemory(key string, val Parsed) {
val["ttl"] = time.Now().Add(5 * time.Minute)
CacheMap.Lock()
CacheMap.m[key] = val
CacheMap.Unlock()
}
func toCacheRedis(key string, val Parsed) {
key = "B" + IPVersion + "_" + key
val["ttl"] = time.Now().Add(5 * time.Minute)
err := CacheRedis.Set(key, val)
if err != nil {
log.Println("Could not set cache for key:", key, "Error:", err)
}
}
func toCache(key string, val Parsed) {
if CacheRedis == nil {
toCacheMemory(key, val)
/* Convenience method to make new entries in the cache.
* Abstracts over the specific caching implementation and the ability to set
* individual TTL values for entries. Always use the default TTL value from the
* config.
*/
func toCache(key string, val Parsed) bool {
var ttl int
if ClientConf.CacheTtl > 0 {
ttl = ClientConf.CacheTtl
} else {
toCacheRedis(key, val)
ttl = 5 // five minutes
}
if err := cache.Set(key, val, ttl); err == nil {
return true
} else {
log.Println(err)
return false
}
}
/* Convenience method to retrieve entries from the cache.
* Abstracts over the specific caching implementations.
* If err returned by cache.Get(key) is set, the value from the cache is not
* used. There is either a fault e.g. missing entry or the ttl is expired.
* Handling of specific error conditions e.g. ttl expired but entry present is
* possible but currently not implemented.
*/
func fromCache(key string) (Parsed, bool) {
val, err := cache.Get(key)
if err == nil {
return val, true
} else {
return val, false
}
//DEBUG log.Println(err)
}
// Determines the key in the cache, where the result of specific functions are stored.
// Eliminates the need to know what command was executed by that function.
func GetCacheKey(fname string, fargs ...interface{}) string {
key := strings.ToLower(fname)
for _, arg := range fargs {
switch arg.(type) {
case string:
key += "_" + strings.ToLower(arg.(string))
}
}
return key
}
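// Example: GetCacheKey("RoutesProto", "R194_129") yields "routesproto_r194_129";
// arguments that are not strings are skipped by the switch above.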
func Run(args string) (io.Reader, error) {
args = "show " + args
args = "-r " + "show " + args // enforce birdc in restricted mode with "-r" argument
argsList := strings.Split(args, " ")
out, err := exec.Command(ClientConf.BirdCmd, argsList...).Output()
@ -144,117 +143,175 @@ func checkRateLimit() bool {
return true
}
func RunAndParse(cmd string, parser func(io.Reader) Parsed) (Parsed, bool) {
func RunAndParse(key string, cmd string, parser func(io.Reader) Parsed, updateCache func(*Parsed)) (Parsed, bool) {
if val, ok := fromCache(cmd); ok {
return val, true
}
var wg sync.WaitGroup
wg.Add(1)
if queueGroup, queueLoaded := RunQueue.LoadOrStore(cmd, &wg); queueLoaded {
(*queueGroup.(*sync.WaitGroup)).Wait()
if val, ok := fromCache(cmd); ok {
return val, true
} else {
// TODO BirdError should also be signaled somehow
return NilParse, false
}
}
if !checkRateLimit() {
wg.Done()
RunQueue.Delete(cmd)
return NilParse, false
}
out, err := Run(cmd)
if err != nil {
// ignore errors for now
wg.Done()
RunQueue.Delete(cmd)
return BirdError, false
}
parsed := parser(out)
if updateCache != nil {
updateCache(&parsed)
}
toCache(cmd, parsed)
wg.Done()
RunQueue.Delete(cmd)
return parsed, false
}
func Status() (Parsed, bool) {
birdStatus, ok := RunAndParse("status", parseStatus)
if isSpecial(birdStatus) {
return birdStatus, ok
}
status := birdStatus["status"].(Parsed)
updateParsedCache := func(p *Parsed) {
status := (*p)["status"].(Parsed)
// Last Reconfig Timestamp source:
var lastReconfig string
switch StatusConf.ReconfigTimestampSource {
case "bird":
lastReconfig = status["last_reconfig"].(string)
break
case "config_modified":
lastReconfig = lastReconfigTimestampFromFileStat(
ClientConf.ConfigFilename,
)
case "config_regex":
lastReconfig = lastReconfigTimestampFromFileContent(
ClientConf.ConfigFilename,
StatusConf.ReconfigTimestampMatch,
)
}
// Last Reconfig Timestamp source:
var lastReconfig string
switch StatusConf.ReconfigTimestampSource {
case "bird":
lastReconfig = status["last_reconfig"].(string)
break
case "config_modified":
lastReconfig = lastReconfigTimestampFromFileStat(
ClientConf.ConfigFilename,
)
case "config_regex":
lastReconfig = lastReconfigTimestampFromFileContent(
ClientConf.ConfigFilename,
StatusConf.ReconfigTimestampMatch,
)
}
status["last_reconfig"] = lastReconfig
status["last_reconfig"] = lastReconfig
// Filter fields
for _, field := range StatusConf.FilterFields {
status[field] = nil
}
birdStatus["status"] = status
return birdStatus, ok
}
func Protocols() (Parsed, bool) {
return RunAndParse("protocols all", parseProtocols)
}
func ProtocolsBgp() (Parsed, bool) {
p, from_cache := Protocols()
if isSpecial(p) {
return p, from_cache
}
protocols := p["protocols"].([]string)
bgpProto := Parsed{}
for _, v := range protocols {
if strings.Contains(v, " BGP ") {
key := strings.Split(v, " ")[0]
bgpProto[key] = parseBgp(v)
// Filter fields
for _, field := range StatusConf.FilterFields {
status[field] = nil
}
}
return Parsed{"protocols": bgpProto, "ttl": p["ttl"]}, from_cache
birdStatus, from_cache := RunAndParse(GetCacheKey("Status"), "status", parseStatus, updateParsedCache)
return birdStatus, from_cache
}
func Protocols() (Parsed, bool) {
createMetaCache := func(p *Parsed) {
metaProtocol := Parsed{"protocols": Parsed{"bird_protocol": Parsed{}}}
for key, _ := range (*p)["protocols"].(Parsed) {
parsed := (*p)["protocols"].(Parsed)[key].(Parsed)
protocol := parsed["protocol"].(string)
birdProtocol := parsed["bird_protocol"].(string)
// Check if the structure for the current birdProtocol already exists inside the metaProtocol cache, if not create it (BGP|Pipe|etc)
if _, ok := metaProtocol["protocols"].(Parsed)["bird_protocol"].(Parsed)[birdProtocol]; !ok {
metaProtocol["protocols"].(Parsed)["bird_protocol"].(Parsed)[birdProtocol] = Parsed{}
}
metaProtocol["protocols"].(Parsed)["bird_protocol"].(Parsed)[birdProtocol].(Parsed)[protocol] = &parsed
}
toCache(GetCacheKey("metaProtocol"), metaProtocol)
}
res, from_cache := RunAndParse(GetCacheKey("metaProtocol"), "protocols all", parseProtocols, createMetaCache)
return res, from_cache
}
func ProtocolsBgp() (Parsed, bool) {
protocols, from_cache := Protocols()
if IsSpecial(protocols) {
return protocols, from_cache
}
protocolsMeta, _ := fromCache(GetCacheKey("metaProtocol"))
metaProtocol := protocolsMeta["protocols"].(Parsed)
bgpProtocols := Parsed{}
for key, protocol := range metaProtocol["bird_protocol"].(Parsed)["BGP"].(Parsed) {
bgpProtocols[key] = *(protocol.(*Parsed))
}
return Parsed{"protocols": bgpProtocols,
"ttl": protocols["ttl"],
"cached_at": protocols["cached_at"]}, from_cache
}
func Symbols() (Parsed, bool) {
return RunAndParse("symbols", parseSymbols)
return RunAndParse(GetCacheKey("Symbols"), "symbols", parseSymbols, nil)
}
func RoutesPrefixed(prefix string) (Parsed, bool) {
cmd := routeQueryForChannel("route all")
return RunAndParse(cmd, parseRoutes)
cmd := routeQueryForChannel("route " + prefix + " all")
return RunAndParse(GetCacheKey("RoutesPrefixed", prefix), cmd, parseRoutes, nil)
}
func RoutesProto(protocol string) (Parsed, bool) {
cmd := routeQueryForChannel("route all protocol " + protocol)
return RunAndParse(cmd, parseRoutes)
return RunAndParse(GetCacheKey("RoutesProto", protocol), cmd, parseRoutes, nil)
}
func RoutesProtoCount(protocol string) (Parsed, bool) {
cmd := routeQueryForChannel("route protocol "+protocol) + " count"
return RunAndParse(cmd, parseRoutes)
return RunAndParse(GetCacheKey("RoutesProtoCount", protocol), cmd, parseRoutesCount, nil)
}
func RoutesProtoPrimaryCount(protocol string) (Parsed, bool) {
cmd := routeQueryForChannel("route primary protocol "+protocol) + " count"
return RunAndParse(GetCacheKey("RoutesProtoPrimaryCount", protocol), cmd, parseRoutesCount, nil)
}
func PipeRoutesFilteredCount(pipe string, table string, neighborAddress string) (Parsed, bool) {
cmd := "route table " + table + " noexport " + pipe + " where from=" + neighborAddress + " count"
return RunAndParse(GetCacheKey("PipeRoutesFilteredCount", table, pipe, neighborAddress), cmd, parseRoutesCount, nil)
}
func PipeRoutesFiltered(pipe string, table string) (Parsed, bool) {
cmd := routeQueryForChannel("route table '" + table + "' noexport '" + pipe + "' all")
return RunAndParse(GetCacheKey("PipeRoutesFiltered", table, pipe), cmd, parseRoutes, nil)
}
func RoutesFiltered(protocol string) (Parsed, bool) {
cmd := routeQueryForChannel("route filtered protocol '" + protocol + "' all")
return RunAndParse(cmd, parseRoutes)
cmd := routeQueryForChannel("route all filtered protocol " + protocol)
return RunAndParse(GetCacheKey("RoutesFiltered", protocol), cmd, parseRoutes, nil)
}
func RoutesExport(protocol string) (Parsed, bool) {
cmd := routeQueryForChannel("route all export " + protocol)
return RunAndParse(cmd, parseRoutes)
return RunAndParse(GetCacheKey("RoutesExport", protocol), cmd, parseRoutes, nil)
}
func RoutesNoExport(protocol string) (Parsed, bool) {
// In case we have a multi table setup, we have to query
// the pipe protocol.
if ParserConf.PerPeerTables &&
@ -265,37 +322,38 @@ func RoutesNoExport(protocol string) (Parsed, bool) {
protocol[len(ParserConf.PeerProtocolPrefix):]
}
cmd := routeQueryForChannel("route noexport '" + protocol + "' all")
return RunAndParse(cmd, parseRoutes)
cmd := routeQueryForChannel("route all noexport " + protocol)
return RunAndParse(GetCacheKey("RoutesNoExport", protocol), cmd, parseRoutes, nil)
}
func RoutesExportCount(protocol string) (Parsed, bool) {
cmd := routeQueryForChannel("route export "+protocol) + " count"
return RunAndParse(cmd, parseRoutesCount)
return RunAndParse(GetCacheKey("RoutesExportCount", protocol), cmd, parseRoutesCount, nil)
}
func RoutesTable(table string) (Parsed, bool) {
return RunAndParse("route table '"+table+"' all", parseRoutes)
return RunAndParse(GetCacheKey("RoutesTable", table), "route table "+table+" all", parseRoutes, nil)
}
func RoutesTableCount(table string) (Parsed, bool) {
return RunAndParse("route table '"+table+"' count", parseRoutesCount)
return RunAndParse(GetCacheKey("RoutesTableCount", table), "route table "+table+" count", parseRoutesCount, nil)
}
func RoutesLookupTable(net string, table string) (Parsed, bool) {
return RunAndParse("route for '"+net+"' table '"+table+"' all", parseRoutes)
return RunAndParse(GetCacheKey("RoutesLookupTable", net, table), "route for "+net+" table "+table+" all", parseRoutes, nil)
}
func RoutesLookupProtocol(net string, protocol string) (Parsed, bool) {
return RunAndParse("route for '"+net+"' protocol '"+protocol+"' all", parseRoutes)
return RunAndParse(GetCacheKey("RoutesLookupProtocol", net, protocol), "route for "+net+" protocol "+protocol+" all", parseRoutes, nil)
}
func RoutesPeer(peer string) (Parsed, bool) {
cmd := routeQueryForChannel("route export " + peer)
return RunAndParse(cmd, parseRoutes)
return RunAndParse(GetCacheKey("RoutesPeer", peer), cmd, parseRoutes, nil)
}
func RoutesDump() (Parsed, bool) {
// TODO insert hook to update the cache with the route count information
if ParserConf.PerPeerTables {
return RoutesDumpPerPeerTable()
}
@ -304,8 +362,14 @@ func RoutesDump() (Parsed, bool) {
}
func RoutesDumpSingleTable() (Parsed, bool) {
importedRes, cached := RunAndParse(routeQueryForChannel("route all"), parseRoutes)
filteredRes, _ := RunAndParse(routeQueryForChannel("route all filtered"), parseRoutes)
importedRes, cached := RunAndParse(GetCacheKey("RoutesDumpSingleTable", "imported"), routeQueryForChannel("route all"), parseRoutes, nil)
if IsSpecial(importedRes) {
return importedRes, cached
}
filteredRes, cached := RunAndParse(GetCacheKey("RoutesDumpSingleTable", "filtered"), routeQueryForChannel("route all filtered"), parseRoutes, nil)
if IsSpecial(filteredRes) {
return filteredRes, cached
}
imported := importedRes["routes"]
filtered := filteredRes["routes"]
@ -319,19 +383,23 @@ func RoutesDumpSingleTable() (Parsed, bool) {
}
func RoutesDumpPerPeerTable() (Parsed, bool) {
importedRes, cached := RunAndParse(routeQueryForChannel("route all"), parseRoutes)
importedRes, cached := RunAndParse(GetCacheKey("RoutesDumpPerPeerTable", "imported"), routeQueryForChannel("route all"), parseRoutes, nil)
if IsSpecial(importedRes) {
return importedRes, cached
}
imported := importedRes["routes"]
filtered := []Parsed{}
// Get protocols with filtered routes
protocolsRes, _ := ProtocolsBgp()
protocolsRes, cached := ProtocolsBgp()
if IsSpecial(protocolsRes) {
return protocolsRes, cached
}
protocols := protocolsRes["protocols"].(Parsed)
for protocol, details := range protocols {
details, ok := details.(Parsed)
if !ok {
continue
}
details := details.(Parsed)
counters, ok := details["routes"].(Parsed)
if !ok {
continue
@ -342,7 +410,6 @@ func RoutesDumpPerPeerTable() (Parsed, bool) {
}
// Lookup filtered routes
pfilteredRes, _ := RoutesFiltered(protocol)
pfiltered, ok := pfilteredRes["routes"].([]Parsed)
if !ok {
continue // something went wrong...
@ -361,6 +428,10 @@ func RoutesDumpPerPeerTable() (Parsed, bool) {
func routeQueryForChannel(cmd string) string {
status, _ := Status()
if IsSpecial(status) {
return cmd
}
birdStatus, ok := status["status"].(Parsed)
if !ok {
return cmd

View file

@ -13,6 +13,7 @@ type BirdConfig struct {
Listen string
ConfigFilename string `toml:"config"`
BirdCmd string `toml:"birdc"`
CacheTtl int `toml:"ttl"`
}
type ParserConfig struct {

61
bird/memory_cache.go Normal file
View file

@ -0,0 +1,61 @@
package bird
import (
"errors"
"sync"
"time"
)
// Implementation of the MemoryCache backend.
type MemoryCache struct {
sync.RWMutex
m map[string]Parsed
}
func NewMemoryCache() (*MemoryCache, error) {
var cache *MemoryCache
cache = &MemoryCache{m: make(map[string]Parsed)}
return cache, nil
}
func (c *MemoryCache) Get(key string) (Parsed, error) {
c.RLock()
val, ok := c.m[key]
c.RUnlock()
if !ok { // cache miss
return NilParse, errors.New("Failed to retrive key '" + key + "' from MemoryCache.")
}
ttl, correct := val["ttl"].(time.Time)
if !correct {
return NilParse, errors.New("Invalid TTL value for key '" + key + "'")
}
if ttl.Before(time.Now()) {
return val, errors.New("TTL expired for key '" + key + "'") // TTL expired
} else {
return val, nil // cache hit
}
}
func (c *MemoryCache) Set(key string, val Parsed, ttl int) error {
switch {
case ttl == 0:
return nil // do not cache
case ttl > 0:
cachedAt := time.Now().UTC()
cacheTtl := cachedAt.Add(time.Duration(ttl) * time.Minute)
c.Lock()
// This is not a really ... clean way of doing this.
val["ttl"] = cacheTtl
val["cached_at"] = cachedAt
c.m[key] = val
c.Unlock()
return nil
default: // ttl negative - invalid
return errors.New("Negative TTL value for key" + key)
}
}

74
bird/memory_cache_test.go Normal file
View file

@ -0,0 +1,74 @@
package bird
import (
"testing"
)
func Test_MemoryCacheAccess(t *testing.T) {
cache, err := NewMemoryCache()
parsed := Parsed{
"foo": 23,
"bar": 42,
"baz": true,
}
t.Log("Setting memory cache...")
err = cache.Set("testkey", parsed, 5)
if err != nil {
t.Error(err)
}
t.Log("Fetching from memory cache...")
parsed, err = cache.Get("testkey")
if err != nil {
t.Error(err)
}
t.Log(parsed)
}
func Test_MemoryCacheAccessKeyMissing(t *testing.T) {
cache, err := NewMemoryCache()
parsed, err := cache.Get("test_missing_key")
if !IsSpecial(parsed) {
t.Error(err)
}
t.Log("Cache error:", err)
t.Log(parsed)
}
func Test_MemoryCacheRoutes(t *testing.T) {
f, err := openFile("routes_bird1_ipv4.sample")
if err != nil {
t.Error(err)
}
defer f.Close()
parsed := parseRoutes(f)
_, ok := parsed["routes"].([]Parsed)
if !ok {
t.Fatal("Error getting routes")
}
cache, err := NewMemoryCache()
err = cache.Set("routes_protocol_test", parsed, 5)
if err != nil {
t.Error(err)
}
parsed, err = cache.Get("routes_protocol_test")
if err != nil {
t.Error(err)
return
}
routes, ok := parsed["routes"].([]Parsed)
if !ok {
t.Error("Error getting routes")
}
t.Log("Retrieved routes:", len(routes))
}

View file

@ -6,29 +6,29 @@ import (
"regexp"
"strconv"
"strings"
"sync"
)
// WorkerPoolSize is the number of go routines used to parse routing tables concurrently
var WorkerPoolSize = 8
var (
ParserConf ParserConfig
regex struct {
lineSeperator *regexp.Regexp
status struct {
status struct {
startLine *regexp.Regexp
routerID *regexp.Regexp
currentServer *regexp.Regexp
lastReboot *regexp.Regexp
lastReconfig *regexp.Regexp
}
bgp struct {
channel *regexp.Regexp
protocol *regexp.Regexp
numericValue *regexp.Regexp
routes *regexp.Regexp
stringValue *regexp.Regexp
importUpdates *regexp.Regexp
importWithdraws *regexp.Regexp
exportUpdates *regexp.Regexp
exportWithdraws *regexp.Regexp
protocol struct {
channel *regexp.Regexp
protocol *regexp.Regexp
numericValue *regexp.Regexp
routes *regexp.Regexp
stringValue *regexp.Regexp
routeChanges *regexp.Regexp
}
symbols struct {
keyRx *regexp.Regexp
@ -37,15 +37,16 @@ var (
countRx *regexp.Regexp
}
routes struct {
startDefinition *regexp.Regexp
second *regexp.Regexp
routeType *regexp.Regexp
bgp *regexp.Regexp
community *regexp.Regexp
largeCommunity *regexp.Regexp
origin *regexp.Regexp
prefixBird2 *regexp.Regexp
gatewayBird2 *regexp.Regexp
startDefinition *regexp.Regexp
second *regexp.Regexp
routeType *regexp.Regexp
bgp *regexp.Regexp
community *regexp.Regexp
largeCommunity *regexp.Regexp
extendedCommunity *regexp.Regexp
origin *regexp.Regexp
prefixBird2 *regexp.Regexp
gatewayBird2 *regexp.Regexp
}
}
)
@ -63,15 +64,12 @@ func init() {
regex.routeCount.countRx = regexp.MustCompile(`^(\d+)\s+of\s+(\d+)\s+routes.*$`)
regex.bgp.channel = regexp.MustCompile("Channel ipv([46])")
regex.bgp.protocol = regexp.MustCompile(`^([\w\.:]+)\s+BGP\s+(\w+)\s+(\w+)\s+([0-9]{4}-[0-9]{2}-[0-9]{2}\s+[0-9]{2}:[0-9]{2}:[0-9]{2})\s*(\w+)?.*$`)
regex.bgp.numericValue = regexp.MustCompile(`^\s+([^:]+):\s+([\d]+)\s*$`)
regex.bgp.routes = regexp.MustCompile(`^\s+Routes:\s+(.*)`)
regex.bgp.stringValue = regexp.MustCompile(`^\s+([^:]+):\s+(.+)\s*$`)
regex.bgp.importUpdates = regexp.MustCompile(`^\s+Import updates:\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s*$`)
regex.bgp.importWithdraws = regexp.MustCompile(`^\s+Import withdraws:\s+(\d+)\s+(\d+)\s+\-\-\-\s+(\d+)\s+(\d+)\s*$`)
regex.bgp.exportUpdates = regexp.MustCompile(`^\s+Export updates:\s+(\d+)\s+(\d+)\s+(\d+)\s+\-\-\-\s+(\d+)\s*$`)
regex.bgp.exportWithdraws = regexp.MustCompile(`^\s+Export withdraws:\s+(\d+)(\s+\-\-\-){2}\s+(\d+)\s*$`)
regex.protocol.channel = regexp.MustCompile("Channel ipv([46])")
regex.protocol.protocol = regexp.MustCompile(`^(?:1002\-)?([^\s]+)\s+(BGP|Pipe|BFD|Direct|Device|Kernel)\s+([^\s]+)\s+([^\s]+)\s+(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}|[^\s]+)(?:\s+(.*?)\s*)?$`)
regex.protocol.numericValue = regexp.MustCompile(`^\s+([^:]+):\s+([\d]+)\s*$`)
regex.protocol.routes = regexp.MustCompile(`^\s+Routes:\s+(.*)`)
regex.protocol.stringValue = regexp.MustCompile(`^\s+([^:]+):\s+(.+)\s*$`)
regex.protocol.routeChanges = regexp.MustCompile(`(Import|Export) (updates|withdraws):\s+(\d+|---)\s+(\d+|---)\s+(\d+|---)\s+(\d+|---)\s+(\d+|---)\s*$`)
regex.routes.startDefinition = regexp.MustCompile(`^([0-9a-f\.\:\/]+)\s+via\s+([0-9a-f\.\:]+)\s+on\s+([\w\.]+)\s+\[([\w\.:]+)\s+([0-9\-\:\s]+)(?:\s+from\s+([0-9a-f\.\:\/]+)){0,1}\]\s+(?:(\*)\s+){0,1}\((\d+)(?:\/\d+){0,1}\).*`)
regex.routes.second = regexp.MustCompile(`^\s+via\s+([0-9a-f\.\:]+)\s+on\s+([\w\.]+)\s+\[([\w\.:]+)\s+([0-9\-\:\s]+)(?:\s+from\s+([0-9a-f\.\:\/]+)){0,1}\]\s+(?:(\*)\s+){0,1}\((\d+)(?:\/\d+){0,1}\).*$`)
@ -79,6 +77,7 @@ func init() {
regex.routes.bgp = regexp.MustCompile(`^\s+BGP.(\w+):\s+(.+)\s*$`)
regex.routes.community = regexp.MustCompile(`^\((\d+),\s*(\d+)\)`)
regex.routes.largeCommunity = regexp.MustCompile(`^\((\d+),\s*(\d+),\s*(\d+)\)`)
regex.routes.extendedCommunity = regexp.MustCompile(`^\(([^,]+),\s*(\d+),\s*(\d+)\)`)
regex.routes.origin = regexp.MustCompile(`\([^\(]*\)\s*`)
regex.routes.prefixBird2 = regexp.MustCompile(`^([0-9a-f\.\:\/]+)?\s+unicast\s+\[([\w\.:]+)\s+([0-9\-\:\s]+)(?:\s+from\s+([0-9a-f\.\:\/]+))?\]\s+(?:(\*)\s+)?\((\d+)(?:\/\d+)?(?:\/[^\)]*)?\).*$`)
regex.routes.gatewayBird2 = regexp.MustCompile(`^\s+via\s+([0-9a-f\.\:]+)\s+on\s+([\w\.]+)\s*$`)
@ -135,7 +134,6 @@ func parseStatus(reader io.Reader) Parsed {
func parseProtocols(reader io.Reader) Parsed {
res := Parsed{}
protocols := []string{}
proto := ""
@ -145,7 +143,9 @@ func parseProtocols(reader io.Reader) Parsed {
if emptyString(line) {
if !emptyString(proto) {
protocols = append(protocols, proto)
parsed := parseProtocol(proto)
res[parsed["protocol"].(string)] = parsed
}
proto = ""
} else {
@ -153,8 +153,7 @@ func parseProtocols(reader io.Reader) Parsed {
}
}
res["protocols"] = protocols
return res
return Parsed{"protocols": res}
}
func parseSymbols(reader io.Reader) Parsed {
@ -170,23 +169,123 @@ func parseSymbols(reader io.Reader) Parsed {
if regex.symbols.keyRx.MatchString(line) {
groups := regex.symbols.keyRx.FindStringSubmatch(line)
res[groups[2]] = groups[1]
if _, ok := res[groups[2]]; !ok {
res[groups[2]] = []string{}
}
res[groups[2]] = append(res[groups[2]].([]string), groups[1])
}
}
return Parsed{"symbols": res}
}
func parseRoutes(reader io.Reader) Parsed {
res := Parsed{}
routes := []Parsed{}
route := Parsed{}
type blockJob struct {
lines []string
position int
}
type blockParsed struct {
items []Parsed
position int
}
func parseRoutes(reader io.Reader) Parsed {
jobs := make(chan blockJob)
out := startRouteWorkers(jobs)
res := startRouteConsumer(out)
defer close(res)
pos := 0
block := []string{}
lines := newLineIterator(reader, true)
for lines.next() {
line := lines.string()
if line[0] != 32 && line[0] != 9 && len(block) > 0 {
jobs <- blockJob{block, pos}
pos++
block = []string{}
}
block = append(block, line)
}
if len(block) > 0 {
jobs <- blockJob{block, pos}
}
close(jobs)
return <-res
}
func startRouteWorkers(jobs chan blockJob) chan blockParsed {
out := make(chan blockParsed)
wg := &sync.WaitGroup{}
wg.Add(WorkerPoolSize)
go func() {
for i := 0; i < WorkerPoolSize; i++ {
go workerForRouteBlockParsing(jobs, out, wg)
}
wg.Wait()
close(out)
}()
return out
}
func startRouteConsumer(out <-chan blockParsed) chan Parsed {
res := make(chan Parsed)
go func() {
byBlock := map[int][]Parsed{}
count := 0
for r := range out {
count++
byBlock[r.position] = r.items
}
res <- Parsed{"routes": sortedSliceForRouteBlocks(byBlock, count)}
}()
return res
}
func sortedSliceForRouteBlocks(byBlock map[int][]Parsed, numBlocks int) []Parsed {
res := []Parsed{}
for i := 0; i < numBlocks; i++ {
routes, ok := byBlock[i]
if !ok {
continue
}
res = append(res, routes...)
}
return res
}
func workerForRouteBlockParsing(jobs <-chan blockJob, out chan<- blockParsed, wg *sync.WaitGroup) {
for j := range jobs {
parseRouteLines(j.lines, j.position, out)
}
wg.Done()
}
func parseRouteLines(lines []string, position int, ch chan<- blockParsed) {
route := Parsed{}
routes := []Parsed{}
for i := 0; i < len(lines); {
line := lines[i]
if specialLine(line) {
i++
continue
}
@ -217,6 +316,28 @@ func parseRoutes(reader io.Reader) Parsed {
submatch := regex.routes.routeType.FindStringSubmatch(line)[1]
route["type"] = strings.Split(submatch, " ")
} else if regex.routes.bgp.MatchString(line) {
// BIRD has a static buffer to hold information which is sent to the client (birdc)
// If there is more information to be sent to the client than the buffer can hold,
// the output is split into multiple lines and the continuation of the previous
// line is indicated by 2 tab characters at the beginning of the next line
joinLines := func() {
for c := i+1; c < len(lines); c++ {
if strings.HasPrefix(lines[c], "\x09\x09") {
line += lines[c][2:]
i++
} else {
break
}
}
}
// The aforementioned behaviour was only observed for the *community fields
if strings.HasPrefix(line, "\x09BGP.community") ||
strings.HasPrefix(line, "\x09BGP.large_community") ||
strings.HasPrefix(line, "\x09BGP.ext_community") {
joinLines()
}
bgp := Parsed{}
if tmp, ok := route["bgp"]; ok {
if val, ok := tmp.(Parsed); ok {
@ -227,14 +348,15 @@ func parseRoutes(reader io.Reader) Parsed {
parseRoutesBgp(line, bgp)
route["bgp"] = bgp
}
i++
}
if len(route) > 0 {
routes = append(routes, route)
}
res["routes"] = routes
return res
ch <- blockParsed{routes, position}
}
func parseMainRouteDetail(groups []string, route Parsed) {
@ -307,6 +429,8 @@ func parseRoutesBgp(line string, bgp Parsed) {
parseRoutesCommunities(groups, bgp)
} else if groups[1] == "large_community" {
parseRoutesLargeCommunities(groups, bgp)
} else if groups[1] == "ext_community" {
parseRoutesExtendedCommunities(groups, bgp)
} else if groups[1] == "as_path" {
bgp["as_path"] = strings.Split(groups[2], " ")
} else {
@ -343,6 +467,19 @@ func parseRoutesLargeCommunities(groups []string, res Parsed) {
res["large_communities"] = communities
}
func parseRoutesExtendedCommunities(groups []string, res Parsed) {
communities := []interface{}{}
for _, community := range regex.routes.origin.FindAllString(groups[2], -1) {
if regex.routes.extendedCommunity.MatchString(community) {
communityGroups := regex.routes.extendedCommunity.FindStringSubmatch(community)
communities = append(communities, []interface{}{communityGroups[1], parseInt(communityGroups[2]), parseInt(communityGroups[3])})
}
}
res["ext_communities"] = communities
}
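// Example: a BGP.ext_community value of "(ro, 21414, 52001) (rt, 48858, 50)" is stored
// under "ext_communities" as [["ro", 21414, 52001], ["rt", 48858, 50]].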
func parseRoutesCount(reader io.Reader) Parsed {
res := Parsed{}
@ -371,19 +508,16 @@ func isCorrectChannel(currentIPVersion string) bool {
return currentIPVersion == IPVersion
}
func parseBgp(lines string) Parsed {
func parseProtocol(lines string) Parsed {
res := Parsed{}
routeChanges := Parsed{}
handlers := []func(string) bool{
func(l string) bool { return parseBgpProtocol(l, res) },
func(l string) bool { return parseBgpRouteLine(l, res) },
func(l string) bool { return parseBgpImportUpdates(l, routeChanges) },
func(l string) bool { return parseBgpImportWithdraws(l, routeChanges) },
func(l string) bool { return parseBgpExportUpdates(l, routeChanges) },
func(l string) bool { return parseBgpExportWithdraws(l, routeChanges) },
func(l string) bool { return parseBgpNumberValuesRx(l, res) },
func(l string) bool { return parseBgpStringValuesRx(l, res) },
func(l string) bool { return parseProtocolHeader(l, res) },
func(l string) bool { return parseProtocolRouteLine(l, res) },
func(l string) bool { return parseProtocolRouteChanges(l, routeChanges) },
func(l string) bool { return parseProtocolNumberValuesRx(l, res) },
func(l string) bool { return parseProtocolStringValuesRx(l, res) },
}
ipVersion := ""
@ -393,7 +527,7 @@ func parseBgp(lines string) Parsed {
for scanner.Scan() {
line := scanner.Text()
if m := regex.bgp.channel.FindStringSubmatch(line); len(m) > 0 {
if m := regex.protocol.channel.FindStringSubmatch(line); len(m) > 0 {
ipVersion = m[1]
}
@ -406,10 +540,10 @@ func parseBgp(lines string) Parsed {
if _, ok := res["routes"]; !ok {
routes := Parsed{}
routes["accepted"] = 0
routes["filtered"] = 0
routes["exported"] = 0
routes["preferred"] = 0
routes["accepted"] = int64(0)
routes["filtered"] = int64(0)
routes["exported"] = int64(0)
routes["preferred"] = int64(0)
res["routes"] = routes
}
@ -425,97 +559,62 @@ func parseLine(line string, handlers []func(string) bool) {
}
}
func parseBgpProtocol(line string, res Parsed) bool {
groups := regex.bgp.protocol.FindStringSubmatch(line)
func parseProtocolHeader(line string, res Parsed) bool {
groups := regex.protocol.protocol.FindStringSubmatch(line)
if groups == nil {
return false
}
res["protocol"] = groups[1]
res["bird_protocol"] = "BGP"
res["table"] = groups[2]
res["state"] = groups[3]
res["state_changed"] = groups[4]
res["connection"] = groups[5]
res["bird_protocol"] = groups[2]
res["table"] = groups[3]
res["state"] = groups[4]
res["state_changed"] = groups[5]
res["connection"] = groups[6] // TODO eliminate
if groups[2] == "Pipe" {
res["peer_table"] = groups[6][3:]
}
return true
}
func parseBgpRouteLine(line string, res Parsed) bool {
groups := regex.bgp.routes.FindStringSubmatch(line)
func parseProtocolRouteLine(line string, res Parsed) bool {
groups := regex.protocol.routes.FindStringSubmatch(line)
if groups == nil {
return false
}
routes := parseBgpRoutes(groups[1])
routes := parseProtocolRoutes(groups[1])
res["routes"] = routes
return true
}
func parseBgpImportUpdates(line string, res Parsed) bool {
groups := regex.bgp.importUpdates.FindStringSubmatch(line)
func setChangeCount(name string, value string, res Parsed) {
if value == "---" { // field not available for protocol
return
}
res[name] = parseInt(value)
}
func parseProtocolRouteChanges(line string, res Parsed) bool {
groups := regex.protocol.routeChanges.FindStringSubmatch(line)
if groups == nil {
return false
}
updates := Parsed{}
updates["received"] = parseInt(groups[1])
updates["rejected"] = parseInt(groups[2])
updates["filtered"] = parseInt(groups[3])
updates["ignored"] = parseInt(groups[4])
updates["accepted"] = parseInt(groups[5])
setChangeCount("received", groups[3], updates)
setChangeCount("rejected", groups[4], updates)
setChangeCount("filtered", groups[5], updates)
setChangeCount("ignored", groups[6], updates)
setChangeCount("accepted", groups[7], updates)
res["import_updates"] = updates
key := strings.ToLower(groups[1]) + "_" + groups[2]
res[key] = updates
return true
}
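// Example: "Import withdraws:  0  0  ---  0  0" yields res["import_withdraws"] with
// received/rejected/ignored/accepted set to 0 and "filtered" omitted ("---" column).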
func parseBgpImportWithdraws(line string, res Parsed) bool {
groups := regex.bgp.importWithdraws.FindStringSubmatch(line)
if groups == nil {
return false
}
updates := Parsed{}
updates["received"] = parseInt(groups[1])
updates["rejected"] = parseInt(groups[2])
updates["filtered"] = parseInt(groups[3])
updates["accepted"] = parseInt(groups[4])
res["import_withdraws"] = updates
return true
}
func parseBgpExportUpdates(line string, res Parsed) bool {
groups := regex.bgp.exportUpdates.FindStringSubmatch(line)
if groups == nil {
return false
}
updates := Parsed{}
updates["received"] = parseInt(groups[1])
updates["rejected"] = parseInt(groups[2])
updates["ignored"] = parseInt(groups[3])
updates["accepted"] = parseInt(groups[4])
res["export_updates"] = updates
return true
}
func parseBgpExportWithdraws(line string, res Parsed) bool {
groups := regex.bgp.exportWithdraws.FindStringSubmatch(line)
if groups == nil {
return false
}
updates := Parsed{}
updates["received"] = parseInt(groups[1])
updates["accepted"] = parseInt(groups[3])
res["export_withdraws"] = updates
return true
}
func parseBgpNumberValuesRx(line string, res Parsed) bool {
groups := regex.bgp.numericValue.FindStringSubmatch(line)
func parseProtocolNumberValuesRx(line string, res Parsed) bool {
groups := regex.protocol.numericValue.FindStringSubmatch(line)
if groups == nil {
return false
}
@ -525,8 +624,8 @@ func parseBgpNumberValuesRx(line string, res Parsed) bool {
return true
}
func parseBgpStringValuesRx(line string, res Parsed) bool {
groups := regex.bgp.stringValue.FindStringSubmatch(line)
func parseProtocolStringValuesRx(line string, res Parsed) bool {
groups := regex.protocol.stringValue.FindStringSubmatch(line)
if groups == nil {
return false
}
@ -547,13 +646,13 @@ func treatKey(key string) string {
func parseInt(from string) int64 {
val, err := strconv.ParseInt(from, 10, 64)
if err != nil {
return 0
return int64(0)
}
return val
}
func parseBgpRoutes(input string) Parsed {
func parseProtocolRoutes(input string) Parsed {
routes := Parsed{}
// Input: 1 imported, 0 filtered, 2 exported, 1 preferred

View file

@ -1,9 +1,13 @@
package bird
import (
"fmt"
"log"
"os"
"reflect"
"testing"
pretty "github.com/tonnerre/golang-pretty"
)
func openFile(filename string) (*os.File, error) {
@ -32,7 +36,7 @@ func TestParseBgpRoutes(t *testing.T) {
}
for i, in := range inputs {
routes := parseBgpRoutes(in)
routes := parseProtocolRoutes(in)
if !reflect.DeepEqual(routes, expected[i]) {
t.Error("Parse bgpRoutes:", routes, "expected:", expected[i])
}
@ -40,6 +44,25 @@ func TestParseBgpRoutes(t *testing.T) {
}
func TestParseProtocolBgp(t *testing.T) {
f, err := openFile("protocols_bgp_pipe.sample")
if err != nil {
t.Error(err)
}
defer f.Close()
p := parseProtocols(f)
log.Printf("%# v", pretty.Formatter(p))
protocols := p["protocols"].(Parsed)
if len(protocols) != 3 {
//log.Printf("%# v", pretty.Formatter(protocols))
t.Fatalf("Expected 3 protocols, found: %v", len(protocols))
}
fmt.Println(protocols)
}
func TestParseRoutesAllIpv4Bird1(t *testing.T) {
runTestForIpv4WithFile("routes_bird1_ipv4.sample", t)
}
@ -70,13 +93,64 @@ func runTestForIpv4WithFile(file string, t *testing.T) {
gateway: "1.2.3.16",
asPath: []string{"1340"},
community: [][]int64{
{65011, 3},
{9033, 3251},
{0, 5464},
{0, 8339},
{0, 8741},
{0, 8823},
{0, 12387},
{0, 13101},
{0, 16097},
{0, 16316},
{0, 20546},
{0, 20686},
{0, 20723},
{0, 21083},
{0, 21385},
{0, 24940},
{0, 25504},
{0, 28876},
{0, 29545},
{0, 30058},
{0, 31103},
{0, 31400},
{0, 39090},
{0, 39392},
{0, 39912},
{0, 42473},
{0, 43957},
{0, 44453},
{0, 47297},
{0, 47692},
{0, 48200},
{0, 50629},
{0, 51191},
{0, 51839},
{0, 51852},
{0, 54113},
{0, 56719},
{0, 57957},
{0, 60517},
{0, 60574},
{0, 61303},
{0, 62297},
{0, 62336},
{0, 62359},
{33891, 33892},
{33891, 50673},
{48793, 48793},
{50673, 500},
{65101, 11077},
{65102, 11000},
{65103, 724},
{65104, 150},
},
largeCommunities: [][]int64{
{9033, 65666, 12},
{9033, 65666, 9},
},
extendedCommunities: []interface{}{
[]interface{}{"rt", int64(48858), int64(50)},
},
metric: 100,
localPref: "100",
protocol: "ID8503_AS1340",
@ -95,6 +169,11 @@ func runTestForIpv4WithFile(file string, t *testing.T) {
{9033, 65666, 12},
{9033, 65666, 9},
},
extendedCommunities: []interface{}{
[]interface{}{"ro", int64(21414), int64(52001)},
[]interface{}{"ro", int64(21414), int64(52004)},
[]interface{}{"ro", int64(21414), int64(64515)},
},
metric: 100,
localPref: "100",
protocol: "ID8497_AS1339",
@ -113,6 +192,11 @@ func runTestForIpv4WithFile(file string, t *testing.T) {
{9033, 65666, 12},
{9033, 65666, 9},
},
extendedCommunities: []interface{}{
[]interface{}{"ro", int64(21414), int64(52001)},
[]interface{}{"ro", int64(21414), int64(52004)},
[]interface{}{"ro", int64(21414), int64(64515)},
},
metric: 100,
localPref: "100",
protocol: "ID8503_AS1340",
@ -131,6 +215,9 @@ func runTestForIpv4WithFile(file string, t *testing.T) {
{9033, 65666, 12},
{9033, 65666, 9},
},
extendedCommunities: []interface{}{
[]interface{}{"rt", int64(48858), int64(50)},
},
metric: 100,
localPref: "100",
protocol: "ID8503_AS1340",
@ -169,13 +256,66 @@ func runTestForIpv6WithFile(file string, t *testing.T) {
gateway: "fe80:ffff:ffff::1",
asPath: []string{"15169"},
community: [][]int64{
{9033, 3001},
{65000, 680},
{0, 5464},
{0, 8339},
{0, 8741},
{0, 8823},
{0, 12387},
{0, 13101},
{0, 16097},
{0, 16316},
{0, 20546},
{0, 20686},
{0, 20723},
{0, 21083},
{0, 21385},
{0, 24940},
{0, 25504},
{0, 28876},
{0, 29545},
{0, 30058},
{0, 31103},
{0, 31400},
{0, 39090},
{0, 39392},
{0, 39912},
{0, 42473},
{0, 43957},
{0, 44453},
{0, 47297},
{0, 47692},
{0, 48200},
{0, 50629},
{0, 51191},
{0, 51839},
{0, 51852},
{0, 54113},
{0, 56719},
{0, 57957},
{0, 60517},
{0, 60574},
{0, 61303},
{0, 62297},
{0, 62336},
{0, 62359},
{33891, 33892},
{33891, 50673},
{48793, 48793},
{50673, 500},
{65101, 11077},
{65102, 11000},
{65103, 724},
{65104, 150},
},
largeCommunities: [][]int64{
{48821, 0, 2000},
{48821, 0, 2100},
},
extendedCommunities: []interface{}{
[]interface{}{"ro", int64(21414), int64(52001)},
[]interface{}{"ro", int64(21414), int64(52004)},
[]interface{}{"ro", int64(21414), int64(64515)},
},
metric: 100,
localPref: "500",
primary: true,
@ -194,6 +334,11 @@ func runTestForIpv6WithFile(file string, t *testing.T) {
{48821, 0, 3000},
{48821, 0, 3100},
},
extendedCommunities: []interface{}{
[]interface{}{"ro", int64(21414), int64(52001)},
[]interface{}{"ro", int64(21414), int64(52004)},
[]interface{}{"ro", int64(21414), int64(64515)},
},
localPref: "100",
metric: 100,
primary: false,
@ -212,6 +357,9 @@ func runTestForIpv6WithFile(file string, t *testing.T) {
{48821, 0, 2000},
{48821, 0, 2100},
},
extendedCommunities: []interface{}{
[]interface{}{"unknown 0x4300", int64(0), int64(1)},
},
metric: 100,
localPref: "5000",
primary: true,
@ -257,6 +405,12 @@ func assertRouteIsEqual(expected expectedRoute, actual Parsed, name string, t *t
if largeCommunity := value(bgp, "large_communities", name, t).([][]int64); !reflect.DeepEqual(largeCommunity, expected.largeCommunities) {
t.Fatal(name, ": Expected large_community to be:", expected.largeCommunities, "not", largeCommunity)
}
if extendedCommunity, ok := bgp["ext_communities"]; ok {
if !reflect.DeepEqual(extendedCommunity.([]interface{}), expected.extendedCommunities) {
t.Fatal(name, ": Expected ext_community to be:", expected.extendedCommunities, "not", extendedCommunity)
}
}
}
func value(parsed Parsed, key, name string, t *testing.T) interface{} {
@ -269,14 +423,15 @@ func value(parsed Parsed, key, name string, t *testing.T) interface{} {
}
type expectedRoute struct {
network string
gateway string
asPath []string
community [][]int64
largeCommunities [][]int64
metric int64
protocol string
primary bool
localPref string
iface string
network string
gateway string
asPath []string
community [][]int64
largeCommunities [][]int64
extendedCommunities []interface{}
metric int64
protocol string
primary bool
localPref string
iface string
}

View file

@ -2,12 +2,15 @@ package bird
import (
"encoding/json"
"github.com/go-redis/redis"
"errors"
"time"
"github.com/go-redis/redis"
)
type RedisCache struct {
client *redis.Client
client *redis.Client
keyPrefix string
}
func NewRedisCache(config CacheConfig) (*RedisCache, error) {
@ -31,6 +34,7 @@ func NewRedisCache(config CacheConfig) (*RedisCache, error) {
}
func (self *RedisCache) Get(key string) (Parsed, error) {
key = self.keyPrefix + key //"B" + IPVersion + "_" + key
data, err := self.client.Get(key).Result()
if err != nil {
return NilParse, err
@ -39,15 +43,34 @@ func (self *RedisCache) Get(key string) (Parsed, error) {
parsed := Parsed{}
err = json.Unmarshal([]byte(data), &parsed)
return parsed, err
}
func (self *RedisCache) Set(key string, parsed Parsed) error {
payload, err := json.Marshal(parsed)
if err != nil {
return err
ttl, correct := parsed["ttl"].(time.Time)
if !correct {
return NilParse, errors.New("Invalid TTL value for key" + key)
}
_, err = self.client.Set(key, payload, time.Minute*5).Result()
return err
if ttl.Before(time.Now()) {
return NilParse, err // TTL expired
} else {
return parsed, err // cache hit
}
}
func (self *RedisCache) Set(key string, parsed Parsed, ttl int) error {
switch {
case ttl == 0:
return nil // do not cache
case ttl > 0:
key = self.keyPrefix + key //TODO "B" + IPVersion + "_" + key
payload, err := json.Marshal(parsed)
if err != nil {
return err
}
_, err = self.client.Set(key, payload, time.Duration(ttl)*time.Minute).Result()
return err
default: // ttl negative - invalid
return errors.New("Negative TTL value for key" + key)
}
}

View file

@ -23,7 +23,7 @@ func Test_RedisCacheAccess(t *testing.T) {
}
t.Log("Setting redis cache...")
err = cache.Set("testkey", parsed)
err = cache.Set("testkey", parsed, 5)
if err != nil {
t.Error(err)
}
@ -80,7 +80,7 @@ func Test_RedisCacheRoutes(t *testing.T) {
return
}
err = cache.Set("routes_protocol_test", parsed)
err = cache.Set("routes_protocol_test", parsed, 5)
if err != nil {
t.Error(err)
}

View file

@ -4,16 +4,19 @@ import (
"flag"
"log"
"net/http"
"os"
"strings"
"github.com/alice-lg/birdwatcher/bird"
"github.com/alice-lg/birdwatcher/endpoints"
"github.com/gorilla/handlers"
"github.com/julienschmidt/httprouter"
)
//go:generate versionize
var VERSION = "1.11.0"
var VERSION = "1.12.3"
func isModuleEnabled(module string, modulesEnabled []string) bool {
for _, enabled := range modulesEnabled {
@ -60,6 +63,9 @@ func makeRouter(config endpoints.ServerConfig) *httprouter.Router {
if isModuleEnabled("routes_count_table", whitelist) {
r.GET("/routes/count/table/:table", endpoints.Endpoint(endpoints.TableCount))
}
if isModuleEnabled("routes_count_primary", whitelist) {
r.GET("/routes/count/primary/:protocol", endpoints.Endpoint(endpoints.ProtoPrimaryCount))
}
if isModuleEnabled("routes_filtered", whitelist) {
r.GET("/routes/filtered/:protocol", endpoints.Endpoint(endpoints.RoutesFiltered))
}
@ -89,6 +95,7 @@ func PrintServiceInfo(conf *Config, birdConf bird.BirdConfig) {
log.Println("Starting Birdwatcher")
log.Println(" Using:", birdConf.BirdCmd)
log.Println(" Listen:", birdConf.Listen)
log.Println(" Cache TTL:", birdConf.CacheTtl)
// Endpoint Info
if len(conf.Server.AllowFrom) == 0 {
@ -112,20 +119,41 @@ func PrintServiceInfo(conf *Config, birdConf bird.BirdConfig) {
log.Println(" Per Peer Tables:", conf.Parser.PerPeerTables)
}
// MyLogger is our own log.Logger wrapper so we can customize it
type MyLogger struct {
logger *log.Logger
}
// Write implements the Write method of io.Writer
func (m *MyLogger) Write(p []byte) (n int, err error) {
m.logger.Print(string(p))
return len(p), nil
}
func main() {
// Disable timestamps for the default logger, as they are generated by the syslog implementation
log.SetFlags(log.Flags() &^ (log.Ldate | log.Ltime))
bird6 := flag.Bool("6", false, "Use bird6 instead of bird")
configfile := flag.String("config", "./etc/ecix/birdwatcher.conf", "Configuration file location")
workerPoolSize := flag.Int("worker-pool-size", 8, "Number of go routines used to parse routing tables concurrently")
configfile := flag.String("config", "/etc/birdwatcher/birdwatcher.conf", "Configuration file location")
flag.Parse()
endpoints.VERSION = VERSION
bird.InstallRateLimitReset()
// Load configurations
conf, err := LoadConfigs(ConfigOptions(*configfile))
bird.WorkerPoolSize = *workerPoolSize
conf, err := LoadConfigs([]string{*configfile})
if err != nil {
log.Fatal("Loading birdwatcher configuration failed:", err)
}
if conf.Server.EnableTLS {
if len(conf.Server.Crt) == 0 || len(conf.Server.Key) == 0 {
log.Fatalln("You have enabled TLS support. Please specify 'crt' and 'key' in birdwatcher config file.")
}
}
endpoints.VERSION = VERSION
bird.InstallRateLimitReset()
// Get config according to flags
birdConf := conf.Bird
if *bird6 {
@ -138,13 +166,23 @@ func main() {
// Configuration
bird.ClientConf = birdConf
bird.StatusConf = conf.Status
bird.RateLimitConf.Lock()
bird.RateLimitConf.Conf = conf.Ratelimit
bird.RateLimitConf.Unlock()
bird.ParserConf = conf.Parser
var cache bird.Cache
if conf.Cache.UseRedis {
bird.CacheRedis, err = bird.NewRedisCache(conf.Cache)
cache, err = bird.NewRedisCache(conf.Cache)
if err != nil {
log.Fatal("Could not initialize redis cache:", err)
log.Fatal("Could not initialize redis cache, falling back to MemoryCache:", err)
}
} else { // initialize the MemoryCache
cache, err = bird.NewMemoryCache()
if err != nil {
log.Fatal("Could not initialize MemoryCache:", err)
} else {
bird.InitializeCache(cache)
}
}
@ -152,5 +190,19 @@ func main() {
// Make server
r := makeRouter(conf.Server)
log.Fatal(http.ListenAndServe(birdConf.Listen, r))
// Set up our own custom log.Logger without a prefix
myquerylog := log.New(os.Stdout, "", 0)
// Disable timestamps, as they are contained in the query log
myquerylog.SetFlags(myquerylog.Flags() &^ (log.Ldate | log.Ltime))
mylogger := &MyLogger{myquerylog}
if conf.Server.EnableTLS {
if len(conf.Server.Crt) == 0 || len(conf.Server.Key) == 0 {
log.Fatalln("You have enabled TLS support but not specified both a .crt and a .key file in the config.")
}
log.Fatal(http.ListenAndServeTLS(birdConf.Listen, conf.Server.Crt, conf.Server.Key, handlers.LoggingHandler(mylogger, r)))
} else {
log.Fatal(http.ListenAndServe(birdConf.Listen, handlers.LoggingHandler(mylogger, r)))
}
}

View file

@ -7,9 +7,9 @@ import (
func TestLoadConfigs(t *testing.T) {
t.Log("Loading configs")
res, err := LoadConfigs([]string{
"./etc/ecix/birdwatcher.conf",
"/etc/ecix/birdwatcher.conf",
"./etc/ecix/birdwatcher.local.conf",
"./etc/birdwatcher/birdwatcher.conf",
"/etc/birdwatcher/birdwatcher.conf",
"./etc/birdwatcher/birdwatcher.local.conf",
})
t.Log(res)

View file

@ -0,0 +1,170 @@
## Semantics of birdc output
### Command `show protocols all`
Output is generated in `/nest/proto.c` (BIRD source code).
#### BGP protocol example (DE-CIX)
In `/nest/proto.c:1476` the method `proto_show_stats()` displays information from the statistics struct. All values originate from individual fields in the struct; there is no
redundant storage of information in BIRD.
`birdc show protocols all`
```
R194_129 BGP T1241_nada_ripe up 2018-06-21 17:42:44 Established
Description: Nada & Co.
Preference: 100
Input filter: (unnamed)
Output filter: (unnamed)
Import limit: 200000
Action: disable
Routes: 161 imported, 0 filtered, 164282 exported, 123189 preferred
Route change stats: received rejected filtered ignored accepted
Import updates: 161 0 0 0 161
Import withdraws: 0 0 --- 0 0
Export updates: 226412 322 21 --- 226069
Export withdraws: 67 --- --- --- 67
BGP state: Established
Neighbor address: 172.31.194.129
Neighbor AS: 1241
Neighbor ID: 172.31.194.129
Neighbor caps: refresh enhanced-refresh AS4
Session: external route-server AS4
Source address: 172.31.192.157
Route limit: 161/200000
Hold timer: 118/180
Keepalive timer: 33/60
```
The mapping between the values in the birdc output and the corresponding struct fields is evident
from the comments after the declarations in `/nest/protocol.h`:
```
/* Protocol statistics */
struct proto_stats {
/* Import - from protocol to core */
u32 imp_routes; /* Number of routes successfully imported to the (adjacent) routing table */
u32 filt_routes; /* Number of routes rejected in import filter but kept in the routing table */
u32 pref_routes; /* Number of routes that are preferred, sum over all routing tables */
u32 imp_updates_received; /* Number of route updates received */
u32 imp_updates_invalid; /* Number of route updates rejected as invalid */
u32 imp_updates_filtered; /* Number of route updates rejected by filters */
u32 imp_updates_ignored; /* Number of route updates rejected as already in route table */
u32 imp_updates_accepted; /* Number of route updates accepted and imported */
u32 imp_withdraws_received; /* Number of route withdraws received */
u32 imp_withdraws_invalid; /* Number of route withdraws rejected as invalid */
u32 imp_withdraws_ignored; /* Number of route withdraws rejected as already not in route table */
u32 imp_withdraws_accepted; /* Number of route withdraws accepted and processed */
/* Export - from core to protocol */
u32 exp_routes; /* Number of routes successfully exported to the protocol */
u32 exp_updates_received; /* Number of route updates received */
u32 exp_updates_rejected; /* Number of route updates rejected by protocol */
u32 exp_updates_filtered; /* Number of route updates rejected by filters */
u32 exp_updates_accepted; /* Number of route updates accepted and exported */
u32 exp_withdraws_received; /* Number of route withdraws received */
u32 exp_withdraws_accepted; /* Number of route withdraws accepted and processed */
};
```
What do the numbers in `Route limit: %d/%d` mean?
The first number is `stats.imp_routes + stats.filt_routes`, the second is the configured limit. In the protocol output, `stats.imp_routes` appears as `Routes: %d imported` and `stats.filt_routes` as `%d filtered`.
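As a worked example: for `R194_129` above, `imp_routes = 161` and `filt_routes = 0`, so birdc prints `Route limit: 161/200000`. A hypothetical Go helper (field names chosen here for illustration, loosely mirroring `proto_stats`) would be:
```go
package main

import "fmt"

// protoStats mirrors the two relevant fields of BIRD's proto_stats struct.
type protoStats struct {
	ImpRoutes  uint32 // printed as "Routes: %d imported"
	FiltRoutes uint32 // printed as "Routes: %d filtered"
}

// routeLimitUsed returns the number printed before the slash
// in "Route limit: %d/%d".
func routeLimitUsed(s protoStats) uint32 {
	return s.ImpRoutes + s.FiltRoutes
}

func main() {
	r194_129 := protoStats{ImpRoutes: 161, FiltRoutes: 0}
	fmt.Printf("Route limit: %d/%d\n", routeLimitUsed(r194_129), 200000) // Route limit: 161/200000
}
```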
Below is the complete protocol output for one example neighbor that has multiple routers.
```
M1241_nada_ripe Pipe master up 2018-06-21 17:39:31 => T1241_nada_ripe
Description: Nada & Co.
Preference: 70
Input filter: in_nada_ripe
Output filter: (unnamed)
Routes: 455 imported, 281241 exported
Route change stats: received rejected filtered ignored accepted
Import updates: 329913 329425 33 0 455
Import withdraws: 1249 0 --- 0 0
Export updates: 486963 455 157083 0 329425
Export withdraws: 1249 0 --- 0 1249
C1241_nada_ripe Pipe Collector up 2018-06-21 17:39:31 => T1241_nada_ripe
Description: Nada & Co.
Preference: 70
Input filter: in_nada_ripe
Output filter: REJECT
Routes: 455 imported, 0 exported
Route change stats: received rejected filtered ignored accepted
Import updates: 329913 0 329458 0 455
Import withdraws: 1249 0 --- 0 0
Export updates: 440307 455 439852 0 0
Export withdraws: 1252 0 --- 1252 0
R194_129 BGP T1241_nada_ripe up 2018-06-21 17:42:44 Established
Description: Nada & Co.
Preference: 100
Input filter: (unnamed)
Output filter: (unnamed)
Import limit: 200000
Action: disable
Routes: 161 imported, 0 filtered, 164282 exported, 123189 preferred
Route change stats: received rejected filtered ignored accepted
Import updates: 161 0 0 0 161
Import withdraws: 0 0 --- 0 0
Export updates: 226412 322 21 --- 226069
Export withdraws: 67 --- --- --- 67
BGP state: Established
Neighbor address: 172.31.194.129
Neighbor AS: 1241
Neighbor ID: 172.31.194.129
Neighbor caps: refresh enhanced-refresh AS4
Session: external route-server AS4
Source address: 172.31.192.157
Route limit: 161/200000
Hold timer: 118/180
Keepalive timer: 33/60
R195_130 BGP T1241_nada_ripe start 2018-06-21 17:39:31 Passive
Description: Nada & Co.
Preference: 100
Input filter: (unnamed)
Output filter: (unnamed)
Import limit: 200000
Action: disable
Routes: 0 imported, 0 filtered, 0 exported, 0 preferred
Route change stats: received rejected filtered ignored accepted
Import updates: 0 0 0 0 0
Import withdraws: 0 0 --- 0 0
Export updates: 0 0 0 --- 0
Export withdraws: 0 --- --- --- 0
BGP state: Passive
Neighbor address: 172.31.195.130
Neighbor AS: 1241
R193_231 BGP T1241_nada_ripe up 2018-06-21 17:50:04 Established
Description: Nada & Co.
Preference: 100
Input filter: (unnamed)
Output filter: (unnamed)
Import limit: 200000
Action: disable
Routes: 158 imported, 0 filtered, 164091 exported, 8291 preferred
Route change stats: received rejected filtered ignored accepted
Import updates: 158 0 0 0 158
Import withdraws: 0 0 --- 0 0
Export updates: 220336 20 172 --- 220144
Export withdraws: 67 --- --- --- 67
BGP state: Established
Neighbor address: 172.31.193.231
Neighbor AS: 1241
Neighbor ID: 172.31.193.231
Neighbor caps: refresh enhanced-refresh AS4
Session: external route-server AS4
Source address: 172.31.192.157
Route limit: 158/200000
Hold timer: 143/180
Keepalive timer: 33/60
```
### Command `show route`
Example birdc output:
`198.49.1.0/24 via 172.31.194.42 on eno2 [R194_42 2018-07-27 18:47:27] * (100) [AS715i]`
* `next-hop`: self-explanatory
* `learnt_from`: the IP address after `from` (not present in this example)
* `gateway`: the IP address after `via`
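A minimal Go sketch of how `gateway`, the interface and the optional `learnt_from` address could be extracted from such a line follows. This is for illustration only and is not the birdwatcher parser; the regular expression and all names are assumptions.
```go
package main

import (
	"fmt"
	"regexp"
)

// routeRe captures gateway ("via"), interface ("on"), protocol name,
// timestamp and the optional "from" address of a 'show route' line.
var routeRe = regexp.MustCompile(
	`via ([0-9a-fA-F.:]+) on (\S+) \[(\S+) ([0-9:\- ]+?)(?: from ([0-9a-fA-F.:]+))?\]`)

func main() {
	line := `198.49.1.0/24 via 172.31.194.42 on eno2 [R194_42 2018-07-27 18:47:27] * (100) [AS715i]`
	m := routeRe.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Println("gateway:    ", m[1]) // 172.31.194.42
	fmt.Println("interface:  ", m[2]) // eno2
	fmt.Println("protocol:   ", m[3]) // R194_42
	fmt.Println("age:        ", m[4]) // 2018-07-27 18:47:27
	fmt.Println("learnt_from:", m[5]) // empty, no "from" in this example
}
```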

View file

@ -4,4 +4,8 @@ package endpoints
type ServerConfig struct {
AllowFrom []string `toml:"allow_from"`
ModulesEnabled []string `toml:"modules_enabled"`
EnableTLS bool `toml:"enable_tls"`
Crt string `toml:"crt"`
Key string `toml:"key"`
}

View file

@ -54,8 +54,8 @@ func Endpoint(wrapped endpoint) httprouter.Handle {
}
res := make(map[string]interface{})
ret, from_cache := wrapped(r, ps)
if reflect.DeepEqual(ret, bird.NilParse) {
w.WriteHeader(http.StatusTooManyRequests)
return
@ -67,7 +67,7 @@ func Endpoint(wrapped endpoint) httprouter.Handle {
w.Write(js)
return
}
res["api"] = GetApiInfo(from_cache)
res["api"] = GetApiInfo(&ret, from_cache)
for k, v := range ret {
res[k] = v

View file

@ -14,7 +14,6 @@ func TestValidateProtocol(t *testing.T) {
invalidProtocols := []string{
"ID421_AS11171_123.8.127.lö19",
"Test123",
"ThisValueIsTooLong12345678901234567890123456789012345678901234567890123456789012345678901234567890",
}

View file

@ -58,8 +58,16 @@ func ProtoCount(r *http.Request, ps httprouter.Params) (bird.Parsed, bool) {
return bird.RoutesProtoCount(protocol)
}
func ProtoPrimaryCount(r *http.Request, ps httprouter.Params) (bird.Parsed, bool) {
protocol, err := ValidateProtocolParam(ps.ByName("protocol"))
if err != nil {
return bird.Parsed{"error": fmt.Sprintf("%s", err)}, false
}
return bird.RoutesProtoPrimaryCount(protocol)
}
func TableCount(r *http.Request, ps httprouter.Params) (bird.Parsed, bool) {
return bird.RoutesTable(ps.ByName("table"))
return bird.RoutesTableCount(ps.ByName("table"))
}
func RouteNet(r *http.Request, ps httprouter.Params) (bird.Parsed, bool) {

View file

@ -13,10 +13,16 @@ func Symbols(r *http.Request, ps httprouter.Params) (bird.Parsed, bool) {
func SymbolTables(r *http.Request, ps httprouter.Params) (bird.Parsed, bool) {
val, from_cache := bird.Symbols()
return bird.Parsed{"symbols": val["routing table"]}, from_cache
if bird.IsSpecial(val) {
return val, from_cache
}
return bird.Parsed{"symbols": val["symbols"].(bird.Parsed)["routing table"]}, from_cache
}
func SymbolProtocols(r *http.Request, ps httprouter.Params) (bird.Parsed, bool) {
val, from_cache := bird.Symbols()
return bird.Parsed{"symbols": val["protocols"]}, from_cache
if bird.IsSpecial(val) {
return val, from_cache
}
return bird.Parsed{"symbols": val["symbols"].(bird.Parsed)["protocol"]}, from_cache
}

View file

@ -1,13 +1,18 @@
package endpoints
import (
"time"
"github.com/alice-lg/birdwatcher/bird"
)
type TimeInfo struct {
Date string `json:"date"`
TimezoneType string `json:"timezone_type"`
Timezone string `json:"timezone"`
Date time.Time `json:"date"`
TimezoneType string `json:"timezone_type"`
Timezone string `json:"timezone"`
}
type CacheStatus struct {
OrigTTL int `json:"orig_ttl"`
CachedAt TimeInfo `json:"cached_at"`
}
@ -20,11 +25,39 @@ type APIInfo struct {
// go generate does not work in subdirectories. Beautious.
var VERSION string
func GetApiInfo(from_cache bool) *APIInfo {
func GetApiInfo(res *bird.Parsed, from_cache bool) *APIInfo {
ai := &APIInfo{}
ai.Version = VERSION
ai.ResultFromCache = from_cache
api := *res
// Derive cache status from TTL
cachedAt, ok := api["cached_at"].(time.Time)
if !ok {
cachedAt = time.Time{}
}
// tbh. I have no clue what the difference between
// timezone type and timezone actually is.
// I could trace back the timezonetype to the symphony framework
// Barry was using; the docs say it accepts timezones like
// "America/New_York", however nothing about UTC could be found.
//
// As we convert everything to UTC and let the client
// render it in local time, it is safe to set this to a fixed
// value.
cacheInfo := CacheStatus{
CachedAt: TimeInfo{
Date: cachedAt,
TimezoneType: "UTC",
Timezone: "UTC",
},
}
ai.CacheStatus = cacheInfo
return ai
}

View file

@ -6,24 +6,36 @@
# Restrict access to certain IPs. Leave empty to allow from all.
allow_from = []
# All modules:
# Available modules:
## low-level modules (translation from birdc output to JSON objects)
# status
# protocols
# protocols_bgp
# symbols
# symbols_tables
# symbols_protocols
# protocols
# protocols_bgp
# routes_protocol
# routes_table
# routes_count_protocol
# routes_count_table
# route_net
# routes_count_primary
# routes_filtered
# routes_prefixed
# routes_noexport
# route_net
## high-level modules (aggregated data from multiple birdc invocations)
# routes_dump
#
modules_enabled = ["status", "protocols_bgp", "routes_protocol", "routes_peer"]
# routes_peer
modules_enabled = ["status",
"protocols",
"protocols_bgp",
"routes_protocol",
"routes_peer",
"routes_prefixed",
"routes_dump"
]
[status]
#
@ -45,12 +57,13 @@ requests_per_minute = 10
listen = "0.0.0.0:29184"
config = "/etc/bird.conf"
birdc = "/sbin/birdc"
ttl = 5 # time to live (in minutes) for caching of cli output
[bird6]
listen = "0.0.0.0:29186"
config = "/etc/bird6.conf"
birdc = "/sbin/birdc6"
ttl = 5 # time to live (in minutes) for caching of cli output
[parser]
# Remove fields e.g. interface

View file

@ -1,11 +1,11 @@
[Unit]
Description=BIRDwatcher IPv4
Description=BIRDwatcher IPv4
Wants=network.target
After=network.target
[Service]
Type=simple
ExecStart=/opt/ecix/birdwatcher/bin/birdwatcher-linux-amd64
ExecStart=/opt/birdwatcher/birdwatcher/bin/birdwatcher-linux-amd64
[Install]
WantedBy=multi-user.target

View file

@ -1,11 +1,11 @@
[Unit]
Description=BIRDwatcher IPv6
Description=BIRDwatcher IPv6
Wants=network.target
After=network.target
[Service]
Type=simple
ExecStart=/opt/ecix/birdwatcher/bin/birdwatcher-linux-amd64 -6
ExecStart=/opt/birdwatcher/birdwatcher/bin/birdwatcher-linux-amd64 -6
[Install]
WantedBy=multi-user.target

View file

@ -1,5 +1,5 @@
# ECIX birdwatcher
# birdwatcher
description "birdwatcher ipv4"
author "Matthias Hannig <mha@ecix.net>"
@ -10,5 +10,4 @@ respawn limit 20 10
start on starting birdwatcher
stop on stopping birdwatcher
exec /opt/ecix/birdwatcher/bin/birdwatcher-linux-amd64 2>&1 | logger -i -t 'BIRD4 WATCHER'
exec /opt/birdwatcher/birdwatcher/bin/birdwatcher-linux-amd64 2>&1 | logger -i -t 'BIRD4 WATCHER'

View file

@ -1,5 +1,5 @@
# ECIX birdwatcher
# birdwatcher
description "birdwatcher ipv6"
author "Matthias Hannig <mha@ecix.net>"
@ -10,5 +10,4 @@ respawn limit 20 10
start on starting birdwatcher
stop on stopping birdwatcher
exec /opt/ecix/birdwatcher/bin/birdwatcher-linux-amd64 -6 2>&1 | logger -i -t 'BIRD6 WATCHER'
exec /opt/birdwatcher/birdwatcher/bin/birdwatcher-linux-amd64 -6 2>&1 | logger -i -t 'BIRD6 WATCHER'

View file

@ -0,0 +1,43 @@
M65001_nada_co_ripe Pipe master up 2018-05-31 15:38:58 => T65001_nada_co_ripe
Description: Nada Co
Preference: 70
Input filter: in_nada_co_ripe
Output filter: (unnamed)
Routes: 688 imported, 247259 exported
Route change stats: received rejected filtered ignored accepted
Import updates: 250795 250085 22 0 688
Import withdraws: 3 0 --- 0 0
Export updates: 803234 1376 307334 247262 247262
Export withdraws: 3 0 --- 0 3
C65003_nada2_co_ripe Pipe Collector 2018-05-31 16:39:01 => T65003_nada2_co_ripe
Description: Nada2 Co
Preference: 70
Input filter: in_nada2_co_ripe
Output filter: REJECT
R194_42 BGP T65001_nada_co_ripe up 2018-05-31 15:38:40 Established
Description: Nada Co
Preference: 100
Input filter: (unnamed)
Output filter: (unnamed)
Import limit: 200000
Action: disable
Routes: 710 imported, 0 filtered, 154998 exported, 376688 preferred
Route change stats: received rejected filtered ignored accepted
Import updates: 710 0 0 0 710
Import withdraws: 0 0 --- 0 0
Export updates: 172100 710 0 --- 171390
Export withdraws: 0 --- --- --- 0
BGP state: Established
Neighbor address: 172.31.194.42
Neighbor AS: 1764
Neighbor ID: 172.31.194.42
Neighbor caps: refresh enhanced-refresh AS4
Session: external route-server AS4
Source address: 172.31.192.157
Route limit: 710/200000
Hold timer: 151/180
Keepalive timer: 43/60
Last error: Socket: Connection closed

View file

@ -5,8 +5,10 @@ BIRD 1.6.3 ready.
BGP.as_path: 1340
BGP.next_hop: 1.2.3.16
BGP.local_pref: 100
BGP.community: (65011,3) (9033,3251)
BGP.community: (0,5464) (0,8339) (0,8741) (0,8823) (0,12387) (0,13101) (0,16097) (0,16316) (0,20546) (0,20686) (0,20723) (0,21083) (0,21385) (0,24940) (0,25504) (0,28876) (0,29545) (0,30058) (0,31103) (0,31400) (0,39090) (0,39392) (0,39912) (0,42473) (0,43957) (0,44453) (0,47297) (0,47692) (0,48200) (0,50629) (0,51191) (0,51839) (0,51852) (0,54113) (0,56719) (0,57957) (0,60517) (0,60574) (0,61303) (0,62297) (0,62336) (0,62359) (33891,33892) (33891,50673) (48793,48793) (50673,500)
(65101,11077) (65102,11000) (65103,724) (65104,150)
BGP.large_community: (9033, 65666, 12) (9033, 65666, 9)
BGP.ext_community: (rt, 48858, 50)
200.0.0.0/24 via 1.2.3.15 on eno7 [ID8497_AS1339 2017-06-21 08:17:31] * (100) [AS1339i]
Type: BGP unicast univ
BGP.origin: IGP
@ -15,6 +17,7 @@ BIRD 1.6.3 ready.
BGP.local_pref: 100
BGP.community: (65011,40) (9033,3251)
BGP.large_community: (9033, 65666, 12) (9033, 65666, 9)
BGP.ext_community: (ro, 21414, 52001) (ro, 21414, 52004) (ro, 21414, 64515)
via 1.2.3.16 on eno8 [ID8503_AS1340 2017-06-21 08:17:33] (100) [AS1340i]
Type: BGP unicast univ
BGP.origin: IGP
@ -23,6 +26,7 @@ BIRD 1.6.3 ready.
BGP.local_pref: 100
BGP.community: (65011,3) (9033,3251)
BGP.large_community: (9033, 65666, 12) (9033, 65666, 9)
BGP.ext_community: (ro, 21414, 52001) (ro, 21414, 52004) (ro, 21414, 64515)
16.0.0.0/24 via 1.2.3.16 on eno7 [ID8503_AS1340 2017-06-21 08:17:33] * (100) [AS1340i]
Type: BGP unicast univ
BGP.origin: IGP
@ -31,3 +35,4 @@ BIRD 1.6.3 ready.
BGP.local_pref: 100
BGP.community: (65011,3) (9033,3251)
BGP.large_community: (9033, 65666, 12) (9033, 65666, 9)
BGP.ext_community: (rt, 48858, 50)

View file

@ -1,27 +1,31 @@
BIRD 1.6.3 ready.
2001:4860::/32 via fe80:ffff:ffff::1 on eth2 [upstream1 2018-01-14 14:32:47 from fe80:ffff:ffff::1] * (100) [AS15169i]
Type: BGP unicast univ
BGP.origin: IGP
BGP.as_path: 15169
BGP.next_hop: fe80:ffff:ffff::1
BGP.med: 0
BGP.local_pref: 500
BGP.community: (9033,3001) (65000,680)
BGP.large_community: (48821, 0, 2000) (48821, 0, 2100)
via fe80:ffff:ffff::2 on eth3 [upstream2 2018-01-14 14:33:52] (100) [AS15169i]
Type: BGP unicast univ
BGP.origin: IGP
BGP.as_path: 50629 15169
BGP.next_hop: fe80:ffff:ffff::2
BGP.med: 71
BGP.local_pref: 100
BGP.community: (50629,200) (50629,201)
BGP.large_community: (48821, 0, 3000) (48821, 0, 3100)
Type: BGP unicast univ
BGP.origin: IGP
BGP.as_path: 15169
BGP.next_hop: fe80:ffff:ffff::1
BGP.med: 0
BGP.local_pref: 500
BGP.community: (0,5464) (0,8339) (0,8741) (0,8823) (0,12387) (0,13101) (0,16097) (0,16316) (0,20546) (0,20686) (0,20723) (0,21083) (0,21385) (0,24940) (0,25504) (0,28876) (0,29545) (0,30058) (0,31103) (0,31400) (0,39090) (0,39392) (0,39912) (0,42473) (0,43957) (0,44453) (0,47297) (0,47692) (0,48200) (0,50629) (0,51191) (0,51839) (0,51852) (0,54113) (0,56719) (0,57957) (0,60517) (0,60574) (0,61303) (0,62297) (0,62336) (0,62359) (33891,33892) (33891,50673) (48793,48793) (50673,500)
(65101,11077) (65102,11000) (65103,724) (65104,150)
BGP.large_community: (48821, 0, 2000) (48821, 0, 2100)
BGP.ext_community: (ro, 21414, 52001) (ro, 21414, 52004) (ro, 21414, 64515)
via fe80:ffff:ffff::2 on eth3 [upstream2 2018-01-14 14:33:52] (100) [AS15169i]
Type: BGP unicast univ
BGP.origin: IGP
BGP.as_path: 50629 15169
BGP.next_hop: fe80:ffff:ffff::2
BGP.med: 71
BGP.local_pref: 100
BGP.community: (50629,200) (50629,201)
BGP.large_community: (48821, 0, 3000) (48821, 0, 3100)
BGP.ext_community: (ro, 21414, 52001) (ro, 21414, 52004) (ro, 21414, 64515)
2001:678:1e0::/48 via fe80:ffff:ffff::2 on eth2 [upstream2 2018-01-14 15:04:17 from 2001:678:1e0::2] * (100) [AS202739i]
Type: BGP unicast univ
BGP.origin: IGP
BGP.as_path: 202739
BGP.next_hop: 2001:678:1e0::2
BGP.local_pref: 5000
BGP.community: (48821,2000) (48821,2100)
BGP.large_community: (48821, 0, 2000) (48821, 0, 2100)
Type: BGP unicast univ
BGP.origin: IGP
BGP.as_path: 202739
BGP.next_hop: 2001:678:1e0::2
BGP.local_pref: 5000
BGP.community: (48821,2000) (48821,2100)
BGP.large_community: (48821, 0, 2000) (48821, 0, 2100)
BGP.ext_community: (unknown 0x4300, 0, 1)

View file

@ -6,8 +6,10 @@ BIRD 1.6.3 ready.
BGP.as_path: 1340
BGP.next_hop: 1.2.3.16
BGP.local_pref: 100
BGP.community: (65011,3) (9033,3251)
BGP.community: (0,5464) (0,8339) (0,8741) (0,8823) (0,12387) (0,13101) (0,16097) (0,16316) (0,20546) (0,20686) (0,20723) (0,21083) (0,21385) (0,24940) (0,25504) (0,28876) (0,29545) (0,30058) (0,31103) (0,31400) (0,39090) (0,39392) (0,39912) (0,42473) (0,43957) (0,44453) (0,47297) (0,47692) (0,48200) (0,50629) (0,51191) (0,51839) (0,51852) (0,54113) (0,56719) (0,57957) (0,60517) (0,60574) (0,61303) (0,62297) (0,62336) (0,62359) (33891,33892) (33891,50673) (48793,48793) (50673,500)
(65101,11077) (65102,11000) (65103,724) (65104,150)
BGP.large_community: (9033, 65666, 12) (9033, 65666, 9)
BGP.ext_community: (rt, 48858, 50)
200.0.0.0/24 unicast [ID8497_AS1339 2017-06-21 08:17:31] * (100) [AS1339i]
via 1.2.3.15 on eno7
Type: BGP univ
@ -17,7 +19,8 @@ BIRD 1.6.3 ready.
BGP.local_pref: 100
BGP.community: (65011,40) (9033,3251)
BGP.large_community: (9033, 65666, 12) (9033, 65666, 9)
unicast [ID8503_AS1340 2017-06-21 08:17:33] (100/?) [AS1340i]
BGP.ext_community: (ro, 21414, 52001) (ro, 21414, 52004) (ro, 21414, 64515)
unicast [ID8503_AS1340 2017-06-21 08:17:33] (100/?) [AS1340i]
via 1.2.3.16 on eno8
Type: BGP univ
BGP.origin: IGP
@ -26,6 +29,7 @@ BIRD 1.6.3 ready.
BGP.local_pref: 100
BGP.community: (65011,3) (9033,3251)
BGP.large_community: (9033, 65666, 12) (9033, 65666, 9)
BGP.ext_community: (ro, 21414, 52001) (ro, 21414, 52004) (ro, 21414, 64515)
16.0.0.0/24 unicast [ID8503_AS1340 2017-06-21 08:17:33] * (100) [AS1340i]
via 1.2.3.16 on eno7
Type: BGP univ
@ -35,3 +39,4 @@ BIRD 1.6.3 ready.
BGP.local_pref: 100
BGP.community: (65011,3) (9033,3251)
BGP.large_community: (9033, 65666, 12) (9033, 65666, 9)
BGP.ext_community: (rt, 48858, 50)

View file

@ -7,8 +7,10 @@ BIRD 2.0.0 ready.
BGP.next_hop: fe80:ffff:ffff::1
BGP.med: 0
BGP.local_pref: 500
BGP.community: (9033,3001) (65000,680)
BGP.community: (0,5464) (0,8339) (0,8741) (0,8823) (0,12387) (0,13101) (0,16097) (0,16316) (0,20546) (0,20686) (0,20723) (0,21083) (0,21385) (0,24940) (0,25504) (0,28876) (0,29545) (0,30058) (0,31103) (0,31400) (0,39090) (0,39392) (0,39912) (0,42473) (0,43957) (0,44453) (0,47297) (0,47692) (0,48200) (0,50629) (0,51191) (0,51839) (0,51852) (0,54113) (0,56719) (0,57957) (0,60517) (0,60574) (0,61303) (0,62297) (0,62336) (0,62359) (33891,33892) (33891,50673) (48793,48793) (50673,500)
(65101,11077) (65102,11000) (65103,724) (65104,150)
BGP.large_community: (48821, 0, 2000) (48821, 0, 2100)
BGP.ext_community: (ro, 21414, 52001) (ro, 21414, 52004) (ro, 21414, 64515)
unicast [upstream2 2018-01-14 13:07:26 from fe80:ffff:ffff::2] (100/?) [AS15169i]
via fe80:ffff:ffff::2 on eth3
Type: BGP univ
@ -19,6 +21,7 @@ BIRD 2.0.0 ready.
BGP.local_pref: 100
BGP.community: (50629,200) (50629,201)
BGP.large_community: (48821, 0, 3000) (48821, 0, 3100)
BGP.ext_community: (ro, 21414, 52001) (ro, 21414, 52004) (ro, 21414, 64515)
2001:678:1e0::/48 unicast [upstream2 2018-01-15 20:31:39 from fe80:ffff:ffff::2] * (100/?) [i]
via fe80:ffff:ffff::2 on eth2
Type: BGP univ
@ -27,4 +30,5 @@ BIRD 2.0.0 ready.
BGP.next_hop: 2001:678:1e0::2
BGP.local_pref: 5000
BGP.community: (48821,2000) (48821,2100)
BGP.large_community: (48821, 0, 2000) (48821, 0, 2100)
BGP.large_community: (48821, 0, 2000) (48821, 0, 2100)
BGP.ext_community: (unknown 0x4300, 0, 1)

20
vendor/github.com/gorilla/handlers/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,20 @@
language: go
sudo: false
matrix:
include:
- go: 1.4
- go: 1.5
- go: 1.6
- go: 1.7
- go: 1.8
- go: tip
allow_failures:
- go: tip
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- go vet $(go list ./... | grep -v /vendor/)
- go test -v -race ./...

22
vendor/github.com/gorilla/handlers/LICENSE generated vendored Normal file
View file

@ -0,0 +1,22 @@
Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

55
vendor/github.com/gorilla/handlers/README.md generated vendored Normal file
View file

@ -0,0 +1,55 @@
gorilla/handlers
================
[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) [![Build Status](https://travis-ci.org/gorilla/handlers.svg?branch=master)](https://travis-ci.org/gorilla/handlers)
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/handlers/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/handlers?badge)
Package handlers is a collection of handlers (aka "HTTP middleware") for use
with Go's `net/http` package (or any framework supporting `http.Handler`), including:
* [**LoggingHandler**](https://godoc.org/github.com/gorilla/handlers#LoggingHandler) for logging HTTP requests in the Apache [Common Log
Format](http://httpd.apache.org/docs/2.2/logs.html#common).
* [**CombinedLoggingHandler**](https://godoc.org/github.com/gorilla/handlers#CombinedLoggingHandler) for logging HTTP requests in the Apache [Combined Log
Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by
both Apache and nginx.
* [**CompressHandler**](https://godoc.org/github.com/gorilla/handlers#CompressHandler) for gzipping responses.
* [**ContentTypeHandler**](https://godoc.org/github.com/gorilla/handlers#ContentTypeHandler) for validating requests against a list of accepted
content types.
* [**MethodHandler**](https://godoc.org/github.com/gorilla/handlers#MethodHandler) for matching HTTP methods against handlers in a
`map[string]http.Handler`
* [**ProxyHeaders**](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) for populating `r.RemoteAddr` and `r.URL.Scheme` based on the
`X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded`
headers when running a Go server behind a HTTP reverse proxy.
* [**CanonicalHost**](https://godoc.org/github.com/gorilla/handlers#CanonicalHost) for re-directing to the preferred host when handling multiple
domains (i.e. multiple CNAME aliases).
* [**RecoveryHandler**](https://godoc.org/github.com/gorilla/handlers#RecoveryHandler) for recovering from unexpected panics.
Other handlers are documented [on the Gorilla
website](http://www.gorillatoolkit.org/pkg/handlers).
## Example
A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`:
```go
import (
"net/http"
"github.com/gorilla/handlers"
)
func main() {
r := http.NewServeMux()
// Only log requests to our admin dashboard to stdout
r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard)))
r.HandleFunc("/", ShowIndex)
// Wrap our server with our gzip handler to gzip compress all responses.
http.ListenAndServe(":8000", handlers.CompressHandler(r))
}
```
## License
BSD licensed. See the included LICENSE file for details.

74
vendor/github.com/gorilla/handlers/canonical.go generated vendored Normal file
View file

@ -0,0 +1,74 @@
package handlers
import (
"net/http"
"net/url"
"strings"
)
type canonical struct {
h http.Handler
domain string
code int
}
// CanonicalHost is HTTP middleware that re-directs requests to the canonical
// domain. It accepts a domain and a status code (e.g. 301 or 302) and
// re-directs clients to this domain. The existing request path is maintained.
//
// Note: If the provided domain is considered invalid by url.Parse or otherwise
// returns an empty scheme or host, clients are not re-directed.
//
// Example:
//
// r := mux.NewRouter()
// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302)
// r.HandleFunc("/route", YourHandler)
//
// log.Fatal(http.ListenAndServe(":7000", canonical(r)))
//
func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler {
fn := func(h http.Handler) http.Handler {
return canonical{h, domain, code}
}
return fn
}
func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) {
dest, err := url.Parse(c.domain)
if err != nil {
// Call the next handler if the provided domain fails to parse.
c.h.ServeHTTP(w, r)
return
}
if dest.Scheme == "" || dest.Host == "" {
// Call the next handler if the scheme or host are empty.
// Note that url.Parse won't fail on in this case.
c.h.ServeHTTP(w, r)
return
}
if !strings.EqualFold(cleanHost(r.Host), dest.Host) {
// Re-build the destination URL
dest := dest.Scheme + "://" + dest.Host + r.URL.Path
if r.URL.RawQuery != "" {
dest += "?" + r.URL.RawQuery
}
http.Redirect(w, r, dest, c.code)
return
}
c.h.ServeHTTP(w, r)
}
// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '.
// This is backported from Go 1.5 (in response to issue #11206) and attempts to
// mitigate malformed Host headers that do not match the format in RFC7230.
func cleanHost(in string) string {
if i := strings.IndexAny(in, " /"); i != -1 {
return in[:i]
}
return in
}

127
vendor/github.com/gorilla/handlers/canonical_test.go generated vendored Normal file
View file

@ -0,0 +1,127 @@
package handlers
import (
"bufio"
"bytes"
"log"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
)
func TestCleanHost(t *testing.T) {
tests := []struct {
in, want string
}{
{"www.google.com", "www.google.com"},
{"www.google.com foo", "www.google.com"},
{"www.google.com/foo", "www.google.com"},
{" first character is a space", ""},
}
for _, tt := range tests {
got := cleanHost(tt.in)
if tt.want != got {
t.Errorf("cleanHost(%q) = %q, want %q", tt.in, got, tt.want)
}
}
}
func TestCanonicalHost(t *testing.T) {
gorilla := "http://www.gorillatoolkit.org"
rr := httptest.NewRecorder()
r := newRequest("GET", "http://www.example.com/")
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
// Test a re-direct: should return a 302 Found.
CanonicalHost(gorilla, http.StatusFound)(testHandler).ServeHTTP(rr, r)
if rr.Code != http.StatusFound {
t.Fatalf("bad status: got %v want %v", rr.Code, http.StatusFound)
}
if rr.Header().Get("Location") != gorilla+r.URL.Path {
t.Fatalf("bad re-direct: got %q want %q", rr.Header().Get("Location"), gorilla+r.URL.Path)
}
}
func TestKeepsQueryString(t *testing.T) {
google := "https://www.google.com"
rr := httptest.NewRecorder()
querystring := url.Values{"q": {"golang"}, "format": {"json"}}.Encode()
r := newRequest("GET", "http://www.example.com/search?"+querystring)
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CanonicalHost(google, http.StatusFound)(testHandler).ServeHTTP(rr, r)
want := google + r.URL.Path + "?" + querystring
if rr.Header().Get("Location") != want {
t.Fatalf("bad re-direct: got %q want %q", rr.Header().Get("Location"), want)
}
}
func TestBadDomain(t *testing.T) {
rr := httptest.NewRecorder()
r := newRequest("GET", "http://www.example.com/")
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
// Test a bad domain - should return 200 OK.
CanonicalHost("%", http.StatusFound)(testHandler).ServeHTTP(rr, r)
if rr.Code != http.StatusOK {
t.Fatalf("bad status: got %v want %v", rr.Code, http.StatusOK)
}
}
func TestEmptyHost(t *testing.T) {
rr := httptest.NewRecorder()
r := newRequest("GET", "http://www.example.com/")
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
// Test a domain that returns an empty url.Host from url.Parse.
CanonicalHost("hello.com", http.StatusFound)(testHandler).ServeHTTP(rr, r)
if rr.Code != http.StatusOK {
t.Fatalf("bad status: got %v want %v", rr.Code, http.StatusOK)
}
}
func TestHeaderWrites(t *testing.T) {
gorilla := "http://www.gorillatoolkit.org"
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
})
// Catch the log output to ensure we don't write multiple headers.
var b bytes.Buffer
buf := bufio.NewWriter(&b)
tl := log.New(buf, "test: ", log.Lshortfile)
srv := httptest.NewServer(
CanonicalHost(gorilla, http.StatusFound)(testHandler))
defer srv.Close()
srv.Config.ErrorLog = tl
_, err := http.Get(srv.URL)
if err != nil {
t.Fatal(err)
}
err = buf.Flush()
if err != nil {
t.Fatal(err)
}
// We rely on the error not changing: net/http does not export it.
if strings.Contains(b.String(), "multiple response.WriteHeader calls") {
t.Fatalf("re-direct did not return early: multiple header writes")
}
}

148
vendor/github.com/gorilla/handlers/compress.go generated vendored Normal file
View file

@ -0,0 +1,148 @@
// Copyright 2013 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package handlers
import (
"compress/flate"
"compress/gzip"
"io"
"net/http"
"strings"
)
type compressResponseWriter struct {
io.Writer
http.ResponseWriter
http.Hijacker
http.Flusher
http.CloseNotifier
}
func (w *compressResponseWriter) WriteHeader(c int) {
w.ResponseWriter.Header().Del("Content-Length")
w.ResponseWriter.WriteHeader(c)
}
func (w *compressResponseWriter) Header() http.Header {
return w.ResponseWriter.Header()
}
func (w *compressResponseWriter) Write(b []byte) (int, error) {
h := w.ResponseWriter.Header()
if h.Get("Content-Type") == "" {
h.Set("Content-Type", http.DetectContentType(b))
}
h.Del("Content-Length")
return w.Writer.Write(b)
}
type flusher interface {
Flush() error
}
func (w *compressResponseWriter) Flush() {
// Flush compressed data if compressor supports it.
if f, ok := w.Writer.(flusher); ok {
f.Flush()
}
// Flush HTTP response.
if w.Flusher != nil {
w.Flusher.Flush()
}
}
// CompressHandler gzip compresses HTTP responses for clients that support it
// via the 'Accept-Encoding' header.
//
// Compressing TLS traffic may leak the page contents to an attacker if the
// page contains user input: http://security.stackexchange.com/a/102015/12208
func CompressHandler(h http.Handler) http.Handler {
return CompressHandlerLevel(h, gzip.DefaultCompression)
}
// CompressHandlerLevel gzip compresses HTTP responses with specified compression level
// for clients that support it via the 'Accept-Encoding' header.
//
// The compression level should be gzip.DefaultCompression, gzip.NoCompression,
// or any integer value between gzip.BestSpeed and gzip.BestCompression inclusive.
// gzip.DefaultCompression is used in case of invalid compression level.
func CompressHandlerLevel(h http.Handler, level int) http.Handler {
if level < gzip.DefaultCompression || level > gzip.BestCompression {
level = gzip.DefaultCompression
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
L:
for _, enc := range strings.Split(r.Header.Get("Accept-Encoding"), ",") {
switch strings.TrimSpace(enc) {
case "gzip":
w.Header().Set("Content-Encoding", "gzip")
w.Header().Add("Vary", "Accept-Encoding")
gw, _ := gzip.NewWriterLevel(w, level)
defer gw.Close()
h, hok := w.(http.Hijacker)
if !hok { /* w is not Hijacker... oh well... */
h = nil
}
f, fok := w.(http.Flusher)
if !fok {
f = nil
}
cn, cnok := w.(http.CloseNotifier)
if !cnok {
cn = nil
}
w = &compressResponseWriter{
Writer: gw,
ResponseWriter: w,
Hijacker: h,
Flusher: f,
CloseNotifier: cn,
}
break L
case "deflate":
w.Header().Set("Content-Encoding", "deflate")
w.Header().Add("Vary", "Accept-Encoding")
fw, _ := flate.NewWriter(w, level)
defer fw.Close()
h, hok := w.(http.Hijacker)
if !hok { /* w is not Hijacker... oh well... */
h = nil
}
f, fok := w.(http.Flusher)
if !fok {
f = nil
}
cn, cnok := w.(http.CloseNotifier)
if !cnok {
cn = nil
}
w = &compressResponseWriter{
Writer: fw,
ResponseWriter: w,
Hijacker: h,
Flusher: f,
CloseNotifier: cn,
}
break L
}
}
h.ServeHTTP(w, r)
})
}

154
vendor/github.com/gorilla/handlers/compress_test.go generated vendored Normal file
View file

@ -0,0 +1,154 @@
// Copyright 2013 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package handlers
import (
"bufio"
"io"
"net"
"net/http"
"net/http/httptest"
"strconv"
"testing"
)
var contentType = "text/plain; charset=utf-8"
func compressedRequest(w *httptest.ResponseRecorder, compression string) {
CompressHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Length", strconv.Itoa(9*1024))
w.Header().Set("Content-Type", contentType)
for i := 0; i < 1024; i++ {
io.WriteString(w, "Gorilla!\n")
}
})).ServeHTTP(w, &http.Request{
Method: "GET",
Header: http.Header{
"Accept-Encoding": []string{compression},
},
})
}
func TestCompressHandlerNoCompression(t *testing.T) {
w := httptest.NewRecorder()
compressedRequest(w, "")
if enc := w.HeaderMap.Get("Content-Encoding"); enc != "" {
t.Errorf("wrong content encoding, got %q want %q", enc, "")
}
if ct := w.HeaderMap.Get("Content-Type"); ct != contentType {
t.Errorf("wrong content type, got %q want %q", ct, contentType)
}
if w.Body.Len() != 1024*9 {
t.Errorf("wrong len, got %d want %d", w.Body.Len(), 1024*9)
}
if l := w.HeaderMap.Get("Content-Length"); l != "9216" {
t.Errorf("wrong content-length. got %q expected %d", l, 1024*9)
}
}
func TestCompressHandlerGzip(t *testing.T) {
w := httptest.NewRecorder()
compressedRequest(w, "gzip")
if w.HeaderMap.Get("Content-Encoding") != "gzip" {
t.Errorf("wrong content encoding, got %q want %q", w.HeaderMap.Get("Content-Encoding"), "gzip")
}
if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" {
t.Errorf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8")
}
if w.Body.Len() != 72 {
t.Errorf("wrong len, got %d want %d", w.Body.Len(), 72)
}
if l := w.HeaderMap.Get("Content-Length"); l != "" {
t.Errorf("wrong content-length. got %q expected %q", l, "")
}
}
func TestCompressHandlerDeflate(t *testing.T) {
w := httptest.NewRecorder()
compressedRequest(w, "deflate")
if w.HeaderMap.Get("Content-Encoding") != "deflate" {
t.Fatalf("wrong content encoding, got %q want %q", w.HeaderMap.Get("Content-Encoding"), "deflate")
}
if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" {
t.Fatalf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8")
}
if w.Body.Len() != 54 {
t.Fatalf("wrong len, got %d want %d", w.Body.Len(), 54)
}
}
func TestCompressHandlerGzipDeflate(t *testing.T) {
w := httptest.NewRecorder()
compressedRequest(w, "gzip, deflate ")
if w.HeaderMap.Get("Content-Encoding") != "gzip" {
t.Fatalf("wrong content encoding, got %q want %q", w.HeaderMap.Get("Content-Encoding"), "gzip")
}
if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" {
t.Fatalf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8")
}
}
type fullyFeaturedResponseWriter struct{}
// Header/Write/WriteHeader implement the http.ResponseWriter interface.
func (fullyFeaturedResponseWriter) Header() http.Header {
return http.Header{}
}
func (fullyFeaturedResponseWriter) Write([]byte) (int, error) {
return 0, nil
}
func (fullyFeaturedResponseWriter) WriteHeader(int) {}
// Flush implements the http.Flusher interface.
func (fullyFeaturedResponseWriter) Flush() {}
// Hijack implements the http.Hijacker interface.
func (fullyFeaturedResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return nil, nil, nil
}
// CloseNotify implements the http.CloseNotifier interface.
func (fullyFeaturedResponseWriter) CloseNotify() <-chan bool {
return nil
}
func TestCompressHandlerPreserveInterfaces(t *testing.T) {
// Compile time validation fullyFeaturedResponseWriter implements all the
// interfaces we're asserting in the test case below.
var (
_ http.Flusher = fullyFeaturedResponseWriter{}
_ http.CloseNotifier = fullyFeaturedResponseWriter{}
_ http.Hijacker = fullyFeaturedResponseWriter{}
)
var h http.Handler = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
comp := r.Header.Get("Accept-Encoding")
if _, ok := rw.(*compressResponseWriter); !ok {
t.Fatalf("ResponseWriter wasn't wrapped by compressResponseWriter, got %T type", rw)
}
if _, ok := rw.(http.Flusher); !ok {
t.Errorf("ResponseWriter lost http.Flusher interface for %q", comp)
}
if _, ok := rw.(http.CloseNotifier); !ok {
t.Errorf("ResponseWriter lost http.CloseNotifier interface for %q", comp)
}
if _, ok := rw.(http.Hijacker); !ok {
t.Errorf("ResponseWriter lost http.Hijacker interface for %q", comp)
}
})
h = CompressHandler(h)
var (
rw fullyFeaturedResponseWriter
)
r, err := http.NewRequest("GET", "/", nil)
if err != nil {
t.Fatalf("Failed to create test request: %v", err)
}
r.Header.Set("Accept-Encoding", "gzip")
h.ServeHTTP(rw, r)
r.Header.Set("Accept-Encoding", "deflate")
h.ServeHTTP(rw, r)
}

327
vendor/github.com/gorilla/handlers/cors.go generated vendored Normal file
View file

@ -0,0 +1,327 @@
package handlers
import (
"net/http"
"strconv"
"strings"
)
// CORSOption represents a functional option for configuring the CORS middleware.
type CORSOption func(*cors) error
type cors struct {
h http.Handler
allowedHeaders []string
allowedMethods []string
allowedOrigins []string
allowedOriginValidator OriginValidator
exposedHeaders []string
maxAge int
ignoreOptions bool
allowCredentials bool
}
// OriginValidator takes an origin string and returns whether or not that origin is allowed.
type OriginValidator func(string) bool
var (
defaultCorsMethods = []string{"GET", "HEAD", "POST"}
defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"}
// (WebKit/Safari v9 sends the Origin header by default in AJAX requests)
)
const (
corsOptionMethod string = "OPTIONS"
corsAllowOriginHeader string = "Access-Control-Allow-Origin"
corsExposeHeadersHeader string = "Access-Control-Expose-Headers"
corsMaxAgeHeader string = "Access-Control-Max-Age"
corsAllowMethodsHeader string = "Access-Control-Allow-Methods"
corsAllowHeadersHeader string = "Access-Control-Allow-Headers"
corsAllowCredentialsHeader string = "Access-Control-Allow-Credentials"
corsRequestMethodHeader string = "Access-Control-Request-Method"
corsRequestHeadersHeader string = "Access-Control-Request-Headers"
corsOriginHeader string = "Origin"
corsVaryHeader string = "Vary"
corsOriginMatchAll string = "*"
)
func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) {
origin := r.Header.Get(corsOriginHeader)
if !ch.isOriginAllowed(origin) {
ch.h.ServeHTTP(w, r)
return
}
if r.Method == corsOptionMethod {
if ch.ignoreOptions {
ch.h.ServeHTTP(w, r)
return
}
if _, ok := r.Header[corsRequestMethodHeader]; !ok {
w.WriteHeader(http.StatusBadRequest)
return
}
method := r.Header.Get(corsRequestMethodHeader)
if !ch.isMatch(method, ch.allowedMethods) {
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
requestHeaders := strings.Split(r.Header.Get(corsRequestHeadersHeader), ",")
allowedHeaders := []string{}
for _, v := range requestHeaders {
canonicalHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
if canonicalHeader == "" || ch.isMatch(canonicalHeader, defaultCorsHeaders) {
continue
}
if !ch.isMatch(canonicalHeader, ch.allowedHeaders) {
w.WriteHeader(http.StatusForbidden)
return
}
allowedHeaders = append(allowedHeaders, canonicalHeader)
}
if len(allowedHeaders) > 0 {
w.Header().Set(corsAllowHeadersHeader, strings.Join(allowedHeaders, ","))
}
if ch.maxAge > 0 {
w.Header().Set(corsMaxAgeHeader, strconv.Itoa(ch.maxAge))
}
if !ch.isMatch(method, defaultCorsMethods) {
w.Header().Set(corsAllowMethodsHeader, method)
}
} else {
if len(ch.exposedHeaders) > 0 {
w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ","))
}
}
if ch.allowCredentials {
w.Header().Set(corsAllowCredentialsHeader, "true")
}
if len(ch.allowedOrigins) > 1 {
w.Header().Set(corsVaryHeader, corsOriginHeader)
}
returnOrigin := origin
for _, o := range ch.allowedOrigins {
// A configuration of * is different than explicitly setting an allowed
// origin. Returning arbitrary origin headers an an access control allow
// origin header is unsafe and is not required by any use case.
if o == corsOriginMatchAll {
returnOrigin = "*"
break
}
}
w.Header().Set(corsAllowOriginHeader, returnOrigin)
if r.Method == corsOptionMethod {
return
}
ch.h.ServeHTTP(w, r)
}
// CORS provides Cross-Origin Resource Sharing middleware.
// Example:
//
// import (
// "net/http"
//
// "github.com/gorilla/handlers"
// "github.com/gorilla/mux"
// )
//
// func main() {
// r := mux.NewRouter()
// r.HandleFunc("/users", UserEndpoint)
// r.HandleFunc("/projects", ProjectEndpoint)
//
// // Apply the CORS middleware to our top-level router, with the defaults.
// http.ListenAndServe(":8000", handlers.CORS()(r))
// }
//
func CORS(opts ...CORSOption) func(http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
ch := parseCORSOptions(opts...)
ch.h = h
return ch
}
}
func parseCORSOptions(opts ...CORSOption) *cors {
ch := &cors{
allowedMethods: defaultCorsMethods,
allowedHeaders: defaultCorsHeaders,
allowedOrigins: []string{corsOriginMatchAll},
}
for _, option := range opts {
option(ch)
}
return ch
}
//
// Functional options for configuring CORS.
//
// AllowedHeaders adds the provided headers to the list of allowed headers in a
// CORS request.
// This is an append operation so the headers Accept, Accept-Language,
// and Content-Language are always allowed.
// Content-Type must be explicitly declared if accepting Content-Types other than
// application/x-www-form-urlencoded, multipart/form-data, or text/plain.
func AllowedHeaders(headers []string) CORSOption {
return func(ch *cors) error {
for _, v := range headers {
normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
if normalizedHeader == "" {
continue
}
if !ch.isMatch(normalizedHeader, ch.allowedHeaders) {
ch.allowedHeaders = append(ch.allowedHeaders, normalizedHeader)
}
}
return nil
}
}
// AllowedMethods can be used to explicitly allow methods in the
// Access-Control-Allow-Methods header.
// This is a replacement operation so you must also
// pass GET, HEAD, and POST if you wish to support those methods.
func AllowedMethods(methods []string) CORSOption {
return func(ch *cors) error {
ch.allowedMethods = []string{}
for _, v := range methods {
normalizedMethod := strings.ToUpper(strings.TrimSpace(v))
if normalizedMethod == "" {
continue
}
if !ch.isMatch(normalizedMethod, ch.allowedMethods) {
ch.allowedMethods = append(ch.allowedMethods, normalizedMethod)
}
}
return nil
}
}
// AllowedOrigins sets the allowed origins for CORS requests, as used in the
// 'Allow-Access-Control-Origin' HTTP header.
// Note: Passing in a []string{"*"} will allow any domain.
func AllowedOrigins(origins []string) CORSOption {
return func(ch *cors) error {
for _, v := range origins {
if v == corsOriginMatchAll {
ch.allowedOrigins = []string{corsOriginMatchAll}
return nil
}
}
ch.allowedOrigins = origins
return nil
}
}
// AllowedOriginValidator sets a function for evaluating allowed origins in CORS requests, represented by the
// 'Allow-Access-Control-Origin' HTTP header.
func AllowedOriginValidator(fn OriginValidator) CORSOption {
return func(ch *cors) error {
ch.allowedOriginValidator = fn
return nil
}
}
// ExposeHeaders can be used to specify headers that are available
// and will not be stripped out by the user-agent.
func ExposedHeaders(headers []string) CORSOption {
return func(ch *cors) error {
ch.exposedHeaders = []string{}
for _, v := range headers {
normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v))
if normalizedHeader == "" {
continue
}
if !ch.isMatch(normalizedHeader, ch.exposedHeaders) {
ch.exposedHeaders = append(ch.exposedHeaders, normalizedHeader)
}
}
return nil
}
}
// MaxAge determines the maximum age (in seconds) between preflight requests. A
// maximum of 10 minutes is allowed. An age above this value will default to 10
// minutes.
func MaxAge(age int) CORSOption {
return func(ch *cors) error {
// Maximum of 10 minutes.
if age > 600 {
age = 600
}
ch.maxAge = age
return nil
}
}
// IgnoreOptions causes the CORS middleware to ignore OPTIONS requests, instead
// passing them through to the next handler. This is useful when your application
// or framework has a pre-existing mechanism for responding to OPTIONS requests.
func IgnoreOptions() CORSOption {
return func(ch *cors) error {
ch.ignoreOptions = true
return nil
}
}
// AllowCredentials can be used to specify that the user agent may pass
// authentication details along with the request.
func AllowCredentials() CORSOption {
return func(ch *cors) error {
ch.allowCredentials = true
return nil
}
}
func (ch *cors) isOriginAllowed(origin string) bool {
if origin == "" {
return false
}
if ch.allowedOriginValidator != nil {
return ch.allowedOriginValidator(origin)
}
for _, allowedOrigin := range ch.allowedOrigins {
if allowedOrigin == origin || allowedOrigin == corsOriginMatchAll {
return true
}
}
return false
}
func (ch *cors) isMatch(needle string, haystack []string) bool {
for _, v := range haystack {
if v == needle {
return true
}
}
return false
}

371
vendor/github.com/gorilla/handlers/cors_test.go generated vendored Normal file
View file

@ -0,0 +1,371 @@
package handlers
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
)
func TestDefaultCORSHandlerReturnsOk(t *testing.T) {
r := newRequest("GET", "http://www.example.com/")
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS()(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusFound)
}
}
func TestDefaultCORSHandlerReturnsOkWithOrigin(t *testing.T) {
r := newRequest("GET", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS()(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusFound)
}
}
func TestCORSHandlerIgnoreOptionsFallsThrough(t *testing.T) {
r := newRequest("OPTIONS", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusTeapot)
})
CORS(IgnoreOptions())(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusTeapot {
t.Fatalf("bad status: got %v want %v", status, http.StatusTeapot)
}
}
func TestCORSHandlerSetsExposedHeaders(t *testing.T) {
// Test default configuration.
r := newRequest("GET", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS(ExposedHeaders([]string{"X-CORS-TEST"}))(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusOK)
}
header := rr.HeaderMap.Get(corsExposeHeadersHeader)
if header != "X-Cors-Test" {
t.Fatal("bad header: expected X-Cors-Test header, got empty header for method.")
}
}
func TestCORSHandlerUnsetRequestMethodForPreflightBadRequest(t *testing.T) {
r := newRequest("OPTIONS", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS(AllowedMethods([]string{"DELETE"}))(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusBadRequest {
t.Fatalf("bad status: got %v want %v", status, http.StatusBadRequest)
}
}
func TestCORSHandlerInvalidRequestMethodForPreflightMethodNotAllowed(t *testing.T) {
r := newRequest("OPTIONS", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
r.Header.Set(corsRequestMethodHeader, "DELETE")
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS()(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusMethodNotAllowed {
t.Fatalf("bad status: got %v want %v", status, http.StatusMethodNotAllowed)
}
}
func TestCORSHandlerOptionsRequestMustNotBePassedToNextHandler(t *testing.T) {
r := newRequest("OPTIONS", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
r.Header.Set(corsRequestMethodHeader, "GET")
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
t.Fatal("Options request must not be passed to next handler")
})
CORS()(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusOK)
}
}
func TestCORSHandlerAllowedMethodForPreflight(t *testing.T) {
r := newRequest("OPTIONS", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
r.Header.Set(corsRequestMethodHeader, "DELETE")
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS(AllowedMethods([]string{"DELETE"}))(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusOK)
}
header := rr.HeaderMap.Get(corsAllowMethodsHeader)
if header != "DELETE" {
t.Fatalf("bad header: expected DELETE method header, got empty header.")
}
}
func TestCORSHandlerAllowMethodsNotSetForSimpleRequestPreflight(t *testing.T) {
for _, method := range defaultCorsMethods {
r := newRequest("OPTIONS", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
r.Header.Set(corsRequestMethodHeader, method)
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS()(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusOK)
}
header := rr.HeaderMap.Get(corsAllowMethodsHeader)
if header != "" {
t.Fatalf("bad header: expected empty method header, got %s.", header)
}
}
}
func TestCORSHandlerAllowedHeaderNotSetForSimpleRequestPreflight(t *testing.T) {
for _, simpleHeader := range defaultCorsHeaders {
r := newRequest("OPTIONS", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
r.Header.Set(corsRequestMethodHeader, "GET")
r.Header.Set(corsRequestHeadersHeader, simpleHeader)
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS()(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusOK)
}
header := rr.HeaderMap.Get(corsAllowHeadersHeader)
if header != "" {
t.Fatalf("bad header: expected empty header, got %s.", header)
}
}
}
func TestCORSHandlerAllowedHeaderForPreflight(t *testing.T) {
r := newRequest("OPTIONS", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
r.Header.Set(corsRequestMethodHeader, "POST")
r.Header.Set(corsRequestHeadersHeader, "Content-Type")
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS(AllowedHeaders([]string{"Content-Type"}))(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusOK)
}
header := rr.HeaderMap.Get(corsAllowHeadersHeader)
if header != "Content-Type" {
t.Fatalf("bad header: expected Content-Type header, got empty header.")
}
}
func TestCORSHandlerInvalidHeaderForPreflightForbidden(t *testing.T) {
r := newRequest("OPTIONS", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
r.Header.Set(corsRequestMethodHeader, "POST")
r.Header.Set(corsRequestHeadersHeader, "Content-Type")
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS()(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusForbidden {
t.Fatalf("bad status: got %v want %v", status, http.StatusForbidden)
}
}
func TestCORSHandlerMaxAgeForPreflight(t *testing.T) {
r := newRequest("OPTIONS", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
r.Header.Set(corsRequestMethodHeader, "POST")
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS(MaxAge(3500))(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusOK)
}
header := rr.HeaderMap.Get(corsMaxAgeHeader)
if header != "600" {
t.Fatalf("bad header: expected %s to be %s, got %s.", corsMaxAgeHeader, "600", header)
}
}
func TestCORSHandlerAllowedCredentials(t *testing.T) {
r := newRequest("GET", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS(AllowCredentials())(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusOK)
}
header := rr.HeaderMap.Get(corsAllowCredentialsHeader)
if header != "true" {
t.Fatalf("bad header: expected %s to be %s, got %s.", corsAllowCredentialsHeader, "true", header)
}
}
func TestCORSHandlerMultipleAllowOriginsSetsVaryHeader(t *testing.T) {
r := newRequest("GET", "http://www.example.com/")
r.Header.Set("Origin", r.URL.String())
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
CORS(AllowedOrigins([]string{r.URL.String(), "http://google.com"}))(testHandler).ServeHTTP(rr, r)
if status := rr.Code; status != http.StatusOK {
t.Fatalf("bad status: got %v want %v", status, http.StatusOK)
}
header := rr.HeaderMap.Get(corsVaryHeader)
if header != corsOriginHeader {
t.Fatalf("bad header: expected %s to be %s, got %s.", corsVaryHeader, corsOriginHeader, header)
}
}
func TestCORSWithMultipleHandlers(t *testing.T) {
var lastHandledBy string
corsMiddleware := CORS()
testHandler1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
lastHandledBy = "testHandler1"
})
testHandler2 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
lastHandledBy = "testHandler2"
})
r1 := newRequest("GET", "http://www.example.com/")
rr1 := httptest.NewRecorder()
handler1 := corsMiddleware(testHandler1)
corsMiddleware(testHandler2)
handler1.ServeHTTP(rr1, r1)
if lastHandledBy != "testHandler1" {
t.Fatalf("bad CORS() registration: Handler served should be Handler registered")
}
}
func TestCORSHandlerWithCustomValidator(t *testing.T) {
r := newRequest("GET", "http://a.example.com")
r.Header.Set("Origin", r.URL.String())
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
originValidator := func(origin string) bool {
if strings.HasSuffix(origin, ".example.com") {
return true
}
return false
}
// Specially craft a CORS object.
handleFunc := func(h http.Handler) http.Handler {
c := &cors{
allowedMethods: defaultCorsMethods,
allowedHeaders: defaultCorsHeaders,
allowedOrigins: []string{"http://a.example.com"},
h: h,
}
AllowedOriginValidator(originValidator)(c)
return c
}
handleFunc(testHandler).ServeHTTP(rr, r)
header := rr.HeaderMap.Get(corsAllowOriginHeader)
if header != r.URL.String() {
t.Fatalf("bad header: expected %s to be %s, got %s.", corsAllowOriginHeader, r.URL.String(), header)
}
}
func TestCORSAllowStar(t *testing.T) {
r := newRequest("GET", "http://a.example.com")
r.Header.Set("Origin", r.URL.String())
rr := httptest.NewRecorder()
testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
originValidator := func(origin string) bool {
if strings.HasSuffix(origin, ".example.com") {
return true
}
return false
}
CORS(AllowedOriginValidator(originValidator))(testHandler).ServeHTTP(rr, r)
header := rr.HeaderMap.Get(corsAllowOriginHeader)
// Because * is the default CORS policy (which is safe), we should
// expect a * to be returned here as the Access-Control-Allow-Origin header
if header != "*" {
t.Fatalf("bad header: expected %s to be %s, got %s.", corsAllowOriginHeader, "*", header)
}
}
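The tests above cover the CORS option helpers (allowed origins, credentials, custom origin validators). As a rough usage sketch — not part of the vendored source, with the origin and listen address chosen purely for illustration — the middleware wraps an ordinary handler:

```go
package main

import (
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	// Plain API handler that the CORS middleware will wrap.
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Allow a single (assumed) origin and credentials, mirroring the
	// options exercised by the tests above.
	cors := handlers.CORS(
		handlers.AllowedOrigins([]string{"http://www.example.com"}),
		handlers.AllowCredentials(),
	)

	http.ListenAndServe(":8080", cors(api))
}
```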

9
vendor/github.com/gorilla/handlers/doc.go generated vendored Normal file
View file

@ -0,0 +1,9 @@
/*
Package handlers is a collection of handlers (aka "HTTP middleware") for use
with Go's net/http package (or any framework supporting http.Handler).
The package includes handlers for logging in standardised formats, compressing
HTTP responses, validating content types and other useful tools for manipulating
requests and responses.
*/
package handlers
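Since doc.go only carries the package comment, a short composition sketch may help; it is not part of the vendored file, and the route, port and use of os.Stdout are assumptions. The handlers compose by nesting, here resolving proxy headers before logging each request:

```go
package main

import (
	"net/http"
	"os"

	"github.com/gorilla/handlers"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// The innermost handler runs last: proxy headers are resolved first,
	// then the request is logged in Apache Common Log Format, then routed.
	chain := handlers.ProxyHeaders(handlers.LoggingHandler(os.Stdout, mux))

	http.ListenAndServe(":8080", chain)
}
```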

399
vendor/github.com/gorilla/handlers/handlers.go generated vendored Normal file
View file

@ -0,0 +1,399 @@
// Copyright 2013 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package handlers
import (
"bufio"
"fmt"
"io"
"net"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
)
// MethodHandler is an http.Handler that dispatches to a handler whose key in the
// MethodHandler's map matches the name of the HTTP request's method, eg: GET
//
// If the request's method is OPTIONS and OPTIONS is not a key in the map then
// the handler responds with a status of 200 and sets the Allow header to a
// comma-separated list of available methods.
//
// If the request's method doesn't match any of its keys the handler responds
// with a status of HTTP 405 "Method Not Allowed" and sets the Allow header to a
// comma-separated list of available methods.
type MethodHandler map[string]http.Handler
func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if handler, ok := h[req.Method]; ok {
handler.ServeHTTP(w, req)
} else {
allow := []string{}
for k := range h {
allow = append(allow, k)
}
sort.Strings(allow)
w.Header().Set("Allow", strings.Join(allow, ", "))
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
} else {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
}
}
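A minimal sketch of mounting a MethodHandler (not part of the vendored file; the route and handler bodies are assumptions): GET and POST are dispatched, any other method gets a 405 with an Allow header, and OPTIONS is answered automatically.

```go
package main

import (
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	list := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("list books"))
	})
	create := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("create book"))
	})

	mux := http.NewServeMux()
	// GET and POST are served; other methods answer 405 with
	// "Allow: GET, POST", and OPTIONS answers 200 automatically.
	mux.Handle("/books", handlers.MethodHandler{
		"GET":  list,
		"POST": create,
	})

	http.ListenAndServe(":8080", mux)
}
```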
// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its
// friends
type loggingHandler struct {
writer io.Writer
handler http.Handler
}
// combinedLoggingHandler is the http.Handler implementation for LoggingHandlerTo
// and its friends
type combinedLoggingHandler struct {
writer io.Writer
handler http.Handler
}
func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
t := time.Now()
logger := makeLogger(w)
url := *req.URL
h.handler.ServeHTTP(logger, req)
writeLog(h.writer, req, url, t, logger.Status(), logger.Size())
}
func (h combinedLoggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
t := time.Now()
logger := makeLogger(w)
url := *req.URL
h.handler.ServeHTTP(logger, req)
writeCombinedLog(h.writer, req, url, t, logger.Status(), logger.Size())
}
func makeLogger(w http.ResponseWriter) loggingResponseWriter {
var logger loggingResponseWriter = &responseLogger{w: w, status: http.StatusOK}
if _, ok := w.(http.Hijacker); ok {
logger = &hijackLogger{responseLogger{w: w, status: http.StatusOK}}
}
h, ok1 := logger.(http.Hijacker)
c, ok2 := w.(http.CloseNotifier)
if ok1 && ok2 {
return hijackCloseNotifier{logger, h, c}
}
if ok2 {
return &closeNotifyWriter{logger, c}
}
return logger
}
type commonLoggingResponseWriter interface {
http.ResponseWriter
http.Flusher
Status() int
Size() int
}
// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP
// status code and body size
type responseLogger struct {
w http.ResponseWriter
status int
size int
}
func (l *responseLogger) Header() http.Header {
return l.w.Header()
}
func (l *responseLogger) Write(b []byte) (int, error) {
size, err := l.w.Write(b)
l.size += size
return size, err
}
func (l *responseLogger) WriteHeader(s int) {
l.w.WriteHeader(s)
l.status = s
}
func (l *responseLogger) Status() int {
return l.status
}
func (l *responseLogger) Size() int {
return l.size
}
func (l *responseLogger) Flush() {
f, ok := l.w.(http.Flusher)
if ok {
f.Flush()
}
}
type hijackLogger struct {
responseLogger
}
func (l *hijackLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h := l.responseLogger.w.(http.Hijacker)
conn, rw, err := h.Hijack()
if err == nil && l.responseLogger.status == 0 {
// The status will be StatusSwitchingProtocols if there was no error and
// WriteHeader has not been called yet
l.responseLogger.status = http.StatusSwitchingProtocols
}
return conn, rw, err
}
type closeNotifyWriter struct {
loggingResponseWriter
http.CloseNotifier
}
type hijackCloseNotifier struct {
loggingResponseWriter
http.Hijacker
http.CloseNotifier
}
const lowerhex = "0123456789abcdef"
func appendQuoted(buf []byte, s string) []byte {
var runeTmp [utf8.UTFMax]byte
for width := 0; len(s) > 0; s = s[width:] {
r := rune(s[0])
width = 1
if r >= utf8.RuneSelf {
r, width = utf8.DecodeRuneInString(s)
}
if width == 1 && r == utf8.RuneError {
buf = append(buf, `\x`...)
buf = append(buf, lowerhex[s[0]>>4])
buf = append(buf, lowerhex[s[0]&0xF])
continue
}
if r == rune('"') || r == '\\' { // always backslashed
buf = append(buf, '\\')
buf = append(buf, byte(r))
continue
}
if strconv.IsPrint(r) {
n := utf8.EncodeRune(runeTmp[:], r)
buf = append(buf, runeTmp[:n]...)
continue
}
switch r {
case '\a':
buf = append(buf, `\a`...)
case '\b':
buf = append(buf, `\b`...)
case '\f':
buf = append(buf, `\f`...)
case '\n':
buf = append(buf, `\n`...)
case '\r':
buf = append(buf, `\r`...)
case '\t':
buf = append(buf, `\t`...)
case '\v':
buf = append(buf, `\v`...)
default:
switch {
case r < ' ':
buf = append(buf, `\x`...)
buf = append(buf, lowerhex[s[0]>>4])
buf = append(buf, lowerhex[s[0]&0xF])
case r > utf8.MaxRune:
r = 0xFFFD
fallthrough
case r < 0x10000:
buf = append(buf, `\u`...)
for s := 12; s >= 0; s -= 4 {
buf = append(buf, lowerhex[r>>uint(s)&0xF])
}
default:
buf = append(buf, `\U`...)
for s := 28; s >= 0; s -= 4 {
buf = append(buf, lowerhex[r>>uint(s)&0xF])
}
}
}
}
return buf
}
// buildCommonLogLine builds a log entry for req in Apache Common Log Format.
// ts is the timestamp with which the entry should be logged.
// status and size are used to provide the response HTTP status and size.
func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte {
username := "-"
if url.User != nil {
if name := url.User.Username(); name != "" {
username = name
}
}
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
host = req.RemoteAddr
}
uri := req.RequestURI
// Requests using the CONNECT method over HTTP/2.0 must use
// the authority field (aka r.Host) to identify the target.
// Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT
if req.ProtoMajor == 2 && req.Method == "CONNECT" {
uri = req.Host
}
if uri == "" {
uri = url.RequestURI()
}
buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2)
buf = append(buf, host...)
buf = append(buf, " - "...)
buf = append(buf, username...)
buf = append(buf, " ["...)
buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...)
buf = append(buf, `] "`...)
buf = append(buf, req.Method...)
buf = append(buf, " "...)
buf = appendQuoted(buf, uri)
buf = append(buf, " "...)
buf = append(buf, req.Proto...)
buf = append(buf, `" `...)
buf = append(buf, strconv.Itoa(status)...)
buf = append(buf, " "...)
buf = append(buf, strconv.Itoa(size)...)
return buf
}
// writeLog writes a log entry for req to w in Apache Common Log Format.
// ts is the timestamp with which the entry should be logged.
// status and size are used to provide the response HTTP status and size.
func writeLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) {
buf := buildCommonLogLine(req, url, ts, status, size)
buf = append(buf, '\n')
w.Write(buf)
}
// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format.
// ts is the timestamp with which the entry should be logged.
// status and size are used to provide the response HTTP status and size.
func writeCombinedLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) {
buf := buildCommonLogLine(req, url, ts, status, size)
buf = append(buf, ` "`...)
buf = appendQuoted(buf, req.Referer())
buf = append(buf, `" "`...)
buf = appendQuoted(buf, req.UserAgent())
buf = append(buf, '"', '\n')
w.Write(buf)
}
// CombinedLoggingHandler returns an http.Handler that wraps h and logs requests to out in
// Apache Combined Log Format.
//
// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format.
//
// CombinedLoggingHandler always sets the ident field of the log to -
func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler {
return combinedLoggingHandler{out, h}
}
// LoggingHandler returns an http.Handler that wraps h and logs requests to out in
// Apache Common Log Format (CLF).
//
// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format.
//
// LoggingHandler always sets the ident field of the log to -
//
// Example:
//
// r := mux.NewRouter()
// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
// w.Write([]byte("This is a catch-all route"))
// })
// loggedRouter := handlers.LoggingHandler(os.Stdout, r)
// http.ListenAndServe(":1123", loggedRouter)
//
func LoggingHandler(out io.Writer, h http.Handler) http.Handler {
return loggingHandler{out, h}
}
// isContentType validates the Content-Type header matches the supplied
// contentType. That is, its type and subtype match.
func isContentType(h http.Header, contentType string) bool {
ct := h.Get("Content-Type")
if i := strings.IndexRune(ct, ';'); i != -1 {
ct = ct[0:i]
}
return ct == contentType
}
// ContentTypeHandler wraps and returns an http.Handler, validating that the request
// content type is compatible with the contentTypes list. It writes an HTTP 415
// error if that fails.
//
// Only PUT, POST, and PATCH requests are considered.
func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") {
h.ServeHTTP(w, r)
return
}
for _, ct := range contentTypes {
if isContentType(r.Header, ct) {
h.ServeHTTP(w, r)
return
}
}
http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType)
})
}
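A sketch of ContentTypeHandler guarding a JSON endpoint (route, port and handler are assumptions, not part of the vendored file): write requests that are not application/json are rejected with 415, while GETs pass through unchecked.

```go
package main

import (
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	save := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusCreated)
	})

	// Only PUT, POST and PATCH are checked; anything that is not
	// application/json is answered with 415 Unsupported Media Type.
	http.Handle("/items", handlers.ContentTypeHandler(save, "application/json"))

	http.ListenAndServe(":8080", nil)
}
```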
const (
// HTTPMethodOverrideHeader is a commonly used
// http header to override a request method.
HTTPMethodOverrideHeader = "X-HTTP-Method-Override"
// HTTPMethodOverrideFormKey is a commonly used
// HTML form key to override a request method.
HTTPMethodOverrideFormKey = "_method"
)
// HTTPMethodOverrideHandler wraps and returns an http.Handler which checks for
// the X-HTTP-Method-Override header or the _method form key, and overrides (if
// valid) request.Method with its value.
//
// This is especially useful for HTTP clients that don't support many http verbs.
// It isn't secure to override e.g a GET to a POST, so only POST requests are
// considered. Likewise, the override method can only be a "write" method: PUT,
// PATCH or DELETE.
//
// Form method takes precedence over header method.
func HTTPMethodOverrideHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "POST" {
om := r.FormValue(HTTPMethodOverrideFormKey)
if om == "" {
om = r.Header.Get(HTTPMethodOverrideHeader)
}
if om == "PUT" || om == "PATCH" || om == "DELETE" {
r.Method = om
}
}
h.ServeHTTP(w, r)
})
}
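A sketch of the override handler in use (port and handler are illustrative assumptions): a POST carrying "X-HTTP-Method-Override: DELETE" or a "_method=DELETE" form field reaches the wrapped handler with r.Method already rewritten to DELETE.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// r.Method has already been rewritten when the override was valid.
		fmt.Fprintf(w, "handled as %s\n", r.Method)
	})

	http.ListenAndServe(":8080", handlers.HTTPMethodOverrideHandler(app))
}
```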

21
vendor/github.com/gorilla/handlers/handlers_go18.go generated vendored Normal file
View file

@ -0,0 +1,21 @@
// +build go1.8
package handlers
import (
"fmt"
"net/http"
)
type loggingResponseWriter interface {
commonLoggingResponseWriter
http.Pusher
}
func (l *responseLogger) Push(target string, opts *http.PushOptions) error {
p, ok := l.w.(http.Pusher)
if !ok {
return fmt.Errorf("responseLogger does not implement http.Pusher")
}
return p.Push(target, opts)
}

View file

@ -0,0 +1,34 @@
// +build go1.8
package handlers
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)
func TestLoggingHandlerWithPush(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if _, ok := w.(http.Pusher); !ok {
t.Fatalf("%T from LoggingHandler does not satisfy http.Pusher interface when built with Go >=1.8", w)
}
w.WriteHeader(200)
})
logger := LoggingHandler(ioutil.Discard, handler)
logger.ServeHTTP(httptest.NewRecorder(), newRequest("GET", "/"))
}
func TestCombinedLoggingHandlerWithPush(t *testing.T) {
handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if _, ok := w.(http.Pusher); !ok {
t.Fatalf("%T from CombinedLoggingHandler does not satisfy http.Pusher interface when built with Go >=1.8", w)
}
w.WriteHeader(200)
})
logger := CombinedLoggingHandler(ioutil.Discard, handler)
logger.ServeHTTP(httptest.NewRecorder(), newRequest("GET", "/"))
}

7
vendor/github.com/gorilla/handlers/handlers_pre18.go generated vendored Normal file
View file

@ -0,0 +1,7 @@
// +build !go1.8
package handlers
type loggingResponseWriter interface {
commonLoggingResponseWriter
}

378
vendor/github.com/gorilla/handlers/handlers_test.go generated vendored Normal file
View file

@ -0,0 +1,378 @@
// Copyright 2013 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package handlers
import (
"bytes"
"net"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
)
const (
ok = "ok\n"
notAllowed = "Method not allowed\n"
)
var okHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte(ok))
})
func newRequest(method, url string) *http.Request {
req, err := http.NewRequest(method, url, nil)
if err != nil {
panic(err)
}
return req
}
func TestMethodHandler(t *testing.T) {
tests := []struct {
req *http.Request
handler http.Handler
code int
allow string // Contents of the Allow header
body string
}{
// No handlers
{newRequest("GET", "/foo"), MethodHandler{}, http.StatusMethodNotAllowed, "", notAllowed},
{newRequest("OPTIONS", "/foo"), MethodHandler{}, http.StatusOK, "", ""},
// A single handler
{newRequest("GET", "/foo"), MethodHandler{"GET": okHandler}, http.StatusOK, "", ok},
{newRequest("POST", "/foo"), MethodHandler{"GET": okHandler}, http.StatusMethodNotAllowed, "GET", notAllowed},
// Multiple handlers
{newRequest("GET", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "", ok},
{newRequest("POST", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "", ok},
{newRequest("DELETE", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusMethodNotAllowed, "GET, POST", notAllowed},
{newRequest("OPTIONS", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "GET, POST", ""},
// Override OPTIONS
{newRequest("OPTIONS", "/foo"), MethodHandler{"OPTIONS": okHandler}, http.StatusOK, "", ok},
}
for i, test := range tests {
rec := httptest.NewRecorder()
test.handler.ServeHTTP(rec, test.req)
if rec.Code != test.code {
t.Fatalf("%d: wrong code, got %d want %d", i, rec.Code, test.code)
}
if allow := rec.HeaderMap.Get("Allow"); allow != test.allow {
t.Fatalf("%d: wrong Allow, got %s want %s", i, allow, test.allow)
}
if body := rec.Body.String(); body != test.body {
t.Fatalf("%d: wrong body, got %q want %q", i, body, test.body)
}
}
}
func TestMakeLogger(t *testing.T) {
rec := httptest.NewRecorder()
logger := makeLogger(rec)
// initial status
if logger.Status() != http.StatusOK {
t.Fatalf("wrong status, got %d want %d", logger.Status(), http.StatusOK)
}
// WriteHeader
logger.WriteHeader(http.StatusInternalServerError)
if logger.Status() != http.StatusInternalServerError {
t.Fatalf("wrong status, got %d want %d", logger.Status(), http.StatusInternalServerError)
}
// Write
logger.Write([]byte(ok))
if logger.Size() != len(ok) {
t.Fatalf("wrong size, got %d want %d", logger.Size(), len(ok))
}
// Header
logger.Header().Set("key", "value")
if val := logger.Header().Get("key"); val != "value" {
t.Fatalf("wrong header, got %s want %s", val, "value")
}
}
func TestWriteLog(t *testing.T) {
loc, err := time.LoadLocation("Europe/Warsaw")
if err != nil {
panic(err)
}
ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc)
// A typical request with an OK response
req := newRequest("GET", "http://example.com")
req.RemoteAddr = "192.168.100.5"
buf := new(bytes.Buffer)
writeLog(buf, req, *req.URL, ts, http.StatusOK, 100)
log := buf.String()
expected := "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100\n"
if log != expected {
t.Fatalf("wrong log, got %q want %q", log, expected)
}
// CONNECT request over http/2.0
req = &http.Request{
Method: "CONNECT",
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
URL: &url.URL{Host: "www.example.com:443"},
Host: "www.example.com:443",
RemoteAddr: "192.168.100.5",
}
buf = new(bytes.Buffer)
writeLog(buf, req, *req.URL, ts, http.StatusOK, 100)
log = buf.String()
expected = "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"CONNECT www.example.com:443 HTTP/2.0\" 200 100\n"
if log != expected {
t.Fatalf("wrong log, got %q want %q", log, expected)
}
// Request with an unauthorized user
req = newRequest("GET", "http://example.com")
req.RemoteAddr = "192.168.100.5"
req.URL.User = url.User("kamil")
buf.Reset()
writeLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500)
log = buf.String()
expected = "192.168.100.5 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 401 500\n"
if log != expected {
t.Fatalf("wrong log, got %q want %q", log, expected)
}
// Request with url encoded parameters
req = newRequest("GET", "http://example.com/test?abc=hello%20world&a=b%3F")
req.RemoteAddr = "192.168.100.5"
buf.Reset()
writeLog(buf, req, *req.URL, ts, http.StatusOK, 100)
log = buf.String()
expected = "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET /test?abc=hello%20world&a=b%3F HTTP/1.1\" 200 100\n"
if log != expected {
t.Fatalf("wrong log, got %q want %q", log, expected)
}
}
func TestWriteCombinedLog(t *testing.T) {
loc, err := time.LoadLocation("Europe/Warsaw")
if err != nil {
panic(err)
}
ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc)
// A typical request with an OK response
req := newRequest("GET", "http://example.com")
req.RemoteAddr = "192.168.100.5"
req.Header.Set("Referer", "http://example.com")
req.Header.Set(
"User-Agent",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.33 "+
"(KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33",
)
buf := new(bytes.Buffer)
writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100)
log := buf.String()
expected := "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " +
"\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " +
"AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n"
if log != expected {
t.Fatalf("wrong log, got %q want %q", log, expected)
}
// CONNECT request over http/2.0
req1 := &http.Request{
Method: "CONNECT",
Host: "www.example.com:443",
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
RemoteAddr: "192.168.100.5",
Header: http.Header{},
URL: &url.URL{Host: "www.example.com:443"},
}
req1.Header.Set("Referer", "http://example.com")
req1.Header.Set(
"User-Agent",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.33 "+
"(KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33",
)
buf = new(bytes.Buffer)
writeCombinedLog(buf, req1, *req1.URL, ts, http.StatusOK, 100)
log = buf.String()
expected = "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"CONNECT www.example.com:443 HTTP/2.0\" 200 100 \"http://example.com\" " +
"\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " +
"AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n"
if log != expected {
t.Fatalf("wrong log, got %q want %q", log, expected)
}
// Request with an unauthorized user
req.URL.User = url.User("kamil")
buf.Reset()
writeCombinedLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500)
log = buf.String()
expected = "192.168.100.5 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 401 500 \"http://example.com\" " +
"\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " +
"AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n"
if log != expected {
t.Fatalf("wrong log, got %q want %q", log, expected)
}
// Test with remote ipv6 address
req.RemoteAddr = "::1"
buf.Reset()
writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100)
log = buf.String()
expected = "::1 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " +
"\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " +
"AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n"
if log != expected {
t.Fatalf("wrong log, got %q want %q", log, expected)
}
// Test remote ipv6 addr, with port
req.RemoteAddr = net.JoinHostPort("::1", "65000")
buf.Reset()
writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100)
log = buf.String()
expected = "::1 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " +
"\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " +
"AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n"
if log != expected {
t.Fatalf("wrong log, got %q want %q", log, expected)
}
}
func TestLogPathRewrites(t *testing.T) {
var buf bytes.Buffer
handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
req.URL.Path = "/" // simulate http.StripPrefix and friends
w.WriteHeader(200)
})
logger := LoggingHandler(&buf, handler)
logger.ServeHTTP(httptest.NewRecorder(), newRequest("GET", "/subdir/asdf"))
if !strings.Contains(buf.String(), "GET /subdir/asdf HTTP") {
t.Fatalf("Got log %#v, wanted substring %#v", buf.String(), "GET /subdir/asdf HTTP")
}
}
func BenchmarkWriteLog(b *testing.B) {
loc, err := time.LoadLocation("Europe/Warsaw")
if err != nil {
b.Fatalf(err.Error())
}
ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc)
req := newRequest("GET", "http://example.com")
req.RemoteAddr = "192.168.100.5"
b.ResetTimer()
buf := &bytes.Buffer{}
for i := 0; i < b.N; i++ {
buf.Reset()
writeLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500)
}
}
func TestContentTypeHandler(t *testing.T) {
tests := []struct {
Method string
AllowContentTypes []string
ContentType string
Code int
}{
{"POST", []string{"application/json"}, "application/json", http.StatusOK},
{"POST", []string{"application/json", "application/xml"}, "application/json", http.StatusOK},
{"POST", []string{"application/json"}, "application/json; charset=utf-8", http.StatusOK},
{"POST", []string{"application/json"}, "application/json+xxx", http.StatusUnsupportedMediaType},
{"POST", []string{"application/json"}, "text/plain", http.StatusUnsupportedMediaType},
{"GET", []string{"application/json"}, "", http.StatusOK},
{"GET", []string{}, "", http.StatusOK},
}
for _, test := range tests {
r, err := http.NewRequest(test.Method, "/", nil)
if err != nil {
t.Error(err)
continue
}
h := ContentTypeHandler(okHandler, test.AllowContentTypes...)
r.Header.Set("Content-Type", test.ContentType)
w := httptest.NewRecorder()
h.ServeHTTP(w, r)
if w.Code != test.Code {
t.Errorf("expected %d, got %d", test.Code, w.Code)
}
}
}
func TestHTTPMethodOverride(t *testing.T) {
var tests = []struct {
Method string
OverrideMethod string
ExpectedMethod string
}{
{"POST", "PUT", "PUT"},
{"POST", "PATCH", "PATCH"},
{"POST", "DELETE", "DELETE"},
{"PUT", "DELETE", "PUT"},
{"GET", "GET", "GET"},
{"HEAD", "HEAD", "HEAD"},
{"GET", "PUT", "GET"},
{"HEAD", "DELETE", "HEAD"},
}
for _, test := range tests {
h := HTTPMethodOverrideHandler(okHandler)
reqs := make([]*http.Request, 0, 2)
rHeader, err := http.NewRequest(test.Method, "/", nil)
if err != nil {
t.Error(err)
}
rHeader.Header.Set(HTTPMethodOverrideHeader, test.OverrideMethod)
reqs = append(reqs, rHeader)
f := url.Values{HTTPMethodOverrideFormKey: []string{test.OverrideMethod}}
rForm, err := http.NewRequest(test.Method, "/", strings.NewReader(f.Encode()))
if err != nil {
t.Error(err)
}
rForm.Header.Set("Content-Type", "application/x-www-form-urlencoded")
reqs = append(reqs, rForm)
for _, r := range reqs {
w := httptest.NewRecorder()
h.ServeHTTP(w, r)
if r.Method != test.ExpectedMethod {
t.Errorf("Expected %s, got %s", test.ExpectedMethod, r.Method)
}
}
}
}

120
vendor/github.com/gorilla/handlers/proxy_headers.go generated vendored Normal file
View file

@ -0,0 +1,120 @@
package handlers
import (
"net/http"
"regexp"
"strings"
)
var (
// De-facto standard header keys.
xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host")
xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto")
xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme")
xRealIP = http.CanonicalHeaderKey("X-Real-IP")
)
var (
// RFC7239 defines a new "Forwarded: " header designed to replace the
// existing use of X-Forwarded-* headers.
// e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43
forwarded = http.CanonicalHeaderKey("Forwarded")
// Allows for a sub-match of the first value after 'for=' to the next
// comma, semi-colon or space. The match is case-insensitive.
forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`)
// Allows for a sub-match for the first instance of scheme (http|https)
// prefixed by 'proto='. The match is case-insensitive.
protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`)
)
// ProxyHeaders inspects common reverse proxy headers and sets the corresponding
// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP
// for the remote (client) IP address, X-Forwarded-Proto or X-Forwarded-Scheme
// for the scheme (http|https) and the RFC7239 Forwarded header, which may
// include both client IPs and schemes.
//
// NOTE: This middleware should only be used when behind a reverse
// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are
// configured not to) strip these headers from client requests, or where these
// headers are accepted "as is" from a remote client (e.g. when Go is not behind
// a proxy), can manifest as a vulnerability if your application uses these
// headers for validating the 'trustworthiness' of a request.
func ProxyHeaders(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
// Set the remote IP with the value passed from the proxy.
if fwd := getIP(r); fwd != "" {
r.RemoteAddr = fwd
}
// Set the scheme (proto) with the value passed from the proxy.
if scheme := getScheme(r); scheme != "" {
r.URL.Scheme = scheme
}
// Set the host with the value passed by the proxy
if r.Header.Get(xForwardedHost) != "" {
r.Host = r.Header.Get(xForwardedHost)
}
// Call the next handler in the chain.
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
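A sketch of ProxyHeaders in front of an application that sits behind a trusted reverse proxy (the listen address is an assumption): RemoteAddr, URL.Scheme and Host are rewritten from the forwarded headers before the wrapped handler runs.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// With a well-behaved proxy in front, these reflect the client,
		// not the proxy.
		fmt.Fprintf(w, "client=%s scheme=%s host=%s\n",
			r.RemoteAddr, r.URL.Scheme, r.Host)
	})

	// Only safe when every request really passes through the proxy.
	http.ListenAndServe("127.0.0.1:8080", handlers.ProxyHeaders(app))
}
```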
// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
// Forwarded headers (in that order).
func getIP(r *http.Request) string {
var addr string
if fwd := r.Header.Get(xForwardedFor); fwd != "" {
// Only grab the first (client) address. Note that '192.168.0.1,
// 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
// the first may represent forwarding proxies earlier in the chain.
s := strings.Index(fwd, ", ")
if s == -1 {
s = len(fwd)
}
addr = fwd[:s]
} else if fwd := r.Header.Get(xRealIP); fwd != "" {
// X-Real-IP should only contain one IP address (the client making the
// request).
addr = fwd
} else if fwd := r.Header.Get(forwarded); fwd != "" {
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'for=' capture, which we ignore. In the case of multiple IP
// addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only
// extract the first, which should be the client IP.
if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 {
// IPv6 addresses in Forwarded headers are quoted-strings. We strip
// these quotes.
addr = strings.Trim(match[1], `"`)
}
}
return addr
}
// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239
// Forwarded headers (in that order).
func getScheme(r *http.Request) string {
var scheme string
// Retrieve the scheme from X-Forwarded-Proto.
if proto := r.Header.Get(xForwardedProto); proto != "" {
scheme = strings.ToLower(proto)
} else if proto = r.Header.Get(xForwardedScheme); proto != "" {
scheme = strings.ToLower(proto)
} else if proto = r.Header.Get(forwarded); proto != "" {
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'proto=' capture, which we ignore. In the case of multiple proto
// parameters (invalid) we only extract the first.
if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 {
scheme = strings.ToLower(match[1])
}
}
return scheme
}

View file

@ -0,0 +1,111 @@
package handlers
import (
"net/http"
"net/http/httptest"
"testing"
)
type headerTable struct {
key string // header key
val string // header val
expected string // expected result
}
func TestGetIP(t *testing.T) {
headers := []headerTable{
{xForwardedFor, "8.8.8.8", "8.8.8.8"}, // Single address
{xForwardedFor, "8.8.8.8, 8.8.4.4", "8.8.8.8"}, // Multiple
{xForwardedFor, "[2001:db8:cafe::17]:4711", "[2001:db8:cafe::17]:4711"}, // IPv6 address
{xForwardedFor, "", ""}, // None
{xRealIP, "8.8.8.8", "8.8.8.8"}, // Single address
{xRealIP, "8.8.8.8, 8.8.4.4", "8.8.8.8, 8.8.4.4"}, // Multiple
{xRealIP, "[2001:db8:cafe::17]:4711", "[2001:db8:cafe::17]:4711"}, // IPv6 address
{xRealIP, "", ""}, // None
{forwarded, `for="_gazonk"`, "_gazonk"}, // Hostname
{forwarded, `For="[2001:db8:cafe::17]:4711`, `[2001:db8:cafe::17]:4711`}, // IPv6 address
{forwarded, `for=192.0.2.60;proto=http;by=203.0.113.43`, `192.0.2.60`}, // Multiple params
{forwarded, `for=192.0.2.43, for=198.51.100.17`, "192.0.2.43"}, // Multiple params
{forwarded, `for="workstation.local",for=198.51.100.17`, "workstation.local"}, // Hostname
}
for _, v := range headers {
req := &http.Request{
Header: http.Header{
v.key: []string{v.val},
}}
res := getIP(req)
if res != v.expected {
t.Fatalf("wrong header for %s: got %s want %s", v.key, res,
v.expected)
}
}
}
func TestGetScheme(t *testing.T) {
headers := []headerTable{
{xForwardedProto, "https", "https"},
{xForwardedProto, "http", "http"},
{xForwardedProto, "HTTP", "http"},
{xForwardedScheme, "https", "https"},
{xForwardedScheme, "http", "http"},
{xForwardedScheme, "HTTP", "http"},
{forwarded, `For="[2001:db8:cafe::17]:4711`, ""}, // No proto
{forwarded, `for=192.0.2.43, for=198.51.100.17;proto=https`, "https"}, // Multiple params before proto
{forwarded, `for=172.32.10.15; proto=https;by=127.0.0.1`, "https"}, // Space before proto
{forwarded, `for=192.0.2.60;proto=http;by=203.0.113.43`, "http"}, // Multiple params
}
for _, v := range headers {
req := &http.Request{
Header: http.Header{
v.key: []string{v.val},
},
}
res := getScheme(req)
if res != v.expected {
t.Fatalf("wrong header for %s: got %s want %s", v.key, res,
v.expected)
}
}
}
// Test the middleware end-to-end
func TestProxyHeaders(t *testing.T) {
rr := httptest.NewRecorder()
r := newRequest("GET", "/")
r.Header.Set(xForwardedFor, "8.8.8.8")
r.Header.Set(xForwardedProto, "https")
r.Header.Set(xForwardedHost, "google.com")
var (
addr string
proto string
host string
)
ProxyHeaders(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
addr = r.RemoteAddr
proto = r.URL.Scheme
host = r.Host
})).ServeHTTP(rr, r)
if rr.Code != http.StatusOK {
t.Fatalf("bad status: got %d want %d", rr.Code, http.StatusOK)
}
if addr != r.Header.Get(xForwardedFor) {
t.Fatalf("wrong address: got %s want %s", addr,
r.Header.Get(xForwardedFor))
}
if proto != r.Header.Get(xForwardedProto) {
t.Fatalf("wrong address: got %s want %s", proto,
r.Header.Get(xForwardedProto))
}
if host != r.Header.Get(xForwardedHost) {
t.Fatalf("wrong address: got %s want %s", host,
r.Header.Get(xForwardedHost))
}
}

91
vendor/github.com/gorilla/handlers/recovery.go generated vendored Normal file
View file

@ -0,0 +1,91 @@
package handlers
import (
"log"
"net/http"
"runtime/debug"
)
// RecoveryHandlerLogger is an interface used by the recovering handler to print logs.
type RecoveryHandlerLogger interface {
Println(...interface{})
}
type recoveryHandler struct {
handler http.Handler
logger RecoveryHandlerLogger
printStack bool
}
// RecoveryOption provides a functional approach to defining
// configuration for a handler, such as setting the logger or
// whether or not to print stack traces on panic.
type RecoveryOption func(http.Handler)
func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler {
for _, option := range opts {
option(h)
}
return h
}
// RecoveryHandler is HTTP middleware that recovers from a panic,
// logs the panic, writes http.StatusInternalServerError, and
// continues to the next handler.
//
// Example:
//
// r := mux.NewRouter()
// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
// panic("Unexpected error!")
// })
//
// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r))
func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
r := &recoveryHandler{handler: h}
return parseRecoveryOptions(r, opts...)
}
}
// RecoveryLogger is a functional option to override
// the default logger
func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption {
return func(h http.Handler) {
r := h.(*recoveryHandler)
r.logger = logger
}
}
// PrintRecoveryStack is a functional option to enable
// or disable printing stack traces on panic.
func PrintRecoveryStack(print bool) RecoveryOption {
return func(h http.Handler) {
r := h.(*recoveryHandler)
r.printStack = print
}
}
func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
defer func() {
if err := recover(); err != nil {
w.WriteHeader(http.StatusInternalServerError)
h.log(err)
}
}()
h.handler.ServeHTTP(w, req)
}
func (h recoveryHandler) log(v ...interface{}) {
if h.logger != nil {
h.logger.Println(v...)
} else {
log.Println(v...)
}
if h.printStack {
debug.PrintStack()
}
}
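The doc comment above shows RecoveryHandler without options; a sketch using the two functional options defined in this file (the logger, port and panicking handler are assumptions) could look like this:

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/gorilla/handlers"
)

func main() {
	boom := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		panic("unexpected error")
	})

	logger := log.New(os.Stderr, "recovered: ", log.LstdFlags)

	// The panic is logged through the custom logger, the stack trace is
	// suppressed, and the client receives a 500 instead of a dropped
	// connection.
	recovery := handlers.RecoveryHandler(
		handlers.RecoveryLogger(logger),
		handlers.PrintRecoveryStack(false),
	)

	http.ListenAndServe(":8080", recovery(boom))
}
```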

44
vendor/github.com/gorilla/handlers/recovery_test.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
package handlers
import (
"bytes"
"log"
"net/http"
"net/http/httptest"
"strings"
"testing"
)
func TestRecoveryLoggerWithDefaultOptions(t *testing.T) {
var buf bytes.Buffer
log.SetOutput(&buf)
handler := RecoveryHandler()
handlerFunc := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
panic("Unexpected error!")
})
recovery := handler(handlerFunc)
recovery.ServeHTTP(httptest.NewRecorder(), newRequest("GET", "/subdir/asdf"))
if !strings.Contains(buf.String(), "Unexpected error!") {
t.Fatalf("Got log %#v, wanted substring %#v", buf.String(), "Unexpected error!")
}
}
func TestRecoveryLoggerWithCustomLogger(t *testing.T) {
var buf bytes.Buffer
var logger = log.New(&buf, "", log.LstdFlags)
handler := RecoveryHandler(RecoveryLogger(logger), PrintRecoveryStack(false))
handlerFunc := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
panic("Unexpected error!")
})
recovery := handler(handlerFunc)
recovery.ServeHTTP(httptest.NewRecorder(), newRequest("GET", "/subdir/asdf"))
if !strings.Contains(buf.String(), "Unexpected error!") {
t.Fatalf("Got log %#v, wanted substring %#v", buf.String(), "Unexpected error!")
}
}

View file

@ -1,2 +1,7 @@
language: go
install: go get -t
install:
- go get -t
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN

View file

@ -2,17 +2,17 @@
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche.
![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg)
Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
## Status
It is ready for production use. It works fine after extensive use in the wild.
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
[![Build Status][1]][2]
[![GoDoc][3]][4]
[![GoCard][5]][6]
[![Build Status][1]][2]
[![Coverage Status][7]][8]
[![Sourcegraph][9]][10]
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
@ -20,19 +20,54 @@ It is ready for production use. It works fine after extensive use in the wild.
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://goreportcard.com/badge/imdario/mergo
[6]: https://goreportcard.com/report/github.com/imdario/mergo
[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
[8]: https://coveralls.io/github/imdario/mergo?branch=master
[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
### Latest release
[Release v0.3.4](https://github.com/imdario/mergo/releases/tag/v0.3.4).
### Important note
Mergo is intended to assign **only** zero value fields on destination with source value. Since April 6th it works like this. Before it didn't work properly, causing some random overwrites. After some issues and PRs I found it didn't merge as I designed it. Thanks to [imdario/mergo#8](https://github.com/imdario/mergo/pull/8) overwriting functions were added and the wrong behavior was clearly detected.
Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
### Donations
If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
### Mergo in the wild
- [docker/docker](https://github.com/docker/docker/)
- [moby/moby](https://github.com/moby/moby)
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [vmware/dispatch](https://github.com/vmware/dispatch)
- [Shopify/themekit](https://github.com/Shopify/themekit)
- [imdario/zas](https://github.com/imdario/zas)
- [matcornic/hermes](https://github.com/matcornic/hermes)
- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
- [kataras/iris](https://github.com/kataras/iris)
- [michaelsauter/crane](https://github.com/michaelsauter/crane)
- [go-task/task](https://github.com/go-task/task)
- [sensu/uchiwa](https://github.com/sensu/uchiwa)
- [ory/hydra](https://github.com/ory/hydra)
- [sisatech/vcli](https://github.com/sisatech/vcli)
- [dairycart/dairycart](https://github.com/dairycart/dairycart)
- [projectcalico/felix](https://github.com/projectcalico/felix)
- [resin-os/balena](https://github.com/resin-os/balena)
- [go-kivik/kivik](https://github.com/go-kivik/kivik)
- [Telefonica/govice](https://github.com/Telefonica/govice)
- [supergiant/supergiant](supergiant/supergiant)
- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
- [EagerIO/Stout](https://github.com/EagerIO/Stout)
- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
- [russross/canvasassignments](https://github.com/russross/canvasassignments)
@ -50,7 +85,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor
- [thoas/picfit](https://github.com/thoas/picfit)
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [Iris Web Framework](https://github.com/kataras/iris)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
## Installation
@ -63,7 +98,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor
## Usage
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
```go
if err := mergo.Merge(&dst, src); err != nil {
@ -71,15 +106,15 @@ if err := mergo.Merge(&dst, src); err != nil {
}
```
Also, you can merge overwriting values using the transformer WithOverride.
Also, you can merge overwriting values using the transformer `WithOverride`.
```go
if err := mergo.Merge(&dst, src, WithOverride); err != nil {
if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
// ...
}
```
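This vendor bump also introduces a `WithAppendSlice` option (see `merge.go` further below), which is not yet documented in this README. A minimal sketch, assuming the exported `Config`-based option API from that diff:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type profile struct {
	Name string
	Tags []string
}

func main() {
	dst := profile{Name: "base", Tags: []string{"a"}}
	src := profile{Name: "override", Tags: []string{"b", "c"}}

	// With WithOverride alone the src slice would replace dst.Tags;
	// adding WithAppendSlice appends instead.
	if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		fmt.Println("merge failed:", err)
		return
	}

	fmt.Println(dst.Name, dst.Tags) // expected: override [a b c]
}
```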
Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
```go
if err := mergo.Map(&dst, srcMap); err != nil {
@ -134,6 +169,7 @@ package main
import (
"fmt"
"github.com/imdario/mergo"
"reflect"
"time"
)
@ -151,6 +187,7 @@ func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Valu
dst.Set(src)
}
}
return nil
}
}
return nil
@ -164,7 +201,7 @@ type Snapshot struct {
func main() {
src := Snapshot{time.Now()}
dest := Snapshot{}
mergo.Merge(&dest, src, WithTransformers(timeTransfomer{}))
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
fmt.Println(dest)
// Will print
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }

View file

@ -21,7 +21,7 @@ func TestIssue23MergeWithOverwrite(t *testing.T) {
if err := MergeWithOverwrite(&dst, src); err != nil {
t.Errorf("Error while merging %s", err)
}
if dst.Created != src.Created {
if !dst.Created.Equal(*src.Created) { //--> https://golang.org/pkg/time/#pkg-overview
t.Fatalf("Created not merged in properly: dst.Created(%v) != src.Created(%v)", dst.Created, src.Created)
}
}

View file

@ -28,6 +28,6 @@ func TestIssue33Merge(t *testing.T) {
t.Errorf("Error while merging: %s", err)
}
if dest.Str != toMerge.Str {
t.Errorf("dest.Str should have been override: dest.Str(%v) == toMerge.Str(%v)", dest.Str, toMerge.Str)
t.Errorf("dest.Str should have been override: dest.Str(%v) != toMerge.Str(%v)", dest.Str, toMerge.Str)
}
}

20
vendor/github.com/imdario/mergo/issue61_test.go generated vendored Normal file
View file

@ -0,0 +1,20 @@
package mergo
import (
"reflect"
"testing"
)
func TestIssue61MergeNilMap(t *testing.T) {
type T struct {
I map[string][]string
}
t1 := T{}
t2 := T{I: map[string][]string{"hi": {"there"}}}
if err := Merge(&t1, t2); err != nil {
t.Fail()
}
if !reflect.DeepEqual(t2, T{I: map[string][]string{"hi": {"there"}}}) {
t.FailNow()
}
}

38
vendor/github.com/imdario/mergo/issue64_test.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
package mergo
import (
"testing"
)
type Student struct {
Name string
Books []string
}
var testData = []struct {
S1 Student
S2 Student
ExpectedSlice []string
}{
{Student{"Jack", []string{"a", "B"}}, Student{"Tom", []string{"1"}}, []string{"a", "B"}},
{Student{"Jack", []string{"a", "B"}}, Student{"Tom", []string{}}, []string{"a", "B"}},
{Student{"Jack", []string{}}, Student{"Tom", []string{"1"}}, []string{"1"}},
{Student{"Jack", []string{}}, Student{"Tom", []string{}}, []string{}},
}
func TestIssue64MergeSliceWithOverride(t *testing.T) {
for _, data := range testData {
err := Merge(&data.S2, data.S1, WithOverride)
if err != nil {
t.Errorf("Error while merging %s", err)
}
if len(data.S2.Books) != len(data.ExpectedSlice) {
t.Fatalf("Got %d elements in slice, but expected %d", len(data.S2.Books), len(data.ExpectedSlice))
}
for i, val := range data.S2.Books {
if val != data.ExpectedSlice[i] {
t.Fatalf("Expected %s, but got %s while merging slice with override", data.ExpectedSlice[i], val)
}
}
}
}

48
vendor/github.com/imdario/mergo/issue66_test.go generated vendored Normal file
View file

@ -0,0 +1,48 @@
package mergo
import (
"testing"
)
type PrivateSliceTest66 struct {
PublicStrings []string
privateStrings []string
}
func TestPrivateSlice(t *testing.T) {
p1 := PrivateSliceTest66{
PublicStrings: []string{"one", "two", "three"},
privateStrings: []string{"four", "five"},
}
p2 := PrivateSliceTest66{
PublicStrings: []string{"six", "seven"},
}
if err := Merge(&p1, p2); err != nil {
t.Fatalf("Error during the merge: %v", err)
}
if len(p1.PublicStrings) != 3 {
t.Error("5 elements should be in 'PublicStrings' field")
}
if len(p1.privateStrings) != 2 {
t.Error("2 elements should be in 'privateStrings' field")
}
}
func TestPrivateSliceWithAppendSlice(t *testing.T) {
p1 := PrivateSliceTest66{
PublicStrings: []string{"one", "two", "three"},
privateStrings: []string{"four", "five"},
}
p2 := PrivateSliceTest66{
PublicStrings: []string{"six", "seven"},
}
if err := Merge(&p1, p2, WithAppendSlice); err != nil {
t.Fatalf("Error during the merge: %v", err)
}
if len(p1.PublicStrings) != 5 {
t.Error("5 elements should be in 'PublicStrings' field")
}
if len(p1.privateStrings) != 2 {
t.Error("2 elements should be in 'privateStrings' field")
}
}

View file

@ -31,8 +31,8 @@ func isExported(field reflect.StructField) bool {
// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *config) (err error) {
overwrite := config.overwrite
func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
overwrite := config.Overwrite
if dst.CanAddr() {
addr := dst.UnsafeAddr()
h := 17 * addr
@ -128,23 +128,23 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
// doesn't apply if dst is a map.
// This is separated method from Merge because it is cleaner and it keeps sane
// semantics: merging equal types, mapping different (restricted) types.
func Map(dst, src interface{}, opts ...func(*config)) error {
func Map(dst, src interface{}, opts ...func(*Config)) error {
return _map(dst, src, opts...)
}
// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overriden by
// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
// non-empty src attribute values.
// Deprecated: Use Map(…) with WithOverride
func MapWithOverwrite(dst, src interface{}, opts ...func(*config)) error {
func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
return _map(dst, src, append(opts, WithOverride)...)
}
func _map(dst, src interface{}, opts ...func(*config)) error {
func _map(dst, src interface{}, opts ...func(*Config)) error {
var (
vDst, vSrc reflect.Value
err error
)
config := &config{}
config := &Config{}
for _, opt := range opts {
opt(config)

View file

@ -8,7 +8,9 @@
package mergo
import "reflect"
import (
"reflect"
)
func hasExportedField(dst reflect.Value) (exported bool) {
for i, n := 0, dst.NumField(); i < n; i++ {
@ -22,20 +24,21 @@ func hasExportedField(dst reflect.Value) (exported bool) {
return
}
type config struct {
overwrite bool
transformers transformers
type Config struct {
Overwrite bool
AppendSlice bool
Transformers Transformers
}
type transformers interface {
type Transformers interface {
Transformer(reflect.Type) func(dst, src reflect.Value) error
}
// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *config) (err error) {
overwrite := config.overwrite
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
overwrite := config.Overwrite
if !src.IsValid() {
return
@ -54,8 +57,8 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
visited[h] = &visit{addr, typ, seen}
}
if config.transformers != nil {
if fn := config.transformers.Transformer(dst.Type()); fn != nil {
if config.Transformers != nil && !isEmptyValue(dst) {
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
err = fn(dst, src)
return
}
@ -75,9 +78,8 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
}
case reflect.Map:
if len(src.MapKeys()) == 0 && !src.IsNil() && len(dst.MapKeys()) == 0 {
if dst.IsNil() && !src.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
return
}
for _, key := range src.MapKeys() {
srcElement := src.MapIndex(key)
@ -86,7 +88,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
dstElement := dst.MapIndex(key)
switch srcElement.Kind() {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
if srcElement.IsNil() {
continue
}
@ -101,7 +103,15 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
case reflect.Ptr:
fallthrough
case reflect.Map:
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
srcMapElm := srcElement
dstMapElm := dstElement
if srcMapElm.CanInterface() {
srcMapElm = reflect.ValueOf(srcMapElm.Interface())
if dstMapElm.IsValid() {
dstMapElm = reflect.ValueOf(dstMapElm.Interface())
}
}
if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
return
}
case reflect.Slice:
@ -114,7 +124,11 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dstSlice = reflect.ValueOf(dstElement.Interface())
}
dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
dstSlice = srcSlice
} else if config.AppendSlice {
dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
}
dst.SetMapIndex(key, dstSlice)
}
}
@ -122,7 +136,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
continue
}
if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) {
if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) {
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
@ -130,7 +144,14 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
}
case reflect.Slice:
dst.Set(reflect.AppendSlice(dst, src))
if !dst.CanSet() {
break
}
if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
dst.Set(src)
} else if config.AppendSlice {
dst.Set(reflect.AppendSlice(dst, src))
}
case reflect.Ptr:
fallthrough
case reflect.Interface:
@ -174,36 +195,41 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
// src attributes if they themselves are not empty. dst and src must be valid same-type structs
// and dst must be a pointer to struct.
// It won't merge unexported (private) fields and will do recursively any exported field.
func Merge(dst, src interface{}, opts ...func(*config)) error {
func Merge(dst, src interface{}, opts ...func(*Config)) error {
return merge(dst, src, opts...)
}
// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
// non-empty src attribute values.
// Deprecated: use Merge(…) with WithOverride
func MergeWithOverwrite(dst, src interface{}, opts ...func(*config)) error {
func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
return merge(dst, src, append(opts, WithOverride)...)
}
// WithTransformers adds transformers to merge, allowing to customize the merging of some types.
func WithTransformers(transformers transformers) func(*config) {
return func(config *config) {
config.transformers = transformers
func WithTransformers(transformers Transformers) func(*Config) {
return func(config *Config) {
config.Transformers = transformers
}
}
// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
func WithOverride(config *config) {
config.overwrite = true
func WithOverride(config *Config) {
config.Overwrite = true
}
func merge(dst, src interface{}, opts ...func(*config)) error {
// WithAppendSlice will make merge append slices instead of overwriting them
func WithAppendSlice(config *Config) {
config.AppendSlice = true
}
func merge(dst, src interface{}, opts ...func(*Config)) error {
var (
vDst, vSrc reflect.Value
err error
)
config := &config{}
config := &Config{}
for _, opt := range opts {
opt(config)

View file

@ -0,0 +1,33 @@
package mergo
import (
"testing"
)
var testDataS = []struct {
S1 Student
S2 Student
ExpectedSlice []string
}{
{Student{"Jack", []string{"a", "B"}}, Student{"Tom", []string{"1"}}, []string{"1", "a", "B"}},
{Student{"Jack", []string{"a", "B"}}, Student{"Tom", []string{}}, []string{"a", "B"}},
{Student{"Jack", []string{}}, Student{"Tom", []string{"1"}}, []string{"1"}},
{Student{"Jack", []string{}}, Student{"Tom", []string{}}, []string{}},
}
func TestMergeSliceWithOverrideWithAppendSlice(t *testing.T) {
for _, data := range testDataS {
err := Merge(&data.S2, data.S1, WithOverride, WithAppendSlice)
if err != nil {
t.Errorf("Error while merging %s", err)
}
if len(data.S2.Books) != len(data.ExpectedSlice) {
t.Fatalf("Got %d elements in slice, but expected %d", len(data.S2.Books), len(data.ExpectedSlice))
}
for i, val := range data.S2.Books {
if val != data.ExpectedSlice[i] {
t.Fatalf("Expected %s, but got %s while merging slice with override", data.ExpectedSlice[i], val)
}
}
}
}

50
vendor/github.com/imdario/mergo/merge_test.go generated vendored Normal file
View file

@ -0,0 +1,50 @@
package mergo
import (
"reflect"
"testing"
)
type transformer struct {
m map[reflect.Type]func(dst, src reflect.Value) error
}
func (s *transformer) Transformer(t reflect.Type) func(dst, src reflect.Value) error {
if fn, ok := s.m[t]; ok {
return fn
}
return nil
}
type foo struct {
s string
Bar *bar
}
type bar struct {
i int
s map[string]string
}
func TestMergeWithTransformerNilStruct(t *testing.T) {
a := foo{s: "foo"}
b := foo{Bar: &bar{i: 2, s: map[string]string{"foo": "bar"}}}
if err := Merge(&a, &b, WithOverride, WithTransformers(&transformer{
m: map[reflect.Type]func(dst, src reflect.Value) error{
reflect.TypeOf(&bar{}): func(dst, src reflect.Value) error {
// Do something with Elem
t.Log(dst.Elem().FieldByName("i"))
t.Log(src.Elem())
return nil
},
},
})); err != nil {
t.Fatal(err)
}
if a.s != "foo" {
t.Fatalf("b not merged in properly: a.s.Value(%s) != expected(%s)", a.s, "foo")
}
if a.Bar == nil {
t.Fatalf("b not merged in properly: a.Bar shouldn't be nil")
}
}

View file

@ -45,8 +45,15 @@ func isEmptyValue(v reflect.Value) bool {
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr, reflect.Func:
case reflect.Interface, reflect.Ptr:
if v.IsNil() {
return true
}
return isEmptyValue(v.Elem())
case reflect.Func:
return v.IsNil()
case reflect.Invalid:
return true
}
return false
}


@ -6,11 +6,12 @@
package mergo
import (
"gopkg.in/yaml.v2"
"io/ioutil"
"reflect"
"testing"
"time"
"gopkg.in/yaml.v2"
)
type simpleTest struct {
@ -225,13 +226,13 @@ func TestPointerStructNil(t *testing.T) {
}
}
func testSlice(t *testing.T, a []int, b []int) {
func testSlice(t *testing.T, a []int, b []int, e []int, opts ...func(*Config)) {
t.Helper()
bc := b
e := append(a, b...)
sa := sliceTest{a}
sb := sliceTest{b}
if err := Merge(&sa, sb); err != nil {
if err := Merge(&sa, sb, opts...); err != nil {
t.FailNow()
}
if !reflect.DeepEqual(sb.S, bc) {
@ -243,14 +244,14 @@ func testSlice(t *testing.T, a []int, b []int) {
ma := map[string][]int{"S": a}
mb := map[string][]int{"S": b}
if err := Merge(&ma, mb); err != nil {
if err := Merge(&ma, mb, opts...); err != nil {
t.FailNow()
}
if !reflect.DeepEqual(mb["S"], bc) {
t.Fatalf("Source slice was modified %d != %d", mb["S"], bc)
t.Fatalf("map value: Source slice was modified %d != %d", mb["S"], bc)
}
if !reflect.DeepEqual(ma["S"], e) {
t.Fatalf("b not merged in a proper way %d != %d", ma["S"], e)
t.Fatalf("map value: b not merged in a proper way %d != %d", ma["S"], e)
}
if a == nil {
@ -261,10 +262,10 @@ func testSlice(t *testing.T, a []int, b []int) {
t.FailNow()
}
if !reflect.DeepEqual(mb["S"], bc) {
t.Fatalf("Source slice was modified %d != %d", mb["S"], bc)
t.Fatalf("missing dst key: Source slice was modified %d != %d", mb["S"], bc)
}
if !reflect.DeepEqual(ma["S"], e) {
t.Fatalf("b not merged in a proper way %d != %d", ma["S"], e)
t.Fatalf("missing dst key: b not merged in a proper way %d != %d", ma["S"], e)
}
}
@ -276,20 +277,25 @@ func testSlice(t *testing.T, a []int, b []int) {
t.FailNow()
}
if !reflect.DeepEqual(mb["S"], bc) {
t.Fatalf("Source slice was modified %d != %d", mb["S"], bc)
t.Fatalf("missing src key: Source slice was modified %d != %d", mb["S"], bc)
}
if !reflect.DeepEqual(ma["S"], e) {
t.Fatalf("b not merged in a proper way %d != %d", ma["S"], e)
t.Fatalf("missing src key: b not merged in a proper way %d != %d", ma["S"], e)
}
}
}
func TestSlice(t *testing.T) {
testSlice(t, nil, []int{1, 2, 3})
testSlice(t, []int{}, []int{1, 2, 3})
testSlice(t, []int{1}, []int{2, 3})
testSlice(t, []int{1}, []int{})
testSlice(t, []int{1}, nil)
testSlice(t, nil, []int{1, 2, 3}, []int{1, 2, 3})
testSlice(t, []int{}, []int{1, 2, 3}, []int{1, 2, 3})
testSlice(t, []int{1}, []int{2, 3}, []int{1})
testSlice(t, []int{1}, []int{}, []int{1})
testSlice(t, []int{1}, nil, []int{1})
testSlice(t, nil, []int{1, 2, 3}, []int{1, 2, 3}, WithAppendSlice)
testSlice(t, []int{}, []int{1, 2, 3}, []int{1, 2, 3}, WithAppendSlice)
testSlice(t, []int{1}, []int{2, 3}, []int{1, 2, 3}, WithAppendSlice)
testSlice(t, []int{1}, []int{}, []int{1}, WithAppendSlice)
testSlice(t, []int{1}, nil, []int{1}, WithAppendSlice)
}
func TestEmptyMaps(t *testing.T) {
@ -405,6 +411,30 @@ func TestMaps(t *testing.T) {
}
}
func TestMapsWithNilPointer(t *testing.T) {
m := map[string]*simpleTest{
"a": nil,
"b": nil,
}
n := map[string]*simpleTest{
"b": nil,
"c": nil,
}
expect := map[string]*simpleTest{
"a": nil,
"b": nil,
"c": nil,
}
if err := Merge(&m, n, WithOverride); err != nil {
t.Fatalf(err.Error())
}
if !reflect.DeepEqual(m, expect) {
t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", m, expect)
}
}
func TestYAMLMaps(t *testing.T) {
thing := loadYAML("testdata/thing.yml")
license := loadYAML("testdata/license.yml")
@ -666,10 +696,10 @@ type structWithUnexportedProperty struct {
func TestUnexportedProperty(t *testing.T) {
a := structWithMap{map[string]structWithUnexportedProperty{
"key": structWithUnexportedProperty{"hello"},
"key": {"hello"},
}}
b := structWithMap{map[string]structWithUnexportedProperty{
"key": structWithUnexportedProperty{"hi"},
"key": {"hi"},
}}
defer func() {
if r := recover(); r != nil {

vendor/github.com/imdario/mergo/pr80_test.go generated vendored Normal file

@ -0,0 +1,18 @@
package mergo
import (
"testing"
)
type mapInterface map[string]interface{}
func TestMergeMapsEmptyString(t *testing.T) {
a := mapInterface{"s": ""}
b := mapInterface{"s": "foo"}
if err := Merge(&a, b); err != nil {
t.Fatal(err)
}
if a["s"] != "foo" {
t.Fatalf("b not merged in properly: a.s.Value(%s) != expected(%s)", a["s"], "foo")
}
}

vendor/github.com/imdario/mergo/pr81_test.go generated vendored Normal file

@ -0,0 +1,42 @@
package mergo
import (
"testing"
)
func TestMapInterfaceWithMultipleLayer(t *testing.T) {
m1 := map[string]interface{}{
"k1": map[string]interface{}{
"k1.1": "v1",
},
}
m2 := map[string]interface{}{
"k1": map[string]interface{}{
"k1.1": "v2",
"k1.2": "v3",
},
}
if err := Map(&m1, m2, WithOverride); err != nil {
t.Fatalf("Error merging: %v", err)
}
// Check overwrite of sub map works
expected := "v2"
actual := m1["k1"].(map[string]interface{})["k1.1"].(string)
if actual != expected {
t.Fatalf("Expected %v but got %v",
expected,
actual)
}
// Check new key is merged
expected = "v3"
actual = m1["k1"].(map[string]interface{})["k1.2"].(string)
if actual != expected {
t.Fatalf("Expected %v but got %v",
expected,
actual)
}
}

vendor/github.com/kr/text/License generated vendored Normal file

@ -0,0 +1,19 @@
Copyright 2012 Keith Rarick
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/kr/text/Readme generated vendored Normal file

@ -0,0 +1,3 @@
This is a Go package for manipulating paragraphs of text.
See http://go.pkgdoc.org/github.com/kr/text for full documentation.

vendor/github.com/kr/text/cmd/agg/doc.go generated vendored Normal file

@ -0,0 +1,73 @@
/*
Agg computes aggregate values over tabular text.
It behaves somewhat like the SQL GROUP BY clause.
Usage:
agg [function...]
It reads input from stdin as a sequence of records, one per line.
It treats each line as a set of fields separated by white space.
One field (the first, by default) is designated as the key.
Successive lines with equal keys are grouped into a group,
and agg produces one line of output for each group.
(Note that only contiguous input lines can form a group.
If you need to make sure that all records for a given key
are grouped together, sort the input first.)
For each remaining field,
agg applies a function to all the values in the group,
producing a single output value.
The command line arguments specify which functions to use,
one per field in the input table.
Functions
The available functions are:
key group by this field (default for field 1)
first value from first line of group (default for rest)
last value from last line of group
sample value from any line of group, uniformly at random
prefix longest common string prefix
join:sep concatenate strings with given sep
smin lexically least string
smax lexically greatest string
min numerically least value
max numerically greatest value
sum numeric sum
mean arithmetic mean
count number of records (ignores input value)
const:val print val, ignoring input
drop omit the column entirely
The numeric functions skip items that don't parse as numbers.
Examples
Using the following input:
$ cat >input
-rwx alice 100 /home/alice/bin/crdt
-rw- alice 210002 /home/alice/thesis.tex
-rw- bob 10051 /home/bob/expenses.tab
-rwx kr 862060 /home/kr/bin/blog
-rwx kr 304608 /home/kr/bin/agg
Disk usage for each user, plus where that disk usage occurs
(longest common prefix of filesystem paths):
$ agg <input drop key sum prefix
alice 210153 /home/alice/
bob 10051 /home/bob/expenses.tab
kr 1166668 /home/kr/
Disk usage for executable vs non-executable files:
$ sort input | agg key drop sum join:,
-rw- 220053 /home/alice/thesis.tex,/home/bob/expenses.tab
-rwx 1166768 /home/alice/bin/crdt,/home/kr/bin/agg,/home/kr/bin/blog
*/
package main

vendor/github.com/kr/text/cmd/agg/main.go generated vendored Normal file

@ -0,0 +1,112 @@
package main
// TODO(kr): tests
import (
"bufio"
"fmt"
"log"
"math/rand"
"os"
"strings"
"time"
)
type agg interface {
merge(string)
String() string
}
var (
key = 0
funcmap = make(map[int]func(init, arg string) agg)
argmap = make(map[int]string)
symtab = map[string]func(init, arg string) agg{
"first": first,
"last": last,
"prefix": prefix,
"sample": sample,
"join": join,
"smin": smin,
"smax": smax,
"min": min,
"max": max,
"sum": sum,
"mean": mean,
"count": count,
"const": constf,
"drop": nil,
}
)
func main() {
log.SetPrefix("agg: ")
log.SetFlags(0)
rand.Seed(time.Now().UnixNano())
for i, sym := range os.Args[1:] {
if p := strings.IndexByte(sym, ':'); p >= 0 {
sym, argmap[i] = sym[:p], sym[p+1:]
}
if sym == "key" {
key, sym = i, "first"
}
f, ok := symtab[sym]
if !ok {
log.Fatalf("bad function: %q", sym)
}
funcmap[i] = f
}
sc := bufio.NewScanner(os.Stdin)
var g *group
for sc.Scan() {
ss := strings.Fields(sc.Text())
if !matches(g, ss) {
emit(g)
g = &group{key: ss[key]}
}
mergeLine(g, ss)
}
emit(g)
}
type group struct {
key string
agg []agg
}
func matches(g *group, ss []string) bool {
return g != nil && g.key == ss[key]
}
func emit(g *group) {
if g == nil {
return
}
rest := false
for i, a := range g.agg {
if f, ok := funcmap[i]; ok && f == nil {
continue
}
if rest {
fmt.Print("\t")
}
rest = true
fmt.Print(a)
}
fmt.Println()
}
func mergeLine(g *group, ss []string) {
for i, s := range ss {
if i >= len(g.agg) {
f := funcmap[i]
if f == nil {
f = first
}
g.agg = append(g.agg, f(s, argmap[i]))
} else {
g.agg[i].merge(s)
}
}
}

vendor/github.com/kr/text/cmd/agg/num.go generated vendored Normal file

@ -0,0 +1,99 @@
package main
import (
"math/big"
"strconv"
)
func min(s, arg string) agg { return newBinop(s, opmin) }
func max(s, arg string) agg { return newBinop(s, opmax) }
func sum(s, arg string) agg { return newBinop(s, opsum) }
type binop struct {
v *big.Float
f func(a, b *big.Float) *big.Float
}
func newBinop(s string, f func(a, b *big.Float) *big.Float) *binop {
v, _ := parseFloat(s)
return &binop{v, f}
}
func (o *binop) String() string {
if o.v == nil {
return "NaN"
}
return o.v.Text('f', -1)
}
func (o *binop) merge(s string) {
v, ok := parseFloat(s)
if !ok {
return
}
o.v = o.f(o.v, v)
}
func opmin(a, b *big.Float) *big.Float {
if a != nil && (b == nil || a.Cmp(b) <= 0) {
return a
}
return b
}
func opmax(a, b *big.Float) *big.Float {
if a != nil && (b == nil || a.Cmp(b) >= 0) {
return a
}
return b
}
func opsum(a, b *big.Float) *big.Float {
if a == nil {
return b
} else if b == nil {
return a
}
return a.Add(a, b)
}
type meanagg struct {
v *big.Float
d float64 // actually an integer
}
func mean(s, arg string) agg {
v, ok := parseFloat(s)
if !ok {
return &meanagg{new(big.Float), 0}
}
return &meanagg{v, 1}
}
func (m *meanagg) String() string {
if m.d == 0 {
return "NaN"
}
v := new(big.Float).Quo(m.v, big.NewFloat(m.d))
return v.Text('f', -1)
}
func (m *meanagg) merge(s string) {
v, ok := parseFloat(s)
if !ok {
return
}
m.v.Add(m.v, v)
m.d++
}
func parseFloat(s string) (*big.Float, bool) {
v, _, err := big.ParseFloat(s, 0, 1000, big.ToNearestEven)
return v, err == nil
}
type counter int
func count(init, arg string) agg { return new(counter) }
func (c *counter) String() string { return strconv.Itoa(int(*c) + 1) }
func (c *counter) merge(string) { *c++ }

vendor/github.com/kr/text/cmd/agg/string.go generated vendored Normal file

@ -0,0 +1,74 @@
package main
import (
"math/rand"
"strings"
)
func first(s, arg string) agg { return &sbinop{s, opfirst} }
func last(s, arg string) agg { return &sbinop{s, oplast} }
func prefix(s, arg string) agg { return &sbinop{s, opprefix} }
func join(s, arg string) agg { return &sbinop{s, opjoin(arg)} }
func smin(s, arg string) agg { return &sbinop{s, opsmin} }
func smax(s, arg string) agg { return &sbinop{s, opsmax} }
type sbinop struct {
s string
f func(a, b string) string
}
func (o *sbinop) String() string { return o.s }
func (o *sbinop) merge(s string) { o.s = o.f(o.s, s) }
func opfirst(a, b string) string { return a }
func oplast(a, b string) string { return b }
func opprefix(a, b string) string {
for i := range a {
if i >= len(b) || a[i] != b[i] {
return a[:i]
}
}
return a
}
func opjoin(sep string) func(a, b string) string {
return func(a, b string) string {
return a + sep + b // TODO(kr): too slow? maybe strings.Join?
}
}
func opsmin(a, b string) string {
if strings.Compare(a, b) <= 0 {
return a
}
return b
}
func opsmax(a, b string) string {
if strings.Compare(a, b) >= 0 {
return a
}
return b
}
type sampler struct {
n int
s string
}
func sample(s, arg string) agg { return &sampler{1, s} }
func (p *sampler) String() string { return p.s }
func (p *sampler) merge(s string) {
p.n++
if rand.Intn(p.n) == 0 {
p.s = s
}
}
type constant string
func constf(init, arg string) agg { return constant(arg) }
func (c constant) String() string { return string(c) }
func (c constant) merge(string) {}

vendor/github.com/kr/text/colwriter/Readme generated vendored Normal file

@ -0,0 +1,5 @@
Package colwriter provides a write filter that formats
input lines in multiple columns.
The package is a straightforward translation from
/src/cmd/draw/mc.c in Plan 9 from User Space.

vendor/github.com/kr/text/colwriter/column.go generated vendored Normal file

@ -0,0 +1,147 @@
// Package colwriter provides a write filter that formats
// input lines in multiple columns.
//
// The package is a straightforward translation from
// /src/cmd/draw/mc.c in Plan 9 from User Space.
package colwriter
import (
"bytes"
"io"
"unicode/utf8"
)
const (
tab = 4
)
const (
// Print each input line ending in a colon ':' separately.
BreakOnColon uint = 1 << iota
)
// A Writer is a filter that arranges input lines in as many columns as will
// fit in its width. Tab '\t' chars in the input are translated to sequences
// of spaces ending at multiples of 4 positions.
//
// If BreakOnColon is set, each input line ending in a colon ':' is written
// separately.
//
// The Writer assumes that all Unicode code points have the same width; this
// may not be true in some fonts.
type Writer struct {
w io.Writer
buf []byte
width int
flag uint
}
// NewWriter allocates and initializes a new Writer writing to w.
// Parameter width controls the total number of characters on each line
// across all columns.
func NewWriter(w io.Writer, width int, flag uint) *Writer {
return &Writer{
w: w,
width: width,
flag: flag,
}
}
// Write writes p to the writer w. The only errors returned are ones
// encountered while writing to the underlying output stream.
func (w *Writer) Write(p []byte) (n int, err error) {
var linelen int
var lastWasColon bool
for i, c := range p {
w.buf = append(w.buf, c)
linelen++
if c == '\t' {
w.buf[len(w.buf)-1] = ' '
for linelen%tab != 0 {
w.buf = append(w.buf, ' ')
linelen++
}
}
if w.flag&BreakOnColon != 0 && c == ':' {
lastWasColon = true
} else if lastWasColon {
if c == '\n' {
pos := bytes.LastIndex(w.buf[:len(w.buf)-1], []byte{'\n'})
if pos < 0 {
pos = 0
}
line := w.buf[pos:]
w.buf = w.buf[:pos]
if err = w.columnate(); err != nil {
if len(line) < i {
return i - len(line), err
}
return 0, err
}
if n, err := w.w.Write(line); err != nil {
if r := len(line) - n; r < i {
return i - r, err
}
return 0, err
}
}
lastWasColon = false
}
if c == '\n' {
linelen = 0
}
}
return len(p), nil
}
// Flush should be called after the last call to Write to ensure that any data
// buffered in the Writer is written to output.
func (w *Writer) Flush() error {
return w.columnate()
}
func (w *Writer) columnate() error {
words := bytes.Split(w.buf, []byte{'\n'})
w.buf = nil
if len(words[len(words)-1]) == 0 {
words = words[:len(words)-1]
}
maxwidth := 0
for _, wd := range words {
if n := utf8.RuneCount(wd); n > maxwidth {
maxwidth = n
}
}
maxwidth++ // space char
wordsPerLine := w.width / maxwidth
if wordsPerLine <= 0 {
wordsPerLine = 1
}
nlines := (len(words) + wordsPerLine - 1) / wordsPerLine
for i := 0; i < nlines; i++ {
col := 0
endcol := 0
for j := i; j < len(words); j += nlines {
endcol += maxwidth
_, err := w.w.Write(words[j])
if err != nil {
return err
}
col += utf8.RuneCount(words[j])
if j+nlines < len(words) {
for col < endcol {
_, err := w.w.Write([]byte{' '})
if err != nil {
return err
}
col++
}
}
}
_, err := w.w.Write([]byte{'\n'})
if err != nil {
return err
}
}
return nil
}
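
A minimal usage sketch of the colwriter API above (NewWriter, Write, Flush); the input lines and the 40-character width are arbitrary:

package main

import (
	"log"
	"os"

	"github.com/kr/text/colwriter"
)

func main() {
	// Arrange the input lines into as many columns as fit in 40 characters.
	w := colwriter.NewWriter(os.Stdout, 40, 0)
	if _, err := w.Write([]byte("alpha\nbeta\ngamma\ndelta\nepsilon\n")); err != nil {
		log.Fatal(err)
	}
	// Flush columnates and writes anything still buffered.
	if err := w.Flush(); err != nil {
		log.Fatal(err)
	}
}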

vendor/github.com/kr/text/colwriter/column_test.go generated vendored Normal file

@ -0,0 +1,90 @@
package colwriter
import (
"bytes"
"testing"
)
var src = `
.git
.gitignore
.godir
Procfile:
README.md
api.go
apps.go
auth.go
darwin.go
data.go
dyno.go:
env.go
git.go
help.go
hkdist
linux.go
ls.go
main.go
plugin.go
run.go
scale.go
ssh.go
tail.go
term
unix.go
update.go
version.go
windows.go
`[1:]
var tests = []struct {
wid int
flag uint
src string
want string
}{
{80, 0, "", ""},
{80, 0, src, `
.git README.md darwin.go git.go ls.go scale.go unix.go
.gitignore api.go data.go help.go main.go ssh.go update.go
.godir apps.go dyno.go: hkdist plugin.go tail.go version.go
Procfile: auth.go env.go linux.go run.go term windows.go
`[1:]},
{80, BreakOnColon, src, `
.git .gitignore .godir
Procfile:
README.md api.go apps.go auth.go darwin.go data.go
dyno.go:
env.go hkdist main.go scale.go term version.go
git.go linux.go plugin.go ssh.go unix.go windows.go
help.go ls.go run.go tail.go update.go
`[1:]},
{20, 0, `
Hello
Γειά σου
안녕
今日は
`[1:], `
Hello 안녕
Γειά σου 今日は
`[1:]},
}
func TestWriter(t *testing.T) {
for _, test := range tests {
b := new(bytes.Buffer)
w := NewWriter(b, test.wid, test.flag)
if _, err := w.Write([]byte(test.src)); err != nil {
t.Error(err)
}
if err := w.Flush(); err != nil {
t.Error(err)
}
if g := b.String(); test.want != g {
t.Log("\n" + test.want)
t.Log("\n" + g)
t.Errorf("%q != %q", test.want, g)
}
}
}

vendor/github.com/kr/text/doc.go generated vendored Normal file

@ -0,0 +1,3 @@
// Package text provides rudimentary functions for manipulating text in
// paragraphs.
package text

vendor/github.com/kr/text/go.mod generated vendored Normal file

@ -0,0 +1,3 @@
module "github.com/kr/text"
require "github.com/kr/pty" v1.1.1

vendor/github.com/kr/text/indent.go generated vendored Normal file

@ -0,0 +1,74 @@
package text
import (
"io"
)
// Indent inserts prefix at the beginning of each non-empty line of s. The
// end-of-line marker is NL.
func Indent(s, prefix string) string {
return string(IndentBytes([]byte(s), []byte(prefix)))
}
// IndentBytes inserts prefix at the beginning of each non-empty line of b.
// The end-of-line marker is NL.
func IndentBytes(b, prefix []byte) []byte {
var res []byte
bol := true
for _, c := range b {
if bol && c != '\n' {
res = append(res, prefix...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
// Writer indents each line of its input.
type indentWriter struct {
w io.Writer
bol bool
pre [][]byte
sel int
off int
}
// NewIndentWriter makes a new write filter that indents the input
// lines. Each line is prefixed in order with the corresponding
// element of pre. If there are more lines than elements, the last
// element of pre is repeated for each subsequent line.
func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer {
return &indentWriter{
w: w,
pre: pre,
bol: true,
}
}
// The only errors returned are from the underlying indentWriter.
func (w *indentWriter) Write(p []byte) (n int, err error) {
for _, c := range p {
if w.bol {
var i int
i, err = w.w.Write(w.pre[w.sel][w.off:])
w.off += i
if err != nil {
return n, err
}
}
_, err = w.w.Write([]byte{c})
if err != nil {
return n, err
}
n++
w.bol = c == '\n'
if w.bol {
w.off = 0
if w.sel < len(w.pre)-1 {
w.sel++
}
}
}
return n, nil
}
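
A minimal sketch of the Indent helper defined above; the paragraph and prefix are arbitrary:

package main

import (
	"fmt"

	"github.com/kr/text"
)

func main() {
	para := "first line\nsecond line\n"
	// Indent prefixes every non-empty line with the given string.
	fmt.Print(text.Indent(para, "> "))
	// Output:
	// > first line
	// > second line
}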

vendor/github.com/kr/text/indent_test.go generated vendored Normal file

@ -0,0 +1,119 @@
package text
import (
"bytes"
"testing"
)
type T struct {
inp, exp, pre string
}
var tests = []T{
{
"The quick brown fox\njumps over the lazy\ndog.\nBut not quickly.\n",
"xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\nxxxBut not quickly.\n",
"xxx",
},
{
"The quick brown fox\njumps over the lazy\ndog.\n\nBut not quickly.",
"xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\n\nxxxBut not quickly.",
"xxx",
},
}
func TestIndent(t *testing.T) {
for _, test := range tests {
got := Indent(test.inp, test.pre)
if got != test.exp {
t.Errorf("mismatch %q != %q", got, test.exp)
}
}
}
type IndentWriterTest struct {
inp, exp string
pre []string
}
var ts = []IndentWriterTest{
{
`
The quick brown fox
jumps over the lazy
dog.
But not quickly.
`[1:],
`
xxxThe quick brown fox
xxxjumps over the lazy
xxxdog.
xxxBut not quickly.
`[1:],
[]string{"xxx"},
},
{
`
The quick brown fox
jumps over the lazy
dog.
But not quickly.
`[1:],
`
xxaThe quick brown fox
xxxjumps over the lazy
xxxdog.
xxxBut not quickly.
`[1:],
[]string{"xxa", "xxx"},
},
{
`
The quick brown fox
jumps over the lazy
dog.
But not quickly.
`[1:],
`
xxaThe quick brown fox
xxbjumps over the lazy
xxcdog.
xxxBut not quickly.
`[1:],
[]string{"xxa", "xxb", "xxc", "xxx"},
},
{
`
The quick brown fox
jumps over the lazy
dog.
But not quickly.`[1:],
`
xxaThe quick brown fox
xxxjumps over the lazy
xxxdog.
xxx
xxxBut not quickly.`[1:],
[]string{"xxa", "xxx"},
},
}
func TestIndentWriter(t *testing.T) {
for _, test := range ts {
b := new(bytes.Buffer)
pre := make([][]byte, len(test.pre))
for i := range test.pre {
pre[i] = []byte(test.pre[i])
}
w := NewIndentWriter(b, pre...)
if _, err := w.Write([]byte(test.inp)); err != nil {
t.Error(err)
}
if got := b.String(); got != test.exp {
t.Errorf("mismatch %q != %q", got, test.exp)
t.Log(got)
t.Log(test.exp)
}
}
}

vendor/github.com/kr/text/mc/Readme generated vendored Normal file

@ -0,0 +1,9 @@
Command mc prints in multiple columns.
Usage: mc [-] [-N] [file...]
Mc splits the input into as many columns as will fit in N
print positions. If the output is a tty, the default N is
the number of characters in a terminal line; otherwise the
default N is 80. Under option - each input line ending in
a colon ':' is printed separately.

vendor/github.com/kr/text/mc/mc.go generated vendored Normal file

@ -0,0 +1,62 @@
// Command mc prints in multiple columns.
//
// Usage: mc [-] [-N] [file...]
//
// Mc splits the input into as many columns as will fit in N
// print positions. If the output is a tty, the default N is
// the number of characters in a terminal line; otherwise the
// default N is 80. Under option - each input line ending in
// a colon ':' is printed separately.
package main
import (
"github.com/kr/pty"
"github.com/kr/text/colwriter"
"io"
"log"
"os"
"strconv"
)
func main() {
var width int
var flag uint
args := os.Args[1:]
for len(args) > 0 && len(args[0]) > 0 && args[0][0] == '-' {
if len(args[0]) > 1 {
width, _ = strconv.Atoi(args[0][1:])
} else {
flag |= colwriter.BreakOnColon
}
args = args[1:]
}
if width < 1 {
_, width, _ = pty.Getsize(os.Stdout)
}
if width < 1 {
width = 80
}
w := colwriter.NewWriter(os.Stdout, width, flag)
if len(args) > 0 {
for _, s := range args {
if f, err := os.Open(s); err == nil {
copyin(w, f)
f.Close()
} else {
log.Println(err)
}
}
} else {
copyin(w, os.Stdin)
}
}
func copyin(w *colwriter.Writer, r io.Reader) {
if _, err := io.Copy(w, r); err != nil {
log.Println(err)
}
if err := w.Flush(); err != nil {
log.Println(err)
}
}

vendor/github.com/kr/text/wrap.go generated vendored Normal file

@ -0,0 +1,86 @@
package text
import (
"bytes"
"math"
)
var (
nl = []byte{'\n'}
sp = []byte{' '}
)
const defaultPenalty = 1e5
// Wrap wraps s into a paragraph of lines of length lim, with minimal
// raggedness.
func Wrap(s string, lim int) string {
return string(WrapBytes([]byte(s), lim))
}
// WrapBytes wraps b into a paragraph of lines of length lim, with minimal
// raggedness.
func WrapBytes(b []byte, lim int) []byte {
words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp)
var lines [][]byte
for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
lines = append(lines, bytes.Join(line, sp))
}
return bytes.Join(lines, nl)
}
// WrapWords is the low-level line-breaking algorithm, useful if you need more
// control over the details of the text wrapping process. For most uses, either
// Wrap or WrapBytes will be sufficient and more convenient.
//
// WrapWords splits a list of words into lines with minimal "raggedness",
// treating each byte as one unit, accounting for spc units between adjacent
// words on each line, and attempting to limit lines to lim units. Raggedness
// is the total error over all lines, where error is the square of the
// difference of the length of the line and lim. Too-long lines (which only
// happen when a single word is longer than lim units) have pen penalty units
// added to the error.
func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
n := len(words)
length := make([][]int, n)
for i := 0; i < n; i++ {
length[i] = make([]int, n)
length[i][i] = len(words[i])
for j := i + 1; j < n; j++ {
length[i][j] = length[i][j-1] + spc + len(words[j])
}
}
nbrk := make([]int, n)
cost := make([]int, n)
for i := range cost {
cost[i] = math.MaxInt32
}
for i := n - 1; i >= 0; i-- {
if length[i][n-1] <= lim || i == n-1 {
cost[i] = 0
nbrk[i] = n
} else {
for j := i + 1; j < n; j++ {
d := lim - length[i][j-1]
c := d*d + cost[j]
if length[i][j-1] > lim {
c += pen // too-long lines get a worse penalty
}
if c < cost[i] {
cost[i] = c
nbrk[i] = j
}
}
}
}
var lines [][][]byte
i := 0
for i < n {
lines = append(lines, words[i:nbrk[i]])
i = nbrk[i]
}
return lines
}
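
A minimal sketch of the Wrap helper defined above, reusing the sample sentence from wrap_test.go below; the 24-character limit is arbitrary:

package main

import (
	"fmt"

	"github.com/kr/text"
)

func main() {
	s := "The quick brown fox jumps over the lazy dog."
	// Wrap breaks the paragraph into lines of about 24 characters,
	// picking break points that minimize raggedness.
	fmt.Println(text.Wrap(s, 24))
}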

vendor/github.com/kr/text/wrap_test.go generated vendored Normal file

@ -0,0 +1,62 @@
package text
import (
"bytes"
"testing"
)
var text = "The quick brown fox jumps over the lazy dog."
func TestWrap(t *testing.T) {
exp := [][]string{
{"The", "quick", "brown", "fox"},
{"jumps", "over", "the", "lazy", "dog."},
}
words := bytes.Split([]byte(text), sp)
got := WrapWords(words, 1, 24, defaultPenalty)
if len(exp) != len(got) {
t.Fail()
}
for i := range exp {
if len(exp[i]) != len(got[i]) {
t.Fail()
}
for j := range exp[i] {
if exp[i][j] != string(got[i][j]) {
t.Fatal(i, exp[i][j], got[i][j])
}
}
}
}
func TestWrapNarrow(t *testing.T) {
exp := "The\nquick\nbrown\nfox\njumps\nover\nthe\nlazy\ndog."
if Wrap(text, 5) != exp {
t.Fail()
}
}
func TestWrapOneLine(t *testing.T) {
exp := "The quick brown fox jumps over the lazy dog."
if Wrap(text, 500) != exp {
t.Fail()
}
}
func TestWrapBug1(t *testing.T) {
cases := []struct {
limit int
text string
want string
}{
{4, "aaaaa", "aaaaa"},
{4, "a aaaaa", "a\naaaaa"},
}
for _, test := range cases {
got := Wrap(test.text, test.limit)
if got != test.want {
t.Errorf("Wrap(%q, %d) = %q want %q", test.text, test.limit, got, test.want)
}
}
}

vendor/github.com/tonnerre/golang-pretty/.gitignore generated vendored Normal file

@ -0,0 +1,4 @@
[568].out
_go*
_test*
_obj

vendor/github.com/tonnerre/golang-pretty/License generated vendored Normal file

@ -0,0 +1,19 @@
Copyright 2012 Keith Rarick
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/tonnerre/golang-pretty/Readme generated vendored Normal file

@ -0,0 +1,9 @@
package pretty
import "github.com/kr/pretty"
Package pretty provides pretty-printing for Go values.
Documentation
http://godoc.org/github.com/kr/pretty


@ -0,0 +1,5 @@
golang-pretty (0.0~git20130613-1) unstable; urgency=low
* Initial release. Closes: #722983
-- Tonnerre Lombard <tonnerre@ancient-solutions.com> Wed, 11 Sep 2013 02:36:12 +0200


@ -0,0 +1 @@
9


@ -0,0 +1,22 @@
Source: golang-pretty
Section: devel
Priority: extra
Maintainer: Tonnerre Lombard <tonnerre@ancient-solutions.com>
Build-Depends: debhelper (>= 9), golang-go, dh-golang,
golang-text-dev
Standards-Version: 3.9.4
Homepage: https://github.com/kr/pretty/
Vcs-Git: git://anonscm.debian.org/pkg-go/packages/golang-pretty.git
Vcs-Browser: http://anonscm.debian.org/gitweb/?p=pkg-go/packages/golang-pretty.git;a=summary
Package: golang-pretty-dev
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}, golang-text-dev
Description: Pretty printing for go values
Package pretty provides pretty-printing for Go values. This is useful
during debugging, to avoid wrapping long output lines in the
terminal.
.
It provides a function, Formatter, that can be used with any function
that accepts a format string. It also provides convenience wrappers
for functions in packages fmt and log.


@ -0,0 +1,30 @@
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: golang-pretty
Source: https://github.com/kr/pretty/
Files: *
Copyright: 2011, 2012, 2013 Keith Rarick <kr@xph.us>
License: Expat
Files: debian/*
Copyright: 2013 Tonnerre Lombard <tonnerre@ancient-solutions.com>
License: Expat
License: Expat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/tonnerre/golang-pretty/debian/docs generated vendored Normal file

@ -0,0 +1 @@
Readme

vendor/github.com/tonnerre/golang-pretty/debian/rules generated vendored Executable file

@ -0,0 +1,11 @@
#!/usr/bin/make -f
# Uncomment this to turn on verbose mode.
export DH_VERBOSE=1
# DH_GOPKG is the upstream path which you would normally “go get”.
# Using it allows us to build applications without patching locations.
export DH_GOPKG := github.com/kr/pretty
%:
dh $@ --buildsystem=golang --with=golang


@ -0,0 +1 @@
3.0 (quilt)

vendor/github.com/tonnerre/golang-pretty/diff.go generated vendored Normal file

@ -0,0 +1,148 @@
package pretty
import (
"fmt"
"io"
"reflect"
)
type sbuf []string
func (s *sbuf) Write(b []byte) (int, error) {
*s = append(*s, string(b))
return len(b), nil
}
// Diff returns a slice where each element describes
// a difference between a and b.
func Diff(a, b interface{}) (desc []string) {
Fdiff((*sbuf)(&desc), a, b)
return desc
}
// Fdiff writes to w a description of the differences between a and b.
func Fdiff(w io.Writer, a, b interface{}) {
diffWriter{w: w}.diff(reflect.ValueOf(a), reflect.ValueOf(b))
}
type diffWriter struct {
w io.Writer
l string // label
}
func (w diffWriter) printf(f string, a ...interface{}) {
var l string
if w.l != "" {
l = w.l + ": "
}
fmt.Fprintf(w.w, l+f, a...)
}
func (w diffWriter) diff(av, bv reflect.Value) {
if !av.IsValid() && bv.IsValid() {
w.printf("nil != %#v", bv.Interface())
return
}
if av.IsValid() && !bv.IsValid() {
w.printf("%#v != nil", av.Interface())
return
}
if !av.IsValid() && !bv.IsValid() {
return
}
at := av.Type()
bt := bv.Type()
if at != bt {
w.printf("%v != %v", at, bt)
return
}
// numeric types, including bool
if at.Kind() < reflect.Array {
a, b := av.Interface(), bv.Interface()
if a != b {
w.printf("%#v != %#v", a, b)
}
return
}
switch at.Kind() {
case reflect.String:
a, b := av.Interface(), bv.Interface()
if a != b {
w.printf("%q != %q", a, b)
}
case reflect.Ptr:
switch {
case av.IsNil() && !bv.IsNil():
w.printf("nil != %v", bv.Interface())
case !av.IsNil() && bv.IsNil():
w.printf("%v != nil", av.Interface())
case !av.IsNil() && !bv.IsNil():
w.diff(av.Elem(), bv.Elem())
}
case reflect.Struct:
for i := 0; i < av.NumField(); i++ {
w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i))
}
case reflect.Map:
ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys())
for _, k := range ak {
w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
w.printf("%q != (missing)", av.MapIndex(k))
}
for _, k := range both {
w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
w.diff(av.MapIndex(k), bv.MapIndex(k))
}
for _, k := range bk {
w := w.relabel(fmt.Sprintf("[%#v]", k.Interface()))
w.printf("(missing) != %q", bv.MapIndex(k))
}
case reflect.Interface:
w.diff(reflect.ValueOf(av.Interface()), reflect.ValueOf(bv.Interface()))
default:
if !reflect.DeepEqual(av.Interface(), bv.Interface()) {
w.printf("%# v != %# v", Formatter(av.Interface()), Formatter(bv.Interface()))
}
}
}
func (d diffWriter) relabel(name string) (d1 diffWriter) {
d1 = d
if d.l != "" && name[0] != '[' {
d1.l += "."
}
d1.l += name
return d1
}
func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) {
for _, av := range a {
inBoth := false
for _, bv := range b {
if reflect.DeepEqual(av.Interface(), bv.Interface()) {
inBoth = true
both = append(both, av)
break
}
}
if !inBoth {
ak = append(ak, av)
}
}
for _, bv := range b {
inBoth := false
for _, av := range a {
if reflect.DeepEqual(av.Interface(), bv.Interface()) {
inBoth = true
break
}
}
if !inBoth {
bk = append(bk, bv)
}
}
return
}

73
vendor/github.com/tonnerre/golang-pretty/diff_test.go generated vendored Normal file
View file

@ -0,0 +1,73 @@
package pretty
import (
"testing"
)
type difftest struct {
a interface{}
b interface{}
exp []string
}
type S struct {
A int
S *S
I interface{}
C []int
}
var diffs = []difftest{
{a: nil, b: nil},
{a: S{A: 1}, b: S{A: 1}},
{0, "", []string{`int != string`}},
{0, 1, []string{`0 != 1`}},
{S{}, new(S), []string{`pretty.S != *pretty.S`}},
{"a", "b", []string{`"a" != "b"`}},
{S{}, S{A: 1}, []string{`A: 0 != 1`}},
{new(S), &S{A: 1}, []string{`A: 0 != 1`}},
{S{S: new(S)}, S{S: &S{A: 1}}, []string{`S.A: 0 != 1`}},
{S{}, S{I: 0}, []string{`I: nil != 0`}},
{S{I: 1}, S{I: "x"}, []string{`I: int != string`}},
{S{}, S{C: []int{1}}, []string{`C: []int(nil) != []int{1}`}},
{S{C: []int{}}, S{C: []int{1}}, []string{`C: []int{} != []int{1}`}},
{S{}, S{A: 1, S: new(S)}, []string{`A: 0 != 1`, `S: nil != &{0 <nil> <nil> []}`}},
}
func TestDiff(t *testing.T) {
for _, tt := range diffs {
got := Diff(tt.a, tt.b)
eq := len(got) == len(tt.exp)
if eq {
for i := range got {
eq = eq && got[i] == tt.exp[i]
}
}
if !eq {
t.Errorf("diffing % #v", tt.a)
t.Errorf("with % #v", tt.b)
diffdiff(t, got, tt.exp)
continue
}
}
}
func diffdiff(t *testing.T, got, exp []string) {
minus(t, "unexpected:", got, exp)
minus(t, "missing:", exp, got)
}
func minus(t *testing.T, s string, a, b []string) {
var i, j int
for i = 0; i < len(a); i++ {
for j = 0; j < len(b); j++ {
if a[i] == b[j] {
break
}
}
if j == len(b) {
t.Error(s, a[i])
}
}
}


@ -0,0 +1,20 @@
package pretty_test
import (
"fmt"
"github.com/kr/pretty"
)
func Example() {
type myType struct {
a, b int
}
var x = []myType{{1, 2}, {3, 4}, {5, 6}}
fmt.Printf("%# v", pretty.Formatter(x))
// output:
// []pretty_test.myType{
// {a:1, b:2},
// {a:3, b:4},
// {a:5, b:6},
// }
}

vendor/github.com/tonnerre/golang-pretty/formatter.go generated vendored Normal file

@ -0,0 +1,300 @@
package pretty
import (
"fmt"
"github.com/kr/text"
"io"
"reflect"
"strconv"
"text/tabwriter"
)
const (
limit = 50
)
type formatter struct {
x interface{}
force bool
quote bool
}
// Formatter makes a wrapper, f, that will format x as go source with line
// breaks and tabs. Object f responds to the "%v" formatting verb when both the
// "#" and " " (space) flags are set, for example:
//
// fmt.Sprintf("%# v", Formatter(x))
//
// If one of these two flags is not set, or any other verb is used, f will
// format x according to the usual rules of package fmt.
// In particular, if x satisfies fmt.Formatter, then x.Format will be called.
func Formatter(x interface{}) (f fmt.Formatter) {
return formatter{x: x, quote: true}
}
func (fo formatter) String() string {
return fmt.Sprint(fo.x) // unwrap it
}
func (fo formatter) passThrough(f fmt.State, c rune) {
s := "%"
for i := 0; i < 128; i++ {
if f.Flag(i) {
s += string(i)
}
}
if w, ok := f.Width(); ok {
s += fmt.Sprintf("%d", w)
}
if p, ok := f.Precision(); ok {
s += fmt.Sprintf(".%d", p)
}
s += string(c)
fmt.Fprintf(f, s, fo.x)
}
func (fo formatter) Format(f fmt.State, c rune) {
if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') {
w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0)
p := &printer{tw: w, Writer: w}
p.printValue(reflect.ValueOf(fo.x), true, fo.quote)
w.Flush()
return
}
fo.passThrough(f, c)
}
type printer struct {
io.Writer
tw *tabwriter.Writer
}
func (p *printer) indent() *printer {
q := *p
q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0)
q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'})
return &q
}
func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) {
if showType {
io.WriteString(p, v.Type().String())
fmt.Fprintf(p, "(%#v)", x)
} else {
fmt.Fprintf(p, "%#v", x)
}
}
func (p *printer) printValue(v reflect.Value, showType, quote bool) {
switch v.Kind() {
case reflect.Bool:
p.printInline(v, v.Bool(), showType)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p.printInline(v, v.Int(), showType)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
p.printInline(v, v.Uint(), showType)
case reflect.Float32, reflect.Float64:
p.printInline(v, v.Float(), showType)
case reflect.Complex64, reflect.Complex128:
fmt.Fprintf(p, "%#v", v.Complex())
case reflect.String:
p.fmtString(v.String(), quote)
case reflect.Map:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
writeByte(p, '{')
if nonzero(v) {
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
keys := v.MapKeys()
for i := 0; i < v.Len(); i++ {
showTypeInStruct := true
k := keys[i]
mv := v.MapIndex(k)
pp.printValue(k, false, true)
writeByte(pp, ':')
if expand {
writeByte(pp, '\t')
}
showTypeInStruct = t.Elem().Kind() == reflect.Interface
pp.printValue(mv, showTypeInStruct, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.Len()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
}
writeByte(p, '}')
case reflect.Struct:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
writeByte(p, '{')
if nonzero(v) {
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
for i := 0; i < v.NumField(); i++ {
showTypeInStruct := true
if f := t.Field(i); f.Name != "" {
io.WriteString(pp, f.Name)
writeByte(pp, ':')
if expand {
writeByte(pp, '\t')
}
showTypeInStruct = f.Type.Kind() == reflect.Interface
}
pp.printValue(getField(v, i), showTypeInStruct, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.NumField()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
}
writeByte(p, '}')
case reflect.Interface:
switch e := v.Elem(); {
case e.Kind() == reflect.Invalid:
io.WriteString(p, "nil")
case e.IsValid():
p.printValue(e, showType, true)
default:
io.WriteString(p, v.Type().String())
io.WriteString(p, "(nil)")
}
case reflect.Array, reflect.Slice:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
if v.Kind() == reflect.Slice && v.IsNil() && showType {
io.WriteString(p, "(nil)")
break
}
if v.Kind() == reflect.Slice && v.IsNil() {
io.WriteString(p, "nil")
break
}
writeByte(p, '{')
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
for i := 0; i < v.Len(); i++ {
showTypeInSlice := t.Elem().Kind() == reflect.Interface
pp.printValue(v.Index(i), showTypeInSlice, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.Len()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
writeByte(p, '}')
case reflect.Ptr:
e := v.Elem()
if !e.IsValid() {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
io.WriteString(p, ")(nil)")
} else {
writeByte(p, '&')
p.printValue(e, true, true)
}
case reflect.Chan:
x := v.Pointer()
if showType {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
fmt.Fprintf(p, ")(%#v)", x)
} else {
fmt.Fprintf(p, "%#v", x)
}
case reflect.Func:
io.WriteString(p, v.Type().String())
io.WriteString(p, " {...}")
case reflect.UnsafePointer:
p.printInline(v, v.Pointer(), showType)
case reflect.Invalid:
io.WriteString(p, "nil")
}
}
func canInline(t reflect.Type) bool {
switch t.Kind() {
case reflect.Map:
return !canExpand(t.Elem())
case reflect.Struct:
for i := 0; i < t.NumField(); i++ {
if canExpand(t.Field(i).Type) {
return false
}
}
return true
case reflect.Interface:
return false
case reflect.Array, reflect.Slice:
return !canExpand(t.Elem())
case reflect.Ptr:
return false
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
return false
}
return true
}
func canExpand(t reflect.Type) bool {
switch t.Kind() {
case reflect.Map, reflect.Struct,
reflect.Interface, reflect.Array, reflect.Slice,
reflect.Ptr:
return true
}
return false
}
func (p *printer) fmtString(s string, quote bool) {
if quote {
s = strconv.Quote(s)
}
io.WriteString(p, s)
}
func tryDeepEqual(a, b interface{}) bool {
defer func() { recover() }()
return reflect.DeepEqual(a, b)
}
func writeByte(w io.Writer, b byte) {
w.Write([]byte{b})
}
func getField(v reflect.Value, i int) reflect.Value {
val := v.Field(i)
if val.Kind() == reflect.Interface && !val.IsNil() {
val = val.Elem()
}
return val
}


@ -0,0 +1,146 @@
package pretty
import (
"fmt"
"io"
"testing"
"unsafe"
)
type test struct {
v interface{}
s string
}
type LongStructTypeName struct {
longFieldName interface{}
otherLongFieldName interface{}
}
type SA struct {
t *T
}
type T struct {
x, y int
}
type F int
func (f F) Format(s fmt.State, c rune) {
fmt.Fprintf(s, "F(%d)", int(f))
}
var long = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
var gosyntax = []test{
{nil, `nil`},
{"", `""`},
{"a", `"a"`},
{1, "int(1)"},
{1.0, "float64(1)"},
{[]int(nil), "[]int(nil)"},
{[0]int{}, "[0]int{}"},
{complex(1, 0), "(1+0i)"},
//{make(chan int), "(chan int)(0x1234)"},
{unsafe.Pointer(uintptr(1)), "unsafe.Pointer(0x1)"},
{func(int) {}, "func(int) {...}"},
{map[int]int{1: 1}, "map[int]int{1:1}"},
{int32(1), "int32(1)"},
{io.EOF, `&errors.errorString{s:"EOF"}`},
{[]string{"a"}, `[]string{"a"}`},
{
[]string{long},
`[]string{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"}`,
},
{F(5), "pretty.F(5)"},
{
SA{&T{1, 2}},
`pretty.SA{
t: &pretty.T{x:1, y:2},
}`,
},
{
map[int][]byte{1: []byte{}},
`map[int][]uint8{
1: {},
}`,
},
{
map[int]T{1: T{}},
`map[int]pretty.T{
1: {},
}`,
},
{
long,
`"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"`,
},
{
LongStructTypeName{
longFieldName: LongStructTypeName{},
otherLongFieldName: long,
},
`pretty.LongStructTypeName{
longFieldName: pretty.LongStructTypeName{},
otherLongFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
}`,
},
{
&LongStructTypeName{
longFieldName: &LongStructTypeName{},
otherLongFieldName: (*LongStructTypeName)(nil),
},
`&pretty.LongStructTypeName{
longFieldName: &pretty.LongStructTypeName{},
otherLongFieldName: (*pretty.LongStructTypeName)(nil),
}`,
},
{
[]LongStructTypeName{
{nil, nil},
{3, 3},
{long, nil},
},
`[]pretty.LongStructTypeName{
{},
{
longFieldName: int(3),
otherLongFieldName: int(3),
},
{
longFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
otherLongFieldName: nil,
},
}`,
},
{
[]interface{}{
LongStructTypeName{nil, nil},
[]byte{1, 2, 3},
T{3, 4},
LongStructTypeName{long, nil},
},
`[]interface {}{
pretty.LongStructTypeName{},
[]uint8{0x1, 0x2, 0x3},
pretty.T{x:3, y:4},
pretty.LongStructTypeName{
longFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
otherLongFieldName: nil,
},
}`,
},
}
func TestGoSyntax(t *testing.T) {
for _, tt := range gosyntax {
s := fmt.Sprintf("%# v", Formatter(tt.v))
if tt.s != s {
t.Errorf("expected %q", tt.s)
t.Errorf("got %q", s)
t.Errorf("expraw\n%s", tt.s)
t.Errorf("gotraw\n%s", s)
}
}
}

vendor/github.com/tonnerre/golang-pretty/pretty.go generated vendored Normal file

@ -0,0 +1,98 @@
// Package pretty provides pretty-printing for Go values. This is
// useful during debugging, to avoid wrapping long output lines in
// the terminal.
//
// It provides a function, Formatter, that can be used with any
// function that accepts a format string. It also provides
// convenience wrappers for functions in packages fmt and log.
package pretty
import (
"fmt"
"io"
"log"
)
// Errorf is a convenience wrapper for fmt.Errorf.
//
// Calling Errorf(f, x, y) is equivalent to
// fmt.Errorf(f, Formatter(x), Formatter(y)).
func Errorf(format string, a ...interface{}) error {
return fmt.Errorf(format, wrap(a, false)...)
}
// Fprintf is a convenience wrapper for fmt.Fprintf.
//
// Calling Fprintf(w, f, x, y) is equivalent to
// fmt.Fprintf(w, f, Formatter(x), Formatter(y)).
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) {
return fmt.Fprintf(w, format, wrap(a, false)...)
}
// Log is a convenience wrapper for log.Print.
//
// Calling Log(x, y) is equivalent to
// log.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Log(a ...interface{}) {
log.Print(wrap(a, true)...)
}
// Logf is a convenience wrapper for log.Printf.
//
// Calling Logf(f, x, y) is equivalent to
// log.Printf(f, Formatter(x), Formatter(y)).
func Logf(format string, a ...interface{}) {
log.Printf(format, wrap(a, false)...)
}
// Logln is a convenience wrapper for log.Println.
//
// Calling Logln(x, y) is equivalent to
// log.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Logln(a ...interface{}) {
log.Println(wrap(a, true)...)
}
// Print pretty-prints its operands and writes to standard output.
//
// Calling Print(x, y) is equivalent to
// fmt.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Print(a ...interface{}) (n int, errno error) {
return fmt.Print(wrap(a, true)...)
}
// Printf is a convenience wrapper for fmt.Printf.
//
// Calling Printf(f, x, y) is equivalent to
// fmt.Printf(f, Formatter(x), Formatter(y)).
func Printf(format string, a ...interface{}) (n int, errno error) {
return fmt.Printf(format, wrap(a, false)...)
}
// Println pretty-prints its operands and writes to standard output.
//
// Calling Println(x, y) is equivalent to
// fmt.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Println(a ...interface{}) (n int, errno error) {
return fmt.Println(wrap(a, true)...)
}
// Sprintf is a convenience wrapper for fmt.Sprintf.
//
// Calling Sprintf(f, x, y) is equivalent to
// fmt.Sprintf(f, Formatter(x), Formatter(y)).
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, wrap(a, false)...)
}
func wrap(a []interface{}, force bool) []interface{} {
w := make([]interface{}, len(a))
for i, x := range a {
w[i] = formatter{x: x, force: force}
}
return w
}
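
A minimal sketch of the convenience wrappers above, again assuming the upstream import path github.com/kr/pretty; the server value is made up:

package main

import "github.com/kr/pretty"

// server is a made-up struct to show the "%# v" expansion.
type server struct {
	Addr  string
	Ports []int
}

func main() {
	s := server{Addr: "127.0.0.1", Ports: []int{179, 29184}}
	// Println formats each operand with "%# v", i.e. go syntax with line breaks.
	pretty.Println(s)
	// Printf passes operands through Formatter, so "%# v" expands them.
	pretty.Printf("config: %# v\n", s)
}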

Some files were not shown because too many files have changed in this diff.