Merge branch 'go-mod' into 'master'
Move to go mod (experiment) See merge request fat0troll/fw_zookeeper!1
This commit is contained in:
commit
7e96abd339
151
Gopkg.lock
generated
151
Gopkg.lock
generated
@ -1,151 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
pruneopts = "UT"
|
||||
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:5d27f9572f69e11b238ffd362a0e20ff8fc075cf33f3c147b281a077980616f3"
|
||||
name = "github.com/kirillDanshin/dlog"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "97d876b12bf9f9e11dca34779fedbf017c636e87"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:e4cca162f88bcb8b428a0c2a0bc529196575e5c860f4ce4f61871c288c798c24"
|
||||
name = "github.com/kirillDanshin/myutils"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "182269b1fbcc91a4bbed900124a49c92baa5b9d6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:aaa8e0e7e35d92e21daed3f241832cee73d15ca1cd3302ba3843159a959a7eac"
|
||||
name = "github.com/klauspost/compress"
|
||||
packages = [
|
||||
"flate",
|
||||
"gzip",
|
||||
"zlib",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "30be6041bed523c18e269a700ebd9c2ea9328574"
|
||||
version = "v1.4.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2d643962fac133904694fffa959bc3c5dcfdcee38c6f5ffdd99a3c93eb9c835c"
|
||||
name = "github.com/klauspost/cpuid"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "e7e905edc00ea8827e58662220139109efea09db"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:63987b971c0f3240a0f1eed70b80b360e6a57d7a8c85ed7daa8bf770319ab308"
|
||||
name = "github.com/pquerna/ffjson"
|
||||
packages = [
|
||||
"ffjson",
|
||||
"fflib/v1",
|
||||
"fflib/v1/internal",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "e517b90714f7c0eabe6d2e570a5886ae077d6db6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6112a5eaec2ec65df289ccbb7a730aaf03e3c5cce6c906d367ccf9b7ac567604"
|
||||
name = "github.com/rs/zerolog"
|
||||
packages = [
|
||||
".",
|
||||
"internal/cbor",
|
||||
"internal/json",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "8747b7b3a51b5d08ee7ac50eaf4869edaf9f714a"
|
||||
version = "v1.11.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c468422f334a6b46a19448ad59aaffdfc0a36b08fdcc1c749a0b29b6453d7e59"
|
||||
name = "github.com/valyala/bytebufferpool"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:15ad8a80098fcc7a194b9db6b26d74072a852e4faa957848c8118193d3c69230"
|
||||
name = "github.com/valyala/fasthttp"
|
||||
packages = [
|
||||
".",
|
||||
"fasthttputil",
|
||||
"stackless",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "e5f51c11919d4f66400334047b897ef0a94c6f3c"
|
||||
version = "v20180529"
|
||||
|
||||
[[projects]]
|
||||
branch = "develop"
|
||||
digest = "1:962c8f9e7e2c60f1f991a6f2f9090d315da010ec91361cfa14d4fdcf3ff92232"
|
||||
name = "gitlab.com/toby3d/telegram"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "b3f324e1b3aa692425c23bc87df428ff7d2a492d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:3fa70ba3ba75f47646d2a6ff518f46f3c4a215912eb6f9c26b6e956918038f01"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"internal/socks",
|
||||
"proxy",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "fae4c4e3ad76c295c3d6d259f898136b4bf833a8"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b154eb17b54cec56332bb76d6b5cf1b23f96beaf19468d0da5e94fc737a9093d"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"feature/plural",
|
||||
"internal",
|
||||
"internal/catmsg",
|
||||
"internal/format",
|
||||
"internal/gen",
|
||||
"internal/number",
|
||||
"internal/stringset",
|
||||
"internal/tag",
|
||||
"language",
|
||||
"message",
|
||||
"message/catalog",
|
||||
"unicode/cldr",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
|
||||
version = "v2.2.2"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/rs/zerolog",
|
||||
"github.com/valyala/fasthttp",
|
||||
"gitlab.com/toby3d/telegram",
|
||||
"golang.org/x/net/proxy",
|
||||
"gopkg.in/yaml.v2",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
34
Gopkg.toml
34
Gopkg.toml
@ -1,34 +0,0 @@
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
#
|
||||
# [prune]
|
||||
# non-go = false
|
||||
# go-tests = true
|
||||
# unused-packages = true
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/rs/zerolog"
|
||||
version = "1.11.0"
|
||||
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
@ -8,7 +8,7 @@ import (
|
||||
"github.com/rs/zerolog"
|
||||
"gopkg.in/yaml.v2"
|
||||
"io/ioutil"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/config"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/local/config"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
@ -5,7 +5,7 @@ package context
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/config"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/local/config"
|
||||
)
|
||||
|
||||
// VERSION is the current bot's version
|
||||
|
@ -6,7 +6,7 @@ package battlesv1
|
||||
import (
|
||||
"github.com/rs/zerolog"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/context"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/router"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/local/router"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
|
@ -5,7 +5,7 @@ package battlesv1
|
||||
|
||||
import (
|
||||
"gitlab.com/toby3d/telegram"
|
||||
itelegram "lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/telegram"
|
||||
itelegram "lab.wtfteam.pro/fat0troll/fw_zookeeper/local/telegram"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
@ -6,7 +6,7 @@ package commandsv1
|
||||
import (
|
||||
"github.com/rs/zerolog"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/context"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/router"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/local/router"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -5,7 +5,7 @@ package commandsv1
|
||||
|
||||
import (
|
||||
"gitlab.com/toby3d/telegram"
|
||||
itelegram "lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/telegram"
|
||||
itelegram "lab.wtfteam.pro/fat0troll/fw_zookeeper/local/telegram"
|
||||
)
|
||||
|
||||
// HelpCommand responds to /help message
|
||||
|
@ -5,7 +5,7 @@ package commandsv1
|
||||
|
||||
import (
|
||||
"gitlab.com/toby3d/telegram"
|
||||
itelegram "lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/telegram"
|
||||
itelegram "lab.wtfteam.pro/fat0troll/fw_zookeeper/local/telegram"
|
||||
)
|
||||
|
||||
// StartCommand responds to /start message
|
||||
|
14
go.mod
Normal file
14
go.mod
Normal file
@ -0,0 +1,14 @@
|
||||
module lab.wtfteam.pro/fat0troll/fw_zookeeper
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/kirillDanshin/dlog v0.0.0-20170728000807-97d876b12bf9 // indirect
|
||||
github.com/kirillDanshin/myutils v0.0.0-20160713214838-182269b1fbcc // indirect
|
||||
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 // indirect
|
||||
github.com/rs/zerolog v1.11.0
|
||||
github.com/valyala/fasthttp v1.0.0
|
||||
gitlab.com/toby3d/telegram v0.0.0-20181012114749-b3f324e1b3aa
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3
|
||||
golang.org/x/text v0.3.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.2.2
|
||||
)
|
30
go.sum
Normal file
30
go.sum
Normal file
@ -0,0 +1,30 @@
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/kirillDanshin/dlog v0.0.0-20170728000807-97d876b12bf9 h1:mA7k8E2Vrmyj5CW/D1XZBFmohVNi7jf757vibGwzRbo=
|
||||
github.com/kirillDanshin/dlog v0.0.0-20170728000807-97d876b12bf9/go.mod h1:l8CN7iyX1k2xlsTYVTpCtwBPcxThf/jLWDGVcF6T/bM=
|
||||
github.com/kirillDanshin/myutils v0.0.0-20160713214838-182269b1fbcc h1:OkOhOn3WBUmfATC1NsA3rBlgHGkjk0KGnR5akl/8uXc=
|
||||
github.com/kirillDanshin/myutils v0.0.0-20160713214838-182269b1fbcc/go.mod h1:Bt95qRxLvpdmASW9s2tTxGdQ5ma4o4n8QFhCvzCew/M=
|
||||
github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8=
|
||||
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc70QXJ07+2eg2Jy2EC7Mi11BWujeM=
|
||||
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE=
|
||||
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
|
||||
github.com/rs/zerolog v1.11.0 h1:DRuq/S+4k52uJzBQciUcofXx45GrMC6yrEbb/CoK6+M=
|
||||
github.com/rs/zerolog v1.11.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.0.0 h1:BwIoZQbBsTo3v2F5lz5Oy3TlTq4wLKTLV260EVTEWco=
|
||||
github.com/valyala/fasthttp v1.0.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
gitlab.com/toby3d/telegram v0.0.0-20181012114749-b3f324e1b3aa h1:cY97lp8vP4ejxaKhxDx//4gBle1NgbGYsuTnCFwhmR0=
|
||||
gitlab.com/toby3d/telegram v0.0.0-20181012114749-b3f324e1b3aa/go.mod h1:qV8SaSi5ClH+I+JPQ56jJxqEuiRmZ5MOj2VqFasMMnM=
|
||||
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3 h1:eH6Eip3UpmR+yM/qI9Ijluzb1bNv/cAU/n+6l8tRSis=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
@ -4,12 +4,12 @@
|
||||
package telegram
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"encoding/json"
|
||||
http "github.com/valyala/fasthttp"
|
||||
"gitlab.com/toby3d/telegram"
|
||||
"golang.org/x/net/proxy"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/router"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/local/router"
|
||||
"net"
|
||||
)
|
||||
|
||||
func proxyDialer(addr string) (net.Conn, error) {
|
||||
@ -90,6 +90,8 @@ func StartBot() {
|
||||
log.Info().Msg("Connection with Telegram established")
|
||||
|
||||
for update := range updates {
|
||||
updateText, _ := json.Marshal(update)
|
||||
log.Debug().Msgf("%s", string(updateText))
|
||||
go router.Respond(update)
|
||||
}
|
||||
}
|
4
main.go
4
main.go
@ -7,8 +7,8 @@ import (
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/context"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/domains/battles/v1"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/domains/commands/v1"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/router"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/telegram"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/local/router"
|
||||
"lab.wtfteam.pro/fat0troll/fw_zookeeper/local/telegram"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
|
1
vendor/github.com/klauspost/compress/flate/crc32_amd64.go
generated
vendored
1
vendor/github.com/klauspost/compress/flate/crc32_amd64.go
generated
vendored
@ -1,6 +1,5 @@
|
||||
//+build !noasm
|
||||
//+build !appengine
|
||||
//+build !gccgo
|
||||
|
||||
// Copyright 2015, Klaus Post, see LICENSE for details.
|
||||
|
||||
|
1
vendor/github.com/klauspost/compress/flate/crc32_amd64.s
generated
vendored
1
vendor/github.com/klauspost/compress/flate/crc32_amd64.s
generated
vendored
@ -1,6 +1,5 @@
|
||||
//+build !noasm
|
||||
//+build !appengine
|
||||
//+build !gccgo
|
||||
|
||||
// Copyright 2015, Klaus Post, see LICENSE for details.
|
||||
|
||||
|
2
vendor/github.com/klauspost/compress/flate/crc32_noasm.go
generated
vendored
2
vendor/github.com/klauspost/compress/flate/crc32_noasm.go
generated
vendored
@ -1,4 +1,4 @@
|
||||
//+build !amd64 noasm appengine gccgo
|
||||
//+build !amd64 noasm appengine
|
||||
|
||||
// Copyright 2015, Klaus Post, see LICENSE for details.
|
||||
|
||||
|
15
vendor/github.com/klauspost/compress/snappy/AUTHORS
generated
vendored
15
vendor/github.com/klauspost/compress/snappy/AUTHORS
generated
vendored
@ -1,15 +0,0 @@
|
||||
# This is the official list of Snappy-Go authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Damian Gryski <dgryski@gmail.com>
|
||||
Google Inc.
|
||||
Jan Mercl <0xjnml@gmail.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Sebastien Binet <seb.binet@gmail.com>
|
37
vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
generated
vendored
37
vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
generated
vendored
@ -1,37 +0,0 @@
|
||||
# This is the official list of people who can contribute
|
||||
# (and typically have contributed) code to the Snappy-Go repository.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# The submission process automatically checks to make sure
|
||||
# that people submitting code are listed in this file (by email address).
|
||||
#
|
||||
# Names should be added to this file only after verifying that
|
||||
# the individual or the individual's organization has agreed to
|
||||
# the appropriate Contributor License Agreement, found here:
|
||||
#
|
||||
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||
#
|
||||
# The agreement for individuals can be filled out on the web.
|
||||
#
|
||||
# When adding J Random Contributor's name to this file,
|
||||
# either J's name or J's organization's name should be
|
||||
# added to the AUTHORS file, depending on whether the
|
||||
# individual or corporate CLA was used.
|
||||
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Damian Gryski <dgryski@gmail.com>
|
||||
Jan Mercl <0xjnml@gmail.com>
|
||||
Kai Backman <kaib@golang.org>
|
||||
Marc-Antoine Ruel <maruel@chromium.org>
|
||||
Nigel Tao <nigeltao@golang.org>
|
||||
Rob Pike <r@golang.org>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Russ Cox <rsc@golang.org>
|
||||
Sebastien Binet <seb.binet@gmail.com>
|
27
vendor/github.com/klauspost/compress/snappy/LICENSE
generated
vendored
27
vendor/github.com/klauspost/compress/snappy/LICENSE
generated
vendored
@ -1,27 +0,0 @@
|
||||
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/github.com/valyala/fasthttp/.travis.yml
generated
vendored
22
vendor/github.com/valyala/fasthttp/.travis.yml
generated
vendored
@ -1,10 +1,27 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- tip
|
||||
- 1.11
|
||||
- 1.10.x
|
||||
- 1.9.x
|
||||
- 1.8.x
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- tip
|
||||
fast_finish: true
|
||||
|
||||
before_install:
|
||||
- go get -t -v ./...
|
||||
# - go get -v golang.org/x/tools/cmd/goimports
|
||||
|
||||
script:
|
||||
# TODO(@kirilldanshin)
|
||||
# - test -z "$(goimports -d $(find . -type f -name '*.go' -not -path "./vendor/*"))"
|
||||
# build test for supported platforms
|
||||
- GOOS=linux go build
|
||||
- GOOS=darwin go build
|
||||
@ -14,3 +31,6 @@ script:
|
||||
|
||||
# run tests on a standard platform
|
||||
- go test -v ./...
|
||||
|
||||
# run tests with the race detector as well
|
||||
- go test -race -v ./...
|
||||
|
5
vendor/github.com/valyala/fasthttp/LICENSE
generated
vendored
5
vendor/github.com/valyala/fasthttp/LICENSE
generated
vendored
@ -1,6 +1,9 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015-2016 Aliaksandr Valialkin, VertaMedia
|
||||
Copyright (c) 2015-present Aliaksandr Valialkin, VertaMedia
|
||||
Copyright (c) 2018-present Kirill Danshin
|
||||
Copyright (c) 2018-present Erik Dubbelboer
|
||||
Copyright (c) 2018-present FastHTTP Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
26
vendor/github.com/valyala/fasthttp/README.md
generated
vendored
26
vendor/github.com/valyala/fasthttp/README.md
generated
vendored
@ -23,6 +23,8 @@ connections per physical server.
|
||||
|
||||
[Code examples](examples)
|
||||
|
||||
[Awesome fasthttp tools](https://github.com/fasthttp)
|
||||
|
||||
[Switching from net/http to fasthttp](#switching-from-nethttp-to-fasthttp)
|
||||
|
||||
[Fasthttp best practices](#fasthttp-best-practices)
|
||||
@ -275,10 +277,10 @@ like in net/http. The following code is valid for fasthttp:
|
||||
but there are more powerful third-party routers and web frameworks
|
||||
with fasthttp support:
|
||||
|
||||
* [Iris](https://github.com/kataras/iris)
|
||||
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
|
||||
* [fasthttprouter](https://github.com/buaazp/fasthttprouter)
|
||||
* [lu](https://github.com/vincentLiuxiang/lu)
|
||||
* [atreugo](https://github.com/savsgio/atreugo)
|
||||
|
||||
Net/http code with simple ServeMux is trivially converted to fasthttp code:
|
||||
|
||||
@ -478,18 +480,19 @@ uintBuf := fasthttp.AppendUint(nil, 1234)
|
||||
|
||||
# Related projects
|
||||
|
||||
* [fasthttp-contrib](https://github.com/fasthttp-contrib) - various useful
|
||||
* [fasthttp](https://github.com/fasthttp) - various useful
|
||||
helpers for projects based on fasthttp.
|
||||
* [iris](https://github.com/kataras/iris) - web application framework built
|
||||
on top of fasthttp. Features speed and functionality.
|
||||
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) - fast and
|
||||
powerful routing package for fasthttp servers.
|
||||
* [fasthttprouter](https://github.com/buaazp/fasthttprouter) - a high
|
||||
performance fasthttp request router that scales well.
|
||||
* [gramework](https://github.com/gramework/gramework) - a web framework made by one of fasthttp maintainers
|
||||
* [lu](https://github.com/vincentLiuxiang/lu) - a high performance
|
||||
go middleware web framework which is based on fasthttp.
|
||||
* [websocket](https://github.com/leavengood/websocket) - Gorilla-based
|
||||
* [websocket](https://github.com/fasthttp/websocket) - Gorilla-based
|
||||
websocket implementation for fasthttp.
|
||||
* [fasthttpsession](https://github.com/phachon/fasthttpsession) - a fast and powerful session package for fasthttp servers.
|
||||
* [atreugo](https://github.com/savsgio/atreugo) - Micro-framework to make simple the use of routing and middlewares.
|
||||
|
||||
|
||||
# FAQ
|
||||
@ -519,10 +522,9 @@ uintBuf := fasthttp.AppendUint(nil, 1234)
|
||||
|
||||
* *Why fasthttp doesn't support HTTP/2.0 and WebSockets?*
|
||||
|
||||
There are [plans](TODO) for adding HTTP/2.0 and WebSockets support
|
||||
in the future.
|
||||
In the mean time, third parties may use [RequestCtx.Hijack](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack)
|
||||
for implementing these goodies. See [the first third-party websocket implementation on the top of fasthttp](https://github.com/leavengood/websocket).
|
||||
[HTTP/2.0 support](https://github.com/fasthttp/http2) is in progress. [WebSockets](https://github.com/fasthttp/websockets) has been done already.
|
||||
Third parties also may use [RequestCtx.Hijack](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack)
|
||||
for implementing these goodies.
|
||||
|
||||
* *Are there known net/http advantages comparing to fasthttp?*
|
||||
|
||||
@ -545,8 +547,10 @@ uintBuf := fasthttp.AppendUint(nil, 1234)
|
||||
|
||||
Go1.5+. Older versions won't be supported, since their standard package
|
||||
[miss useful functions](https://github.com/valyala/fasthttp/issues/5).
|
||||
|
||||
**NOTE**: Go 1.9.7 is the oldest tested version. We recommend you to update as soon as you can. As of 1.11.3 we will drop 1.9.x support.
|
||||
|
||||
* *Please provide real benchmark data and sever information*
|
||||
* *Please provide real benchmark data and server information*
|
||||
|
||||
See [this issue](https://github.com/valyala/fasthttp/issues/4).
|
||||
|
||||
@ -555,11 +559,11 @@ uintBuf := fasthttp.AppendUint(nil, 1234)
|
||||
There are no plans to add request routing into fasthttp.
|
||||
Use third-party routers and web frameworks with fasthttp support:
|
||||
|
||||
* [Iris](https://github.com/kataras/iris)
|
||||
* [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing)
|
||||
* [fasthttprouter](https://github.com/buaazp/fasthttprouter)
|
||||
* [gramework](https://github.com/gramework/gramework)
|
||||
* [lu](https://github.com/vincentLiuxiang/lu)
|
||||
* [atreugo](https://github.com/savsgio/atreugo)
|
||||
|
||||
See also [this issue](https://github.com/valyala/fasthttp/issues/9) for more info.
|
||||
|
||||
|
17
vendor/github.com/valyala/fasthttp/args.go
generated
vendored
17
vendor/github.com/valyala/fasthttp/args.go
generated
vendored
@ -5,6 +5,8 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/valyala/bytebufferpool"
|
||||
)
|
||||
|
||||
// AcquireArgs returns an empty Args object from the pool.
|
||||
@ -243,10 +245,10 @@ func (a *Args) GetUint(key string) (int, error) {
|
||||
|
||||
// SetUint sets uint value for the given key.
|
||||
func (a *Args) SetUint(key string, value int) {
|
||||
bb := AcquireByteBuffer()
|
||||
bb := bytebufferpool.Get()
|
||||
bb.B = AppendUint(bb.B[:0], value)
|
||||
a.SetBytesV(key, bb.B)
|
||||
ReleaseByteBuffer(bb)
|
||||
bytebufferpool.Put(bb)
|
||||
}
|
||||
|
||||
// SetUintBytes sets uint value for the given key.
|
||||
@ -287,11 +289,14 @@ func (a *Args) GetUfloatOrZero(key string) float64 {
|
||||
|
||||
// GetBool returns boolean value for the given key.
|
||||
//
|
||||
// true is returned for '1', 'y' and 'yes' values,
|
||||
// true is returned for "1", "t", "T", "true", "TRUE", "True", "y", "yes", "Y", "YES", "Yes",
|
||||
// otherwise false is returned.
|
||||
func (a *Args) GetBool(key string) bool {
|
||||
switch string(a.Peek(key)) {
|
||||
case "1", "y", "yes":
|
||||
switch b2s(a.Peek(key)) {
|
||||
// Support the same true cases as strconv.ParseBool
|
||||
// See: https://github.com/golang/go/blob/4e1b11e2c9bdb0ddea1141eed487be1a626ff5be/src/strconv/atob.go#L12
|
||||
// and Y and Yes versions.
|
||||
case "1", "t", "T", "true", "TRUE", "True", "y", "yes", "Y", "YES", "Yes":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
@ -486,7 +491,7 @@ func decodeArgAppend(dst, src []byte) []byte {
|
||||
// decodeArgAppendNoPlus is almost identical to decodeArgAppend, but it doesn't
|
||||
// substitute '+' with ' '.
|
||||
//
|
||||
// The function is copy-pasted from decodeArgAppend due to the preformance
|
||||
// The function is copy-pasted from decodeArgAppend due to the performance
|
||||
// reasons only.
|
||||
func decodeArgAppendNoPlus(dst, src []byte) []byte {
|
||||
if bytes.IndexByte(src, '%') < 0 {
|
||||
|
64
vendor/github.com/valyala/fasthttp/bytebuffer.go
generated
vendored
64
vendor/github.com/valyala/fasthttp/bytebuffer.go
generated
vendored
@ -1,64 +0,0 @@
|
||||
package fasthttp
|
||||
|
||||
import (
|
||||
"github.com/valyala/bytebufferpool"
|
||||
)
|
||||
|
||||
// ByteBuffer provides byte buffer, which can be used with fasthttp API
|
||||
// in order to minimize memory allocations.
|
||||
//
|
||||
// ByteBuffer may be used with functions appending data to the given []byte
|
||||
// slice. See example code for details.
|
||||
//
|
||||
// Use AcquireByteBuffer for obtaining an empty byte buffer.
|
||||
//
|
||||
// ByteBuffer is deprecated. Use github.com/valyala/bytebufferpool instead.
|
||||
type ByteBuffer bytebufferpool.ByteBuffer
|
||||
|
||||
// Write implements io.Writer - it appends p to ByteBuffer.B
|
||||
func (b *ByteBuffer) Write(p []byte) (int, error) {
|
||||
return bb(b).Write(p)
|
||||
}
|
||||
|
||||
// WriteString appends s to ByteBuffer.B
|
||||
func (b *ByteBuffer) WriteString(s string) (int, error) {
|
||||
return bb(b).WriteString(s)
|
||||
}
|
||||
|
||||
// Set sets ByteBuffer.B to p
|
||||
func (b *ByteBuffer) Set(p []byte) {
|
||||
bb(b).Set(p)
|
||||
}
|
||||
|
||||
// SetString sets ByteBuffer.B to s
|
||||
func (b *ByteBuffer) SetString(s string) {
|
||||
bb(b).SetString(s)
|
||||
}
|
||||
|
||||
// Reset makes ByteBuffer.B empty.
|
||||
func (b *ByteBuffer) Reset() {
|
||||
bb(b).Reset()
|
||||
}
|
||||
|
||||
// AcquireByteBuffer returns an empty byte buffer from the pool.
|
||||
//
|
||||
// Acquired byte buffer may be returned to the pool via ReleaseByteBuffer call.
|
||||
// This reduces the number of memory allocations required for byte buffer
|
||||
// management.
|
||||
func AcquireByteBuffer() *ByteBuffer {
|
||||
return (*ByteBuffer)(defaultByteBufferPool.Get())
|
||||
}
|
||||
|
||||
// ReleaseByteBuffer returns byte buffer to the pool.
|
||||
//
|
||||
// ByteBuffer.B mustn't be touched after returning it to the pool.
|
||||
// Otherwise data races occur.
|
||||
func ReleaseByteBuffer(b *ByteBuffer) {
|
||||
defaultByteBufferPool.Put(bb(b))
|
||||
}
|
||||
|
||||
func bb(b *ByteBuffer) *bytebufferpool.ByteBuffer {
|
||||
return (*bytebufferpool.ByteBuffer)(b)
|
||||
}
|
||||
|
||||
var defaultByteBufferPool bytebufferpool.Pool
|
17
vendor/github.com/valyala/fasthttp/bytesconv.go
generated
vendored
17
vendor/github.com/valyala/fasthttp/bytesconv.go
generated
vendored
@ -164,7 +164,7 @@ func ParseUint(buf []byte) (int, error) {
|
||||
var (
|
||||
errEmptyInt = errors.New("empty integer")
|
||||
errUnexpectedFirstChar = errors.New("unexpected first char found. Expecting 0-9")
|
||||
errUnexpectedTrailingChar = errors.New("unexpected traling char found. Expecting 0-9")
|
||||
errUnexpectedTrailingChar = errors.New("unexpected trailing char found. Expecting 0-9")
|
||||
errTooLongInt = errors.New("too long int")
|
||||
)
|
||||
|
||||
@ -416,8 +416,17 @@ func AppendQuotedArg(dst, src []byte) []byte {
|
||||
|
||||
func appendQuotedPath(dst, src []byte) []byte {
|
||||
for _, c := range src {
|
||||
// From the spec: http://tools.ietf.org/html/rfc3986#section-3.3
|
||||
// an path can contain zero or more of pchar that is defined as follows:
|
||||
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
||||
// pct-encoded = "%" HEXDIG HEXDIG
|
||||
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||
// / "*" / "+" / "," / ";" / "="
|
||||
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' ||
|
||||
c == '/' || c == '.' || c == ',' || c == '=' || c == ':' || c == '&' || c == '~' || c == '-' || c == '_' {
|
||||
c == '-' || c == '.' || c == '_' || c == '~' || c == '!' || c == '$' ||
|
||||
c == '&' || c == '\'' || c == '(' || c == ')' || c == '*' || c == '+' ||
|
||||
c == ',' || c == ';' || c == '=' || c == ':' || c == '@' || c == '/' {
|
||||
dst = append(dst, c)
|
||||
} else {
|
||||
dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15))
|
||||
@ -431,7 +440,7 @@ func appendQuotedPath(dst, src []byte) []byte {
|
||||
// This function has no performance benefits comparing to string(b) == s.
|
||||
// It is left here for backwards compatibility only.
|
||||
//
|
||||
// This function is deperecated and may be deleted soon.
|
||||
// Deprecated: may be deleted soon.
|
||||
func EqualBytesStr(b []byte, s string) bool {
|
||||
return string(b) == s
|
||||
}
|
||||
@ -441,7 +450,7 @@ func EqualBytesStr(b []byte, s string) bool {
|
||||
// This function has no performance benefits comparing to append(dst, src...).
|
||||
// It is left here for backwards compatibility only.
|
||||
//
|
||||
// This function is deprecated and may be deleted soon.
|
||||
// Deprecated: may be deleted soon.
|
||||
func AppendBytesStr(dst []byte, src string) []byte {
|
||||
return append(dst, src...)
|
||||
}
|
||||
|
127
vendor/github.com/valyala/fasthttp/client.go
generated
vendored
127
vendor/github.com/valyala/fasthttp/client.go
generated
vendored
@ -91,33 +91,36 @@ func DoDeadline(req *Request, resp *Response, deadline time.Time) error {
|
||||
return defaultClient.DoDeadline(req, resp, deadline)
|
||||
}
|
||||
|
||||
// Get appends url contents to dst and returns it as body.
|
||||
// Get returns the status code and body of url.
|
||||
//
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
func Get(dst []byte, url string) (statusCode int, body []byte, err error) {
|
||||
return defaultClient.Get(dst, url)
|
||||
}
|
||||
|
||||
// GetTimeout appends url contents to dst and returns it as body.
|
||||
// GetTimeout returns the status code and body of url.
|
||||
//
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
//
|
||||
// ErrTimeout error is returned if url contents couldn't be fetched
|
||||
// during the given timeout.
|
||||
func GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) {
|
||||
return defaultClient.GetTimeout(dst, url, timeout)
|
||||
}
|
||||
|
||||
// GetDeadline appends url contents to dst and returns it as body.
|
||||
// GetDeadline returns the status code and body of url.
|
||||
//
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
//
|
||||
// ErrTimeout error is returned if url contents couldn't be fetched
|
||||
// until the given deadline.
|
||||
func GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) {
|
||||
@ -126,12 +129,11 @@ func GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, bo
|
||||
|
||||
// Post sends POST request to the given url with the given POST arguments.
|
||||
//
|
||||
// Response body is appended to dst, which is returned as body.
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
//
|
||||
// Empty POST body is sent if postArgs is nil.
|
||||
func Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) {
|
||||
return defaultClient.Post(dst, url, postArgs)
|
||||
@ -234,33 +236,36 @@ type Client struct {
|
||||
ms map[string]*HostClient
|
||||
}
|
||||
|
||||
// Get appends url contents to dst and returns it as body.
|
||||
// Get returns the status code and body of url.
|
||||
//
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
func (c *Client) Get(dst []byte, url string) (statusCode int, body []byte, err error) {
|
||||
return clientGetURL(dst, url, c)
|
||||
}
|
||||
|
||||
// GetTimeout appends url contents to dst and returns it as body.
|
||||
// GetTimeout returns the status code and body of url.
|
||||
//
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
//
|
||||
// ErrTimeout error is returned if url contents couldn't be fetched
|
||||
// during the given timeout.
|
||||
func (c *Client) GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) {
|
||||
return clientGetURLTimeout(dst, url, timeout, c)
|
||||
}
|
||||
|
||||
// GetDeadline appends url contents to dst and returns it as body.
|
||||
// GetDeadline returns the status code and body of url.
|
||||
//
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
//
|
||||
// ErrTimeout error is returned if url contents couldn't be fetched
|
||||
// until the given deadline.
|
||||
func (c *Client) GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) {
|
||||
@ -269,12 +274,11 @@ func (c *Client) GetDeadline(dst []byte, url string, deadline time.Time) (status
|
||||
|
||||
// Post sends POST request to the given url with the given POST arguments.
|
||||
//
|
||||
// Response body is appended to dst, which is returned as body.
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
//
|
||||
// Empty POST body is sent if postArgs is nil.
|
||||
func (c *Client) Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) {
|
||||
return clientPostURL(dst, url, postArgs, c)
|
||||
@ -443,6 +447,9 @@ const DefaultMaxConnsPerHost = 512
|
||||
// connection is closed.
|
||||
const DefaultMaxIdleConnDuration = 10 * time.Second
|
||||
|
||||
// DefaultMaxIdemponentCallAttempts is the default idempotent calls attempts count.
|
||||
const DefaultMaxIdemponentCallAttempts = 5
|
||||
|
||||
// DialFunc must establish connection to addr.
|
||||
//
|
||||
// There is no need in establishing TLS (SSL) connection for https.
|
||||
@ -522,6 +529,11 @@ type HostClient struct {
|
||||
// after DefaultMaxIdleConnDuration.
|
||||
MaxIdleConnDuration time.Duration
|
||||
|
||||
// Maximum number of attempts for idempotent calls
|
||||
//
|
||||
// DefaultMaxIdemponentCallAttempts is used if not set.
|
||||
MaxIdemponentCallAttempts int
|
||||
|
||||
// Per-connection buffer size for responses' reading.
|
||||
// This also limits the maximum header size.
|
||||
//
|
||||
@ -609,33 +621,36 @@ func (c *HostClient) LastUseTime() time.Time {
|
||||
return time.Unix(startTimeUnix+int64(n), 0)
|
||||
}
|
||||
|
||||
// Get appends url contents to dst and returns it as body.
|
||||
// Get returns the status code and body of url.
|
||||
//
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
func (c *HostClient) Get(dst []byte, url string) (statusCode int, body []byte, err error) {
|
||||
return clientGetURL(dst, url, c)
|
||||
}
|
||||
|
||||
// GetTimeout appends url contents to dst and returns it as body.
|
||||
// GetTimeout returns the status code and body of url.
|
||||
//
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
//
|
||||
// ErrTimeout error is returned if url contents couldn't be fetched
|
||||
// during the given timeout.
|
||||
func (c *HostClient) GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) {
|
||||
return clientGetURLTimeout(dst, url, timeout, c)
|
||||
}
|
||||
|
||||
// GetDeadline appends url contents to dst and returns it as body.
|
||||
// GetDeadline returns the status code and body of url.
|
||||
//
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
//
|
||||
// ErrTimeout error is returned if url contents couldn't be fetched
|
||||
// until the given deadline.
|
||||
func (c *HostClient) GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) {
|
||||
@ -644,12 +659,11 @@ func (c *HostClient) GetDeadline(dst []byte, url string, deadline time.Time) (st
|
||||
|
||||
// Post sends POST request to the given url with the given POST arguments.
|
||||
//
|
||||
// Response body is appended to dst, which is returned as body.
|
||||
// The contents of dst will be replaced by the body and returned, if the dst
|
||||
// is too small a new slice will be allocated.
|
||||
//
|
||||
// The function follows redirects. Use Do* for manually handling redirects.
|
||||
//
|
||||
// New body buffer is allocated if dst is nil.
|
||||
//
|
||||
// Empty POST body is sent if postArgs is nil.
|
||||
func (c *HostClient) Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) {
|
||||
return clientPostURL(dst, url, postArgs, c)
|
||||
@ -767,7 +781,11 @@ func doRequestFollowRedirects(req *Request, dst []byte, url string, c clientDoer
|
||||
break
|
||||
}
|
||||
statusCode = resp.Header.StatusCode()
|
||||
if statusCode != StatusMovedPermanently && statusCode != StatusFound && statusCode != StatusSeeOther {
|
||||
if statusCode != StatusMovedPermanently &&
|
||||
statusCode != StatusFound &&
|
||||
statusCode != StatusSeeOther &&
|
||||
statusCode != StatusTemporaryRedirect &&
|
||||
statusCode != StatusPermanentRedirect {
|
||||
break
|
||||
}
|
||||
|
||||
@ -969,7 +987,10 @@ var errorChPool sync.Pool
|
||||
func (c *HostClient) Do(req *Request, resp *Response) error {
|
||||
var err error
|
||||
var retry bool
|
||||
const maxAttempts = 5
|
||||
maxAttempts := c.MaxIdemponentCallAttempts
|
||||
if maxAttempts <= 0 {
|
||||
maxAttempts = DefaultMaxIdemponentCallAttempts
|
||||
}
|
||||
attempts := 0
|
||||
|
||||
atomic.AddUint64(&c.pendingRequests, 1)
|
||||
@ -1041,7 +1062,7 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error)
|
||||
panic("BUG: resp cannot be nil")
|
||||
}
|
||||
|
||||
atomic.StoreUint32(&c.lastUseTime, uint32(CoarseTimeNow().Unix()-startTimeUnix))
|
||||
atomic.StoreUint32(&c.lastUseTime, uint32(time.Now().Unix()-startTimeUnix))
|
||||
|
||||
// Free up resources occupied by response before sending the request,
|
||||
// so the GC may reclaim these resources (e.g. response body).
|
||||
@ -1057,7 +1078,7 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error)
|
||||
// Optimization: update write deadline only if more than 25%
|
||||
// of the last write deadline exceeded.
|
||||
// See https://github.com/golang/go/issues/15133 for details.
|
||||
currentTime := CoarseTimeNow()
|
||||
currentTime := time.Now()
|
||||
if currentTime.Sub(cc.lastWriteDeadlineTime) > (c.WriteTimeout >> 2) {
|
||||
if err = conn.SetWriteDeadline(currentTime.Add(c.WriteTimeout)); err != nil {
|
||||
c.closeConn(cc)
|
||||
@ -1079,9 +1100,6 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error)
|
||||
}
|
||||
bw := c.acquireWriter(conn)
|
||||
err = req.Write(bw)
|
||||
if len(userAgentOld) == 0 {
|
||||
req.Header.userAgent = userAgentOld
|
||||
}
|
||||
|
||||
if resetConnection {
|
||||
req.Header.ResetConnectionClose()
|
||||
@ -1101,7 +1119,7 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error)
|
||||
// Optimization: update read deadline only if more than 25%
|
||||
// of the last read deadline exceeded.
|
||||
// See https://github.com/golang/go/issues/15133 for details.
|
||||
currentTime := CoarseTimeNow()
|
||||
currentTime := time.Now()
|
||||
if currentTime.Sub(cc.lastReadDeadlineTime) > (c.ReadTimeout >> 2) {
|
||||
if err = conn.SetReadDeadline(currentTime.Add(c.ReadTimeout)); err != nil {
|
||||
c.closeConn(cc)
|
||||
@ -1226,6 +1244,12 @@ func (c *HostClient) connsCleaner() {
|
||||
for i < n && currentTime.Sub(conns[i].lastUseTime) > maxIdleConnDuration {
|
||||
i++
|
||||
}
|
||||
sleepFor := maxIdleConnDuration
|
||||
if i < n {
|
||||
// + 1 so we actually sleep past the expiration time and not up to it.
|
||||
// Otherwise the > check above would still fail.
|
||||
sleepFor = maxIdleConnDuration - currentTime.Sub(conns[i].lastUseTime) + 1
|
||||
}
|
||||
scratch = append(scratch[:0], conns[:i]...)
|
||||
if i > 0 {
|
||||
m := copy(conns, conns[i:])
|
||||
@ -1253,7 +1277,7 @@ func (c *HostClient) connsCleaner() {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(maxIdleConnDuration)
|
||||
time.Sleep(sleepFor)
|
||||
}
|
||||
}
|
||||
|
||||
@ -1276,19 +1300,20 @@ func acquireClientConn(conn net.Conn) *clientConn {
|
||||
}
|
||||
cc := v.(*clientConn)
|
||||
cc.c = conn
|
||||
cc.createdTime = CoarseTimeNow()
|
||||
cc.createdTime = time.Now()
|
||||
return cc
|
||||
}
|
||||
|
||||
func releaseClientConn(cc *clientConn) {
|
||||
cc.c = nil
|
||||
// Reset all fields.
|
||||
*cc = clientConn{}
|
||||
clientConnPool.Put(cc)
|
||||
}
|
||||
|
||||
var clientConnPool sync.Pool
|
||||
|
||||
func (c *HostClient) releaseConn(cc *clientConn) {
|
||||
cc.lastUseTime = CoarseTimeNow()
|
||||
cc.lastUseTime = time.Now()
|
||||
c.connsLock.Lock()
|
||||
c.conns = append(c.conns, cc)
|
||||
c.connsLock.Unlock()
|
||||
@ -1524,7 +1549,7 @@ type PipelineClient struct {
|
||||
|
||||
// The maximum number of concurrent connections to the Addr.
|
||||
//
|
||||
// A sinle connection is used by default.
|
||||
// A single connection is used by default.
|
||||
MaxConns int
|
||||
|
||||
// The maximum number of pending pipelined requests over
|
||||
@ -1991,7 +2016,7 @@ func (c *pipelineConnClient) writer(conn net.Conn, stopCh <-chan struct{}) error
|
||||
// Optimization: update write deadline only if more than 25%
|
||||
// of the last write deadline exceeded.
|
||||
// See https://github.com/golang/go/issues/15133 for details.
|
||||
currentTime := CoarseTimeNow()
|
||||
currentTime := time.Now()
|
||||
if currentTime.Sub(lastWriteDeadlineTime) > (writeTimeout >> 2) {
|
||||
if err = conn.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil {
|
||||
w.err = err
|
||||
@ -2072,7 +2097,7 @@ func (c *pipelineConnClient) reader(conn net.Conn, stopCh <-chan struct{}) error
|
||||
// Optimization: update read deadline only if more than 25%
|
||||
// of the last read deadline exceeded.
|
||||
// See https://github.com/golang/go/issues/15133 for details.
|
||||
currentTime := CoarseTimeNow()
|
||||
currentTime := time.Now()
|
||||
if currentTime.Sub(lastReadDeadlineTime) > (readTimeout >> 2) {
|
||||
if err = conn.SetReadDeadline(currentTime.Add(readTimeout)); err != nil {
|
||||
w.err = err
|
||||
|
21
vendor/github.com/valyala/fasthttp/coarseTime.go
generated
vendored
21
vendor/github.com/valyala/fasthttp/coarseTime.go
generated
vendored
@ -1,28 +1,13 @@
|
||||
package fasthttp
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// CoarseTimeNow returns the current time truncated to the nearest second.
|
||||
//
|
||||
// This is a faster alternative to time.Now().
|
||||
// Deprecated: This is slower than calling time.Now() directly.
|
||||
// This is now time.Now().Truncate(time.Second) shortcut.
|
||||
func CoarseTimeNow() time.Time {
|
||||
tp := coarseTime.Load().(*time.Time)
|
||||
return *tp
|
||||
return time.Now().Truncate(time.Second)
|
||||
}
|
||||
|
||||
func init() {
|
||||
t := time.Now().Truncate(time.Second)
|
||||
coarseTime.Store(&t)
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(time.Second)
|
||||
t := time.Now().Truncate(time.Second)
|
||||
coarseTime.Store(&t)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
var coarseTime atomic.Value
|
||||
|
6
vendor/github.com/valyala/fasthttp/compress.go
generated
vendored
6
vendor/github.com/valyala/fasthttp/compress.go
generated
vendored
@ -152,7 +152,6 @@ func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) {
|
||||
switch w.(type) {
|
||||
case *byteSliceWriter,
|
||||
*bytes.Buffer,
|
||||
*ByteBuffer,
|
||||
*bytebufferpool.ByteBuffer:
|
||||
// These writers don't block, so we can just use stacklessWriteGzip
|
||||
ctx := &compressCtx{
|
||||
@ -249,7 +248,6 @@ func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) {
|
||||
switch w.(type) {
|
||||
case *byteSliceWriter,
|
||||
*bytes.Buffer,
|
||||
*ByteBuffer,
|
||||
*bytebufferpool.ByteBuffer:
|
||||
// These writers don't block, so we can just use stacklessWriteDeflate
|
||||
ctx := &compressCtx{
|
||||
@ -409,7 +407,7 @@ func isFileCompressible(f *os.File, minCompressRatio float64) bool {
|
||||
// Try compressing the first 4kb of of the file
|
||||
// and see if it can be compressed by more than
|
||||
// the given minCompressRatio.
|
||||
b := AcquireByteBuffer()
|
||||
b := bytebufferpool.Get()
|
||||
zw := acquireStacklessGzipWriter(b, CompressDefaultCompression)
|
||||
lr := &io.LimitedReader{
|
||||
R: f,
|
||||
@ -424,7 +422,7 @@ func isFileCompressible(f *os.File, minCompressRatio float64) bool {
|
||||
|
||||
n := 4096 - lr.N
|
||||
zn := len(b.B)
|
||||
ReleaseByteBuffer(b)
|
||||
bytebufferpool.Put(b)
|
||||
return float64(zn) < float64(n)*minCompressRatio
|
||||
}
|
||||
|
||||
|
122
vendor/github.com/valyala/fasthttp/cookie.go
generated
vendored
122
vendor/github.com/valyala/fasthttp/cookie.go
generated
vendored
@ -52,6 +52,7 @@ type Cookie struct {
|
||||
key []byte
|
||||
value []byte
|
||||
expire time.Time
|
||||
maxAge int
|
||||
domain []byte
|
||||
path []byte
|
||||
|
||||
@ -68,6 +69,7 @@ func (c *Cookie) CopyTo(src *Cookie) {
|
||||
c.key = append(c.key[:0], src.key...)
|
||||
c.value = append(c.value[:0], src.value...)
|
||||
c.expire = src.expire
|
||||
c.maxAge = src.maxAge
|
||||
c.domain = append(c.domain[:0], src.domain...)
|
||||
c.path = append(c.path[:0], src.path...)
|
||||
c.httpOnly = src.httpOnly
|
||||
@ -128,6 +130,20 @@ func (c *Cookie) SetDomainBytes(domain []byte) {
|
||||
c.domain = append(c.domain[:0], domain...)
|
||||
}
|
||||
|
||||
// MaxAge returns the seconds until the cookie is meant to expire or 0
|
||||
// if no max age.
|
||||
func (c *Cookie) MaxAge() int {
|
||||
return c.maxAge
|
||||
}
|
||||
|
||||
// SetMaxAge sets cookie expiration time based on seconds. This takes precedence
|
||||
// over any absolute expiry set on the cookie
|
||||
//
|
||||
// Set max age to 0 to unset
|
||||
func (c *Cookie) SetMaxAge(seconds int) {
|
||||
c.maxAge = seconds
|
||||
}
|
||||
|
||||
// Expire returns cookie expiration time.
|
||||
//
|
||||
// CookieExpireUnlimited is returned if cookie doesn't expire
|
||||
@ -188,6 +204,7 @@ func (c *Cookie) Reset() {
|
||||
c.key = c.key[:0]
|
||||
c.value = c.value[:0]
|
||||
c.expire = zeroTime
|
||||
c.maxAge = 0
|
||||
c.domain = c.domain[:0]
|
||||
c.path = c.path[:0]
|
||||
c.httpOnly = false
|
||||
@ -203,7 +220,12 @@ func (c *Cookie) AppendBytes(dst []byte) []byte {
|
||||
}
|
||||
dst = append(dst, c.value...)
|
||||
|
||||
if !c.expire.IsZero() {
|
||||
if c.maxAge > 0 {
|
||||
dst = append(dst, ';', ' ')
|
||||
dst = append(dst, strCookieMaxAge...)
|
||||
dst = append(dst, '=')
|
||||
dst = AppendUint(dst, c.maxAge)
|
||||
} else if !c.expire.IsZero() {
|
||||
c.bufKV.value = AppendHTTPDate(c.bufKV.value[:0], c.expire)
|
||||
dst = append(dst, ';', ' ')
|
||||
dst = append(dst, strCookieExpires...)
|
||||
@ -272,29 +294,58 @@ func (c *Cookie) ParseBytes(src []byte) error {
|
||||
c.value = append(c.value[:0], kv.value...)
|
||||
|
||||
for s.next(kv) {
|
||||
if len(kv.key) == 0 && len(kv.value) == 0 {
|
||||
continue
|
||||
}
|
||||
switch string(kv.key) {
|
||||
case "expires":
|
||||
v := b2s(kv.value)
|
||||
exptime, err := time.ParseInLocation(time.RFC1123, v, time.UTC)
|
||||
if err != nil {
|
||||
return err
|
||||
if len(kv.key) != 0 {
|
||||
// Case insensitive switch on first char
|
||||
switch kv.key[0] | 0x20 {
|
||||
case 'm':
|
||||
if caseInsensitiveCompare(strCookieMaxAge, kv.key) {
|
||||
maxAge, err := ParseUint(kv.value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.maxAge = maxAge
|
||||
}
|
||||
|
||||
case 'e': // "expires"
|
||||
if caseInsensitiveCompare(strCookieExpires, kv.key) {
|
||||
v := b2s(kv.value)
|
||||
// Try the same two formats as net/http
|
||||
// See: https://github.com/golang/go/blob/00379be17e63a5b75b3237819392d2dc3b313a27/src/net/http/cookie.go#L133-L135
|
||||
exptime, err := time.ParseInLocation(time.RFC1123, v, time.UTC)
|
||||
if err != nil {
|
||||
exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.expire = exptime
|
||||
}
|
||||
|
||||
case 'd': // "domain"
|
||||
if caseInsensitiveCompare(strCookieDomain, kv.key) {
|
||||
c.domain = append(c.domain[:0], kv.value...)
|
||||
}
|
||||
|
||||
case 'p': // "path"
|
||||
if caseInsensitiveCompare(strCookiePath, kv.key) {
|
||||
c.path = append(c.path[:0], kv.value...)
|
||||
}
|
||||
}
|
||||
c.expire = exptime
|
||||
case "domain":
|
||||
c.domain = append(c.domain[:0], kv.value...)
|
||||
case "path":
|
||||
c.path = append(c.path[:0], kv.value...)
|
||||
case "":
|
||||
switch string(kv.value) {
|
||||
case "HttpOnly":
|
||||
c.httpOnly = true
|
||||
case "secure":
|
||||
c.secure = true
|
||||
|
||||
} else if len(kv.value) != 0 {
|
||||
// Case insensitive switch on first char
|
||||
switch kv.value[0] | 0x20 {
|
||||
case 'h': // "httponly"
|
||||
if caseInsensitiveCompare(strCookieHTTPOnly, kv.value) {
|
||||
c.httpOnly = true
|
||||
}
|
||||
|
||||
case 's': // "secure"
|
||||
if caseInsensitiveCompare(strCookieSecure, kv.value) {
|
||||
c.secure = true
|
||||
}
|
||||
}
|
||||
}
|
||||
} // else empty or no match
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -329,6 +380,19 @@ func appendRequestCookieBytes(dst []byte, cookies []argsKV) []byte {
|
||||
return dst
|
||||
}
|
||||
|
||||
// For Response we can not use the above function as response cookies
|
||||
// already contain the key= in the value.
|
||||
func appendResponseCookieBytes(dst []byte, cookies []argsKV) []byte {
|
||||
for i, n := 0, len(cookies); i < n; i++ {
|
||||
kv := &cookies[i]
|
||||
dst = append(dst, kv.value...)
|
||||
if i+1 < n {
|
||||
dst = append(dst, ';', ' ')
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func parseRequestCookies(cookies []argsKV, src []byte) []argsKV {
|
||||
var s cookieScanner
|
||||
s.b = src
|
||||
@ -394,3 +458,17 @@ func decodeCookieArg(dst, src []byte, skipQuotes bool) []byte {
|
||||
}
|
||||
return append(dst[:0], src...)
|
||||
}
|
||||
|
||||
// caseInsensitiveCompare does a case insensitive equality comparison of
|
||||
// two []byte. Assumes only letters need to be matched.
|
||||
func caseInsensitiveCompare(a, b []byte) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(a); i++ {
|
||||
if a[i]|0x20 != b[i]|0x20 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
3
vendor/github.com/valyala/fasthttp/doc.go
generated
vendored
3
vendor/github.com/valyala/fasthttp/doc.go
generated
vendored
@ -7,9 +7,6 @@ Fasthttp provides the following features:
|
||||
concurrent keep-alive connections on modern hardware.
|
||||
* Optimized for low memory usage.
|
||||
* Easy 'Connection: Upgrade' support via RequestCtx.Hijack.
|
||||
* Server supports requests' pipelining. Multiple requests may be read from
|
||||
a single network packet and multiple responses may be sent in a single
|
||||
network packet. This may be useful for highly loaded REST services.
|
||||
* Server provides the following anti-DoS limits:
|
||||
|
||||
* The number of concurrent connections.
|
||||
|
2
vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go
generated
vendored
2
vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go
generated
vendored
@ -8,7 +8,7 @@ import (
|
||||
|
||||
// InmemoryListener provides in-memory dialer<->net.Listener implementation.
|
||||
//
|
||||
// It may be used either for fast in-process client<->server communcations
|
||||
// It may be used either for fast in-process client<->server communications
|
||||
// without network stack overhead or for client<->server tests.
|
||||
type InmemoryListener struct {
|
||||
lock sync.Mutex
|
||||
|
2
vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go
generated
vendored
2
vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go
generated
vendored
@ -8,7 +8,7 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewPipeConns returns new bi-directonal connection pipe.
|
||||
// NewPipeConns returns new bi-directional connection pipe.
|
||||
func NewPipeConns() *PipeConns {
|
||||
ch1 := make(chan *byteBuffer, 4)
|
||||
ch2 := make(chan *byteBuffer, 4)
|
||||
|
33
vendor/github.com/valyala/fasthttp/fs.go
generated
vendored
33
vendor/github.com/valyala/fasthttp/fs.go
generated
vendored
@ -17,6 +17,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/klauspost/compress/gzip"
|
||||
"github.com/valyala/bytebufferpool"
|
||||
)
|
||||
|
||||
// ServeFileBytesUncompressed returns HTTP response containing file contents
|
||||
@ -139,12 +140,12 @@ func NewVHostPathRewriter(slashesCount int) PathRewriteFunc {
|
||||
if len(host) == 0 {
|
||||
host = strInvalidHost
|
||||
}
|
||||
b := AcquireByteBuffer()
|
||||
b := bytebufferpool.Get()
|
||||
b.B = append(b.B, '/')
|
||||
b.B = append(b.B, host...)
|
||||
b.B = append(b.B, path...)
|
||||
ctx.URI().SetPathBytes(b.B)
|
||||
ReleaseByteBuffer(b)
|
||||
bytebufferpool.Put(b)
|
||||
|
||||
return ctx.Path()
|
||||
}
|
||||
@ -225,7 +226,7 @@ type FS struct {
|
||||
// It adds CompressedFileSuffix suffix to the original file name and
|
||||
// tries saving the resulting compressed file under the new file name.
|
||||
// So it is advisable to give the server write access to Root
|
||||
// and to all inner folders in order to minimze CPU usage when serving
|
||||
// and to all inner folders in order to minimize CPU usage when serving
|
||||
// compressed responses.
|
||||
//
|
||||
// Transparent compression is disabled by default.
|
||||
@ -241,6 +242,14 @@ type FS struct {
|
||||
// By default request path is not modified.
|
||||
PathRewrite PathRewriteFunc
|
||||
|
||||
// PathNotFound fires when file is not found in filesystem
|
||||
// this functions tries to replace "Cannot open requested path"
|
||||
// server response giving to the programmer the control of server flow.
|
||||
//
|
||||
// By default PathNotFound returns
|
||||
// "Cannot open requested path"
|
||||
PathNotFound RequestHandler
|
||||
|
||||
// Expiration duration for inactive file handlers.
|
||||
//
|
||||
// FSHandlerCacheDuration is used by default.
|
||||
@ -343,6 +352,7 @@ func (fs *FS) initRequestHandler() {
|
||||
pathRewrite: fs.PathRewrite,
|
||||
generateIndexPages: fs.GenerateIndexPages,
|
||||
compress: fs.Compress,
|
||||
pathNotFound: fs.PathNotFound,
|
||||
acceptByteRange: fs.AcceptByteRange,
|
||||
cacheDuration: cacheDuration,
|
||||
compressedFileSuffix: compressedFileSuffix,
|
||||
@ -365,6 +375,7 @@ type fsHandler struct {
|
||||
root string
|
||||
indexNames []string
|
||||
pathRewrite PathRewriteFunc
|
||||
pathNotFound RequestHandler
|
||||
generateIndexPages bool
|
||||
compress bool
|
||||
acceptByteRange bool
|
||||
@ -726,7 +737,12 @@ func (h *fsHandler) handleRequest(ctx *RequestCtx) {
|
||||
}
|
||||
} else if err != nil {
|
||||
ctx.Logger().Printf("cannot open file %q: %s", filePath, err)
|
||||
ctx.Error("Cannot open requested path", StatusNotFound)
|
||||
if h.pathNotFound == nil {
|
||||
ctx.Error("Cannot open requested path", StatusNotFound)
|
||||
} else {
|
||||
ctx.SetStatusCode(StatusNotFound)
|
||||
h.pathNotFound(ctx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -808,7 +824,10 @@ func (h *fsHandler) handleRequest(ctx *RequestCtx) {
|
||||
}
|
||||
}
|
||||
}
|
||||
ctx.SetContentType(ff.contentType)
|
||||
hdr.noDefaultContentType = true
|
||||
if len(hdr.ContentType()) == 0 {
|
||||
ctx.SetContentType(ff.contentType)
|
||||
}
|
||||
ctx.SetStatusCode(statusCode)
|
||||
}
|
||||
|
||||
@ -897,7 +916,7 @@ var (
|
||||
)
|
||||
|
||||
func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool) (*fsFile, error) {
|
||||
w := &ByteBuffer{}
|
||||
w := &bytebufferpool.ByteBuffer{}
|
||||
|
||||
basePathEscaped := html.EscapeString(string(base.Path()))
|
||||
fmt.Fprintf(w, "<html><head><title>%s</title><style>.dir { font-weight: bold }</style></head><body>", basePathEscaped)
|
||||
@ -957,7 +976,7 @@ func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool)
|
||||
fmt.Fprintf(w, "</ul></body></html>")
|
||||
|
||||
if mustCompress {
|
||||
var zbuf ByteBuffer
|
||||
var zbuf bytebufferpool.ByteBuffer
|
||||
zbuf.B = AppendGzipBytesLevel(zbuf.B, w.B, CompressDefaultCompression)
|
||||
w = &zbuf
|
||||
}
|
||||
|
9
vendor/github.com/valyala/fasthttp/go.mod
generated
vendored
Normal file
9
vendor/github.com/valyala/fasthttp/go.mod
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
module github.com/valyala/fasthttp
|
||||
|
||||
require (
|
||||
github.com/klauspost/compress v1.4.0
|
||||
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a
|
||||
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3
|
||||
)
|
10
vendor/github.com/valyala/fasthttp/go.sum
generated
vendored
Normal file
10
vendor/github.com/valyala/fasthttp/go.sum
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8=
|
||||
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc70QXJ07+2eg2Jy2EC7Mi11BWujeM=
|
||||
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3 h1:czFLhve3vsQetD6JOJ8NZZvGQIXlnN3/yXxbT6/awxI=
|
||||
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
240
vendor/github.com/valyala/fasthttp/header.go
generated
vendored
240
vendor/github.com/valyala/fasthttp/header.go
generated
vendored
@ -20,9 +20,10 @@ import (
|
||||
type ResponseHeader struct {
|
||||
noCopy noCopy
|
||||
|
||||
disableNormalizing bool
|
||||
noHTTP11 bool
|
||||
connectionClose bool
|
||||
disableNormalizing bool
|
||||
noHTTP11 bool
|
||||
connectionClose bool
|
||||
noDefaultContentType bool
|
||||
|
||||
statusCode int
|
||||
contentLength int
|
||||
@ -50,7 +51,6 @@ type RequestHeader struct {
|
||||
disableNormalizing bool
|
||||
noHTTP11 bool
|
||||
connectionClose bool
|
||||
isGet bool
|
||||
|
||||
// These two fields have been moved close to other bool fields
|
||||
// for reducing RequestHeader object size.
|
||||
@ -157,12 +157,6 @@ func (h *RequestHeader) ConnectionClose() bool {
|
||||
return h.connectionClose
|
||||
}
|
||||
|
||||
func (h *RequestHeader) connectionCloseFast() bool {
|
||||
// h.parseRawHeaders() isn't called for performance reasons.
|
||||
// Use ConnectionClose for triggering raw headers parsing.
|
||||
return h.connectionClose
|
||||
}
|
||||
|
||||
// SetConnectionClose sets 'Connection: close' header.
|
||||
func (h *RequestHeader) SetConnectionClose() {
|
||||
// h.parseRawHeaders() isn't called for performance reasons.
|
||||
@ -189,6 +183,11 @@ func (h *RequestHeader) ConnectionUpgrade() bool {
|
||||
return hasHeaderValue(h.Peek("Connection"), strUpgrade)
|
||||
}
|
||||
|
||||
// PeekCookie is able to returns cookie by a given key from response.
|
||||
func (h *ResponseHeader) PeekCookie(key string) []byte {
|
||||
return peekArgStr(h.cookies, key)
|
||||
}
|
||||
|
||||
// ContentLength returns Content-Length header value.
|
||||
//
|
||||
// It may be negative:
|
||||
@ -241,9 +240,15 @@ func (h *ResponseHeader) mustSkipContentLength() bool {
|
||||
// It may be negative:
|
||||
// -1 means Transfer-Encoding: chunked.
|
||||
func (h *RequestHeader) ContentLength() int {
|
||||
if h.noBody() {
|
||||
if h.ignoreBody() {
|
||||
return 0
|
||||
}
|
||||
return h.realContentLength()
|
||||
}
|
||||
|
||||
// realContentLength returns the actual Content-Length set in the request,
|
||||
// including positive lengths for GET/HEAD requests.
|
||||
func (h *RequestHeader) realContentLength() int {
|
||||
h.parseRawHeaders()
|
||||
return h.contentLength
|
||||
}
|
||||
@ -272,7 +277,7 @@ func (h *ResponseHeader) isCompressibleContentType() bool {
|
||||
// ContentType returns Content-Type header value.
|
||||
func (h *ResponseHeader) ContentType() []byte {
|
||||
contentType := h.contentType
|
||||
if len(h.contentType) == 0 {
|
||||
if !h.noDefaultContentType && len(h.contentType) == 0 {
|
||||
contentType = defaultContentType
|
||||
}
|
||||
return contentType
|
||||
@ -504,14 +509,10 @@ func (h *RequestHeader) SetRequestURIBytes(requestURI []byte) {
|
||||
|
||||
// IsGet returns true if request method is GET.
|
||||
func (h *RequestHeader) IsGet() bool {
|
||||
// Optimize fast path for GET requests.
|
||||
if !h.isGet {
|
||||
h.isGet = bytes.Equal(h.Method(), strGet)
|
||||
}
|
||||
return h.isGet
|
||||
return bytes.Equal(h.Method(), strGet)
|
||||
}
|
||||
|
||||
// IsPost returns true if request methos is POST.
|
||||
// IsPost returns true if request method is POST.
|
||||
func (h *RequestHeader) IsPost() bool {
|
||||
return bytes.Equal(h.Method(), strPost)
|
||||
}
|
||||
@ -523,10 +524,6 @@ func (h *RequestHeader) IsPut() bool {
|
||||
|
||||
// IsHead returns true if request method is HEAD.
|
||||
func (h *RequestHeader) IsHead() bool {
|
||||
// Fast path
|
||||
if h.isGet {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(h.Method(), strHead)
|
||||
}
|
||||
|
||||
@ -535,6 +532,26 @@ func (h *RequestHeader) IsDelete() bool {
|
||||
return bytes.Equal(h.Method(), strDelete)
|
||||
}
|
||||
|
||||
// IsConnect returns true if request method is CONNECT.
|
||||
func (h *RequestHeader) IsConnect() bool {
|
||||
return bytes.Equal(h.Method(), strConnect)
|
||||
}
|
||||
|
||||
// IsOptions returns true if request method is OPTIONS.
|
||||
func (h *RequestHeader) IsOptions() bool {
|
||||
return bytes.Equal(h.Method(), strOptions)
|
||||
}
|
||||
|
||||
// IsTrace returns true if request method is TRACE.
|
||||
func (h *RequestHeader) IsTrace() bool {
|
||||
return bytes.Equal(h.Method(), strTrace)
|
||||
}
|
||||
|
||||
// IsPatch returns true if request method is PATCH.
|
||||
func (h *RequestHeader) IsPatch() bool {
|
||||
return bytes.Equal(h.Method(), strPatch)
|
||||
}
|
||||
|
||||
// IsHTTP11 returns true if the request is HTTP/1.1.
|
||||
func (h *RequestHeader) IsHTTP11() bool {
|
||||
return !h.noHTTP11
|
||||
@ -621,6 +638,7 @@ func (h *ResponseHeader) DisableNormalizing() {
|
||||
// Reset clears response header.
|
||||
func (h *ResponseHeader) Reset() {
|
||||
h.disableNormalizing = false
|
||||
h.noDefaultContentType = false
|
||||
h.resetSkipNormalize()
|
||||
}
|
||||
|
||||
@ -648,7 +666,6 @@ func (h *RequestHeader) Reset() {
|
||||
func (h *RequestHeader) resetSkipNormalize() {
|
||||
h.noHTTP11 = false
|
||||
h.connectionClose = false
|
||||
h.isGet = false
|
||||
|
||||
h.contentLength = 0
|
||||
h.contentLengthBytes = h.contentLengthBytes[:0]
|
||||
@ -674,6 +691,7 @@ func (h *ResponseHeader) CopyTo(dst *ResponseHeader) {
|
||||
dst.disableNormalizing = h.disableNormalizing
|
||||
dst.noHTTP11 = h.noHTTP11
|
||||
dst.connectionClose = h.connectionClose
|
||||
dst.noDefaultContentType = h.noDefaultContentType
|
||||
|
||||
dst.statusCode = h.statusCode
|
||||
dst.contentLength = h.contentLength
|
||||
@ -691,7 +709,6 @@ func (h *RequestHeader) CopyTo(dst *RequestHeader) {
|
||||
dst.disableNormalizing = h.disableNormalizing
|
||||
dst.noHTTP11 = h.noHTTP11
|
||||
dst.connectionClose = h.connectionClose
|
||||
dst.isGet = h.isGet
|
||||
|
||||
dst.contentLength = h.contentLength
|
||||
dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...)
|
||||
@ -1185,6 +1202,8 @@ func (h *ResponseHeader) peek(key []byte) []byte {
|
||||
return peekArgBytes(h.h, key)
|
||||
case "Content-Length":
|
||||
return h.contentLengthBytes
|
||||
case "Set-Cookie":
|
||||
return appendResponseCookieBytes(nil, h.cookies)
|
||||
default:
|
||||
return peekArgBytes(h.h, key)
|
||||
}
|
||||
@ -1206,6 +1225,12 @@ func (h *RequestHeader) peek(key []byte) []byte {
|
||||
return peekArgBytes(h.h, key)
|
||||
case "Content-Length":
|
||||
return h.contentLengthBytes
|
||||
case "Cookie":
|
||||
if h.cookiesCollected {
|
||||
return appendRequestCookieBytes(nil, h.cookies)
|
||||
} else {
|
||||
return peekArgBytes(h.h, key)
|
||||
}
|
||||
default:
|
||||
return peekArgBytes(h.h, key)
|
||||
}
|
||||
@ -1330,9 +1355,12 @@ func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error {
|
||||
h.resetSkipNormalize()
|
||||
b, err := r.Peek(n)
|
||||
if len(b) == 0 {
|
||||
// treat all errors on the first byte read as EOF
|
||||
if n == 1 || err == io.EOF {
|
||||
return io.EOF
|
||||
if err == io.EOF {
|
||||
return err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
panic("bufio.Reader.Peek() returned nil, nil")
|
||||
}
|
||||
|
||||
// This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 .
|
||||
@ -1431,10 +1459,9 @@ func (h *ResponseHeader) AppendBytes(dst []byte) []byte {
|
||||
dst = append(dst, statusLine(statusCode)...)
|
||||
|
||||
server := h.Server()
|
||||
if len(server) == 0 {
|
||||
server = defaultServerName
|
||||
if len(server) != 0 {
|
||||
dst = appendHeaderLine(dst, strServer, server)
|
||||
}
|
||||
dst = appendHeaderLine(dst, strServer, server)
|
||||
dst = appendHeaderLine(dst, strDate, serverDate.Load().([]byte))
|
||||
|
||||
// Append Content-Type only for non-zero responses
|
||||
@ -1524,7 +1551,7 @@ func (h *RequestHeader) AppendBytes(dst []byte) []byte {
|
||||
}
|
||||
|
||||
contentType := h.ContentType()
|
||||
if !h.noBody() {
|
||||
if !h.ignoreBody() {
|
||||
if len(contentType) == 0 {
|
||||
contentType = strPostArgsContentType
|
||||
}
|
||||
@ -1578,7 +1605,7 @@ func (h *ResponseHeader) parse(buf []byte) (int, error) {
|
||||
return m + n, nil
|
||||
}
|
||||
|
||||
func (h *RequestHeader) noBody() bool {
|
||||
func (h *RequestHeader) ignoreBody() bool {
|
||||
return h.IsGet() || h.IsHead()
|
||||
}
|
||||
|
||||
@ -1589,7 +1616,7 @@ func (h *RequestHeader) parse(buf []byte) (int, error) {
|
||||
}
|
||||
|
||||
var n int
|
||||
if !h.noBody() || h.noHTTP11 {
|
||||
if !h.ignoreBody() || h.noHTTP11 {
|
||||
n, err = h.parseHeaders(buf[m:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
@ -1738,36 +1765,52 @@ func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) {
|
||||
var err error
|
||||
var kv *argsKV
|
||||
for s.next() {
|
||||
switch string(s.key) {
|
||||
case "Content-Type":
|
||||
h.contentType = append(h.contentType[:0], s.value...)
|
||||
case "Server":
|
||||
h.server = append(h.server[:0], s.value...)
|
||||
case "Content-Length":
|
||||
if h.contentLength != -1 {
|
||||
if h.contentLength, err = parseContentLength(s.value); err != nil {
|
||||
h.contentLength = -2
|
||||
} else {
|
||||
h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...)
|
||||
if len(s.key) > 0 {
|
||||
switch s.key[0] | 0x20 {
|
||||
case 'c':
|
||||
if caseInsensitiveCompare(s.key, strContentType) {
|
||||
h.contentType = append(h.contentType[:0], s.value...)
|
||||
continue
|
||||
}
|
||||
if caseInsensitiveCompare(s.key, strContentLength) {
|
||||
if h.contentLength != -1 {
|
||||
if h.contentLength, err = parseContentLength(s.value); err != nil {
|
||||
h.contentLength = -2
|
||||
} else {
|
||||
h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if caseInsensitiveCompare(s.key, strConnection) {
|
||||
if bytes.Equal(s.value, strClose) {
|
||||
h.connectionClose = true
|
||||
} else {
|
||||
h.connectionClose = false
|
||||
h.h = appendArgBytes(h.h, s.key, s.value)
|
||||
}
|
||||
continue
|
||||
}
|
||||
case 's':
|
||||
if caseInsensitiveCompare(s.key, strServer) {
|
||||
h.server = append(h.server[:0], s.value...)
|
||||
continue
|
||||
}
|
||||
if caseInsensitiveCompare(s.key, strSetCookie) {
|
||||
h.cookies, kv = allocArg(h.cookies)
|
||||
kv.key = getCookieKey(kv.key, s.value)
|
||||
kv.value = append(kv.value[:0], s.value...)
|
||||
continue
|
||||
}
|
||||
case 't':
|
||||
if caseInsensitiveCompare(s.key, strTransferEncoding) {
|
||||
if !bytes.Equal(s.value, strIdentity) {
|
||||
h.contentLength = -1
|
||||
h.h = setArgBytes(h.h, strTransferEncoding, strChunked)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
case "Transfer-Encoding":
|
||||
if !bytes.Equal(s.value, strIdentity) {
|
||||
h.contentLength = -1
|
||||
h.h = setArgBytes(h.h, strTransferEncoding, strChunked)
|
||||
}
|
||||
case "Set-Cookie":
|
||||
h.cookies, kv = allocArg(h.cookies)
|
||||
kv.key = getCookieKey(kv.key, s.value)
|
||||
kv.value = append(kv.value[:0], s.value...)
|
||||
case "Connection":
|
||||
if bytes.Equal(s.value, strClose) {
|
||||
h.connectionClose = true
|
||||
} else {
|
||||
h.connectionClose = false
|
||||
h.h = appendArgBytes(h.h, s.key, s.value)
|
||||
}
|
||||
default:
|
||||
h.h = appendArgBytes(h.h, s.key, s.value)
|
||||
}
|
||||
}
|
||||
@ -1800,36 +1843,53 @@ func (h *RequestHeader) parseHeaders(buf []byte) (int, error) {
|
||||
s.disableNormalizing = h.disableNormalizing
|
||||
var err error
|
||||
for s.next() {
|
||||
switch string(s.key) {
|
||||
case "Host":
|
||||
h.host = append(h.host[:0], s.value...)
|
||||
case "User-Agent":
|
||||
h.userAgent = append(h.userAgent[:0], s.value...)
|
||||
case "Content-Type":
|
||||
h.contentType = append(h.contentType[:0], s.value...)
|
||||
case "Content-Length":
|
||||
if h.contentLength != -1 {
|
||||
if h.contentLength, err = parseContentLength(s.value); err != nil {
|
||||
h.contentLength = -2
|
||||
} else {
|
||||
h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...)
|
||||
if len(s.key) > 0 {
|
||||
switch s.key[0] | 0x20 {
|
||||
case 'h':
|
||||
if caseInsensitiveCompare(s.key, strHost) {
|
||||
h.host = append(h.host[:0], s.value...)
|
||||
continue
|
||||
}
|
||||
case 'u':
|
||||
if caseInsensitiveCompare(s.key, strUserAgent) {
|
||||
h.userAgent = append(h.userAgent[:0], s.value...)
|
||||
continue
|
||||
}
|
||||
case 'c':
|
||||
if caseInsensitiveCompare(s.key, strContentType) {
|
||||
h.contentType = append(h.contentType[:0], s.value...)
|
||||
continue
|
||||
}
|
||||
if caseInsensitiveCompare(s.key, strContentLength) {
|
||||
if h.contentLength != -1 {
|
||||
if h.contentLength, err = parseContentLength(s.value); err != nil {
|
||||
h.contentLength = -2
|
||||
} else {
|
||||
h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if caseInsensitiveCompare(s.key, strConnection) {
|
||||
if bytes.Equal(s.value, strClose) {
|
||||
h.connectionClose = true
|
||||
} else {
|
||||
h.connectionClose = false
|
||||
h.h = appendArgBytes(h.h, s.key, s.value)
|
||||
}
|
||||
continue
|
||||
}
|
||||
case 't':
|
||||
if caseInsensitiveCompare(s.key, strTransferEncoding) {
|
||||
if !bytes.Equal(s.value, strIdentity) {
|
||||
h.contentLength = -1
|
||||
h.h = setArgBytes(h.h, strTransferEncoding, strChunked)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
case "Transfer-Encoding":
|
||||
if !bytes.Equal(s.value, strIdentity) {
|
||||
h.contentLength = -1
|
||||
h.h = setArgBytes(h.h, strTransferEncoding, strChunked)
|
||||
}
|
||||
case "Connection":
|
||||
if bytes.Equal(s.value, strClose) {
|
||||
h.connectionClose = true
|
||||
} else {
|
||||
h.connectionClose = false
|
||||
h.h = appendArgBytes(h.h, s.key, s.value)
|
||||
}
|
||||
default:
|
||||
h.h = appendArgBytes(h.h, s.key, s.value)
|
||||
}
|
||||
h.h = appendArgBytes(h.h, s.key, s.value)
|
||||
}
|
||||
if s.err != nil {
|
||||
h.connectionClose = true
|
||||
@ -1839,10 +1899,6 @@ func (h *RequestHeader) parseHeaders(buf []byte) (int, error) {
|
||||
if h.contentLength < 0 {
|
||||
h.contentLengthBytes = h.contentLengthBytes[:0]
|
||||
}
|
||||
if h.noBody() {
|
||||
h.contentLength = 0
|
||||
h.contentLengthBytes = h.contentLengthBytes[:0]
|
||||
}
|
||||
if h.noHTTP11 && !h.connectionClose {
|
||||
// close connection for non-http/1.1 request unless 'Connection: keep-alive' is set.
|
||||
v := peekArgBytes(h.h, strConnection)
|
||||
|
44
vendor/github.com/valyala/fasthttp/http.go
generated
vendored
44
vendor/github.com/valyala/fasthttp/http.go
generated
vendored
@ -346,7 +346,7 @@ func (resp *Response) BodyGunzip() ([]byte, error) {
|
||||
}
|
||||
|
||||
func gunzipData(p []byte) ([]byte, error) {
|
||||
var bb ByteBuffer
|
||||
var bb bytebufferpool.ByteBuffer
|
||||
_, err := WriteGunzip(&bb, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -373,7 +373,7 @@ func (resp *Response) BodyInflate() ([]byte, error) {
|
||||
}
|
||||
|
||||
func inflateData(p []byte) ([]byte, error) {
|
||||
var bb ByteBuffer
|
||||
var bb bytebufferpool.ByteBuffer
|
||||
_, err := WriteInflate(&bb, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -711,7 +711,7 @@ func (req *Request) MultipartForm() (*multipart.Form, error) {
|
||||
}
|
||||
|
||||
func marshalMultipartForm(f *multipart.Form, boundary string) ([]byte, error) {
|
||||
var buf ByteBuffer
|
||||
var buf bytebufferpool.ByteBuffer
|
||||
if err := WriteMultipartForm(&buf, f, boundary); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -722,7 +722,7 @@ func marshalMultipartForm(f *multipart.Form, boundary string) ([]byte, error) {
|
||||
// boundary to w.
|
||||
func WriteMultipartForm(w io.Writer, f *multipart.Form, boundary string) error {
|
||||
// Do not care about memory allocations here, since multipart
|
||||
// form processing is slooow.
|
||||
// form processing is slow.
|
||||
if len(boundary) == 0 {
|
||||
panic("BUG: form boundary cannot be empty")
|
||||
}
|
||||
@ -881,10 +881,6 @@ func (req *Request) readLimitBody(r *bufio.Reader, maxBodySize int, getOnly bool
|
||||
return errGetOnly
|
||||
}
|
||||
|
||||
if req.Header.noBody() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if req.MayContinue() {
|
||||
// 'Expect: 100-continue' header found. Let the caller deciding
|
||||
// whether to read request body or
|
||||
@ -918,7 +914,7 @@ func (req *Request) MayContinue() bool {
|
||||
// then ErrBodyTooLarge is returned.
|
||||
func (req *Request) ContinueReadBody(r *bufio.Reader, maxBodySize int) error {
|
||||
var err error
|
||||
contentLength := req.Header.ContentLength()
|
||||
contentLength := req.Header.realContentLength()
|
||||
if contentLength > 0 {
|
||||
if maxBodySize > 0 && contentLength > maxBodySize {
|
||||
return ErrBodyTooLarge
|
||||
@ -1109,8 +1105,11 @@ func (req *Request) Write(w *bufio.Writer) error {
|
||||
req.Header.SetMultipartFormBoundary(req.multipartFormBoundary)
|
||||
}
|
||||
|
||||
hasBody := !req.Header.noBody()
|
||||
hasBody := !req.Header.ignoreBody()
|
||||
if hasBody {
|
||||
if len(body) == 0 {
|
||||
body = req.postArgs.QueryString()
|
||||
}
|
||||
req.Header.SetContentLength(len(body))
|
||||
}
|
||||
if err = req.Header.Write(w); err != nil {
|
||||
@ -1458,7 +1457,7 @@ func (resp *Response) String() string {
|
||||
}
|
||||
|
||||
func getHTTPString(hw httpWriter) string {
|
||||
w := AcquireByteBuffer()
|
||||
w := bytebufferpool.Get()
|
||||
bw := bufio.NewWriter(w)
|
||||
if err := hw.Write(bw); err != nil {
|
||||
return err.Error()
|
||||
@ -1467,7 +1466,7 @@ func getHTTPString(hw httpWriter) string {
|
||||
return err.Error()
|
||||
}
|
||||
s := string(w.B)
|
||||
ReleaseByteBuffer(w)
|
||||
bytebufferpool.Put(w)
|
||||
return s
|
||||
}
|
||||
|
||||
@ -1683,14 +1682,21 @@ func parseChunkSize(r *bufio.Reader) (int, error) {
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
for {
|
||||
c, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("cannot read '\r' char at the end of chunk size: %s", err)
|
||||
}
|
||||
// Skip any trailing whitespace after chunk size.
|
||||
if c == ' ' {
|
||||
continue
|
||||
}
|
||||
if c != '\r' {
|
||||
return -1, fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\r')
|
||||
}
|
||||
break
|
||||
}
|
||||
c, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("cannot read '\r' char at the end of chunk size: %s", err)
|
||||
}
|
||||
if c != '\r' {
|
||||
return -1, fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\r')
|
||||
}
|
||||
c, err = r.ReadByte()
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("cannot read '\n' char at the end of chunk size: %s", err)
|
||||
}
|
||||
|
2
vendor/github.com/valyala/fasthttp/lbclient.go
generated
vendored
2
vendor/github.com/valyala/fasthttp/lbclient.go
generated
vendored
@ -59,7 +59,7 @@ type LBClient struct {
|
||||
// DefaultLBClientTimeout is the default request timeout used by LBClient
|
||||
// when calling LBClient.Do.
|
||||
//
|
||||
// The timeout may be overriden via LBClient.Timeout.
|
||||
// The timeout may be overridden via LBClient.Timeout.
|
||||
const DefaultLBClientTimeout = time.Second
|
||||
|
||||
// DoDeadline calls DoDeadline on the least loaded client
|
||||
|
21
vendor/github.com/valyala/fasthttp/reuseport/LICENSE
generated
vendored
21
vendor/github.com/valyala/fasthttp/reuseport/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Max Riveiro
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
452
vendor/github.com/valyala/fasthttp/server.go
generated
vendored
452
vendor/github.com/valyala/fasthttp/server.go
generated
vendored
@ -16,6 +16,14 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
var errNoCertOrKeyProvided = errors.New("Cert or key has not provided")
|
||||
|
||||
var (
|
||||
// ErrAlreadyServing is returned when calling Serve on a Server
|
||||
// that is already serving connections.
|
||||
ErrAlreadyServing = errors.New("Server is already serving connections")
|
||||
)
|
||||
|
||||
// ServeConn serves HTTP requests from the given connection
|
||||
// using the given handler.
|
||||
//
|
||||
@ -211,6 +219,18 @@ type Server struct {
|
||||
// By default keep-alive connection lifetime is unlimited.
|
||||
MaxKeepaliveDuration time.Duration
|
||||
|
||||
// Whether to enable tcp keep-alive connections.
|
||||
//
|
||||
// Whether the operating system should send tcp keep-alive messages on the tcp connection.
|
||||
//
|
||||
// By default tcp keep-alive connections are disabled.
|
||||
TCPKeepalive bool
|
||||
|
||||
// Period between tcp keep-alive messages.
|
||||
//
|
||||
// TCP keep-alive period is determined by operation system by default.
|
||||
TCPKeepalivePeriod time.Duration
|
||||
|
||||
// Maximum request body size.
|
||||
//
|
||||
// The server rejects requests with bodies exceeding this limit.
|
||||
@ -265,11 +285,34 @@ type Server struct {
|
||||
// * cONTENT-lenGTH -> Content-Length
|
||||
DisableHeaderNamesNormalizing bool
|
||||
|
||||
// NoDefaultServerHeader, when set to true, causes the default Server header
|
||||
// to be excluded from the Response.
|
||||
//
|
||||
// The default Server header value is the value of the Name field or an
|
||||
// internal default value in its absence. With this option set to true,
|
||||
// the only time a Server header will be sent is if a non-zero length
|
||||
// value is explicitly provided during a request.
|
||||
NoDefaultServerHeader bool
|
||||
|
||||
// NoDefaultContentType, when set to true, causes the default Content-Type
|
||||
// header to be excluded from the Response.
|
||||
//
|
||||
// The default Content-Type header value is the internal default value. When
|
||||
// set to true, the Content-Type will not be present.
|
||||
NoDefaultContentType bool
|
||||
|
||||
// ConnState specifies an optional callback function that is
|
||||
// called when a client connection changes state. See the
|
||||
// ConnState type and associated constants for details.
|
||||
ConnState func(net.Conn, ConnState)
|
||||
|
||||
// Logger, which is used by RequestCtx.Logger().
|
||||
//
|
||||
// By default standard logger from log package is used.
|
||||
Logger Logger
|
||||
|
||||
tlsConfig *tls.Config
|
||||
|
||||
concurrency uint32
|
||||
concurrencyCh chan struct{}
|
||||
perIPConnCounter perIPConnCounter
|
||||
@ -280,6 +323,13 @@ type Server struct {
|
||||
writerPool sync.Pool
|
||||
hijackConnPool sync.Pool
|
||||
bytePool sync.Pool
|
||||
|
||||
// We need to know our listener so we can close it in Shutdown().
|
||||
ln net.Listener
|
||||
|
||||
mu sync.Mutex
|
||||
open int32
|
||||
stop int32
|
||||
}
|
||||
|
||||
// TimeoutHandler creates RequestHandler, which returns StatusRequestTimeout
|
||||
@ -344,12 +394,6 @@ func CompressHandler(h RequestHandler) RequestHandler {
|
||||
func CompressHandlerLevel(h RequestHandler, level int) RequestHandler {
|
||||
return func(ctx *RequestCtx) {
|
||||
h(ctx)
|
||||
ce := ctx.Response.Header.PeekBytes(strContentEncoding)
|
||||
if len(ce) > 0 {
|
||||
// Do not compress responses with non-empty
|
||||
// Content-Encoding.
|
||||
return
|
||||
}
|
||||
if ctx.Request.Header.HasAcceptEncodingBytes(strGzip) {
|
||||
ctx.Response.gzipBody(level)
|
||||
} else if ctx.Request.Header.HasAcceptEncodingBytes(strDeflate) {
|
||||
@ -598,18 +642,13 @@ func (ctx *RequestCtx) ConnID() uint64 {
|
||||
return ctx.connID
|
||||
}
|
||||
|
||||
// Time returns RequestHandler call time truncated to the nearest second.
|
||||
//
|
||||
// Call time.Now() at the beginning of RequestHandler in order to obtain
|
||||
// percise RequestHandler call time.
|
||||
// Time returns RequestHandler call time.
|
||||
func (ctx *RequestCtx) Time() time.Time {
|
||||
return ctx.time
|
||||
}
|
||||
|
||||
// ConnTime returns the time server starts serving the connection
|
||||
// ConnTime returns the time the server started serving the connection
|
||||
// the current request came from.
|
||||
//
|
||||
// The returned time is truncated to the nearest second.
|
||||
func (ctx *RequestCtx) ConnTime() time.Time {
|
||||
return ctx.connTime
|
||||
}
|
||||
@ -758,12 +797,28 @@ func SaveMultipartFile(fh *multipart.FileHeader, path string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if ff, ok := f.(*os.File); ok {
|
||||
return os.Rename(ff.Name(), path)
|
||||
// Windows can't rename files that are opened.
|
||||
if err := f.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If renaming fails we try the normal copying method.
|
||||
// Renaming could fail if the files are on different devices.
|
||||
if os.Rename(ff.Name(), path) == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reopen f for the code below.
|
||||
f, err = fh.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
|
||||
ff, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -827,6 +882,26 @@ func (ctx *RequestCtx) IsDelete() bool {
|
||||
return ctx.Request.Header.IsDelete()
|
||||
}
|
||||
|
||||
// IsConnect returns true if request method is CONNECT.
|
||||
func (ctx *RequestCtx) IsConnect() bool {
|
||||
return ctx.Request.Header.IsConnect()
|
||||
}
|
||||
|
||||
// IsOptions returns true if request method is OPTIONS.
|
||||
func (ctx *RequestCtx) IsOptions() bool {
|
||||
return ctx.Request.Header.IsOptions()
|
||||
}
|
||||
|
||||
// IsTrace returns true if request method is TRACE.
|
||||
func (ctx *RequestCtx) IsTrace() bool {
|
||||
return ctx.Request.Header.IsTrace()
|
||||
}
|
||||
|
||||
// IsPatch returns true if request method is PATCH.
|
||||
func (ctx *RequestCtx) IsPatch() bool {
|
||||
return ctx.Request.Header.IsPatch()
|
||||
}
|
||||
|
||||
// Method return request method.
|
||||
//
|
||||
// Returned value is valid until returning from RequestHandler.
|
||||
@ -922,7 +997,13 @@ func (ctx *RequestCtx) SuccessString(contentType, body string) {
|
||||
// All other statusCode values are replaced by StatusFound (302).
|
||||
//
|
||||
// The redirect uri may be either absolute or relative to the current
|
||||
// request uri.
|
||||
// request uri. Fasthttp will always send an absolute uri back to the client.
|
||||
// To send a relative uri you can use the following code:
|
||||
//
|
||||
// strLocation = []byte("Location") // Put this with your top level var () declarations.
|
||||
// ctx.Response.Header.SetCanonical(strLocation, "/relative?uri")
|
||||
// ctx.Response.SetStatusCode(fasthttp.StatusMovedPermanently)
|
||||
//
|
||||
func (ctx *RequestCtx) Redirect(uri string, statusCode int) {
|
||||
u := AcquireURI()
|
||||
ctx.URI().CopyTo(u)
|
||||
@ -944,7 +1025,13 @@ func (ctx *RequestCtx) Redirect(uri string, statusCode int) {
|
||||
// All other statusCode values are replaced by StatusFound (302).
|
||||
//
|
||||
// The redirect uri may be either absolute or relative to the current
|
||||
// request uri.
|
||||
// request uri. Fasthttp will always send an absolute uri back to the client.
|
||||
// To send a relative uri you can use the following code:
|
||||
//
|
||||
// strLocation = []byte("Location") // Put this with your top level var () declarations.
|
||||
// ctx.Response.Header.SetCanonical(strLocation, "/relative?uri")
|
||||
// ctx.Response.SetStatusCode(fasthttp.StatusMovedPermanently)
|
||||
//
|
||||
func (ctx *RequestCtx) RedirectBytes(uri []byte, statusCode int) {
|
||||
s := b2s(uri)
|
||||
ctx.Redirect(s, statusCode)
|
||||
@ -1155,15 +1242,46 @@ func (ctx *RequestCtx) TimeoutErrorWithResponse(resp *Response) {
|
||||
ctx.timeoutResponse = respCopy
|
||||
}
|
||||
|
||||
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
|
||||
// connections. It's used by ListenAndServe, ListenAndServeTLS and
|
||||
// ListenAndServeTLSEmbed so dead TCP connections (e.g. closing laptop mid-download)
|
||||
// eventually go away.
|
||||
type tcpKeepaliveListener struct {
|
||||
*net.TCPListener
|
||||
keepalivePeriod time.Duration
|
||||
}
|
||||
|
||||
func (ln tcpKeepaliveListener) Accept() (net.Conn, error) {
|
||||
tc, err := ln.AcceptTCP()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tc.SetKeepAlive(true)
|
||||
if ln.keepalivePeriod > 0 {
|
||||
tc.SetKeepAlivePeriod(ln.keepalivePeriod)
|
||||
}
|
||||
return tc, nil
|
||||
}
|
||||
|
||||
// ListenAndServe serves HTTP requests from the given TCP4 addr.
|
||||
//
|
||||
// Pass custom listener to Serve if you need listening on non-TCP4 media
|
||||
// such as IPv6.
|
||||
//
|
||||
// Accepted connections are configured to enable TCP keep-alives.
|
||||
func (s *Server) ListenAndServe(addr string) error {
|
||||
ln, err := net.Listen("tcp4", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.TCPKeepalive {
|
||||
if tcpln, ok := ln.(*net.TCPListener); ok {
|
||||
return s.Serve(tcpKeepaliveListener{
|
||||
TCPListener: tcpln,
|
||||
keepalivePeriod: s.TCPKeepalivePeriod,
|
||||
})
|
||||
}
|
||||
}
|
||||
return s.Serve(ln)
|
||||
}
|
||||
|
||||
@ -1192,11 +1310,24 @@ func (s *Server) ListenAndServeUNIX(addr string, mode os.FileMode) error {
|
||||
//
|
||||
// Pass custom listener to Serve if you need listening on non-TCP4 media
|
||||
// such as IPv6.
|
||||
//
|
||||
// If the certFile or keyFile has not been provided to the server structure,
|
||||
// the function will use the previously added TLS configuration.
|
||||
//
|
||||
// Accepted connections are configured to enable TCP keep-alives.
|
||||
func (s *Server) ListenAndServeTLS(addr, certFile, keyFile string) error {
|
||||
ln, err := net.Listen("tcp4", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.TCPKeepalive {
|
||||
if tcpln, ok := ln.(*net.TCPListener); ok {
|
||||
return s.ServeTLS(tcpKeepaliveListener{
|
||||
TCPListener: tcpln,
|
||||
keepalivePeriod: s.TCPKeepalivePeriod,
|
||||
}, certFile, keyFile)
|
||||
}
|
||||
}
|
||||
return s.ServeTLS(ln, certFile, keyFile)
|
||||
}
|
||||
|
||||
@ -1206,59 +1337,117 @@ func (s *Server) ListenAndServeTLS(addr, certFile, keyFile string) error {
|
||||
//
|
||||
// Pass custom listener to Serve if you need listening on arbitrary media
|
||||
// such as IPv6.
|
||||
//
|
||||
// If the certFile or keyFile has not been provided the server structure,
|
||||
// the function will use previously added TLS configuration.
|
||||
//
|
||||
// Accepted connections are configured to enable TCP keep-alives.
|
||||
func (s *Server) ListenAndServeTLSEmbed(addr string, certData, keyData []byte) error {
|
||||
ln, err := net.Listen("tcp4", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.TCPKeepalive {
|
||||
if tcpln, ok := ln.(*net.TCPListener); ok {
|
||||
return s.ServeTLSEmbed(tcpKeepaliveListener{
|
||||
TCPListener: tcpln,
|
||||
keepalivePeriod: s.TCPKeepalivePeriod,
|
||||
}, certData, keyData)
|
||||
}
|
||||
}
|
||||
return s.ServeTLSEmbed(ln, certData, keyData)
|
||||
}
|
||||
|
||||
// ServeTLS serves HTTPS requests from the given listener.
|
||||
//
|
||||
// certFile and keyFile are paths to TLS certificate and key files.
|
||||
//
|
||||
// If the certFile or keyFile has not been provided the server structure,
|
||||
// the function will use previously added TLS configuration.
|
||||
func (s *Server) ServeTLS(ln net.Listener, certFile, keyFile string) error {
|
||||
lnTLS, err := newTLSListener(ln, certFile, keyFile)
|
||||
if err != nil {
|
||||
err := s.AppendCert(certFile, keyFile)
|
||||
if err != nil && err != errNoCertOrKeyProvided {
|
||||
return err
|
||||
}
|
||||
return s.Serve(lnTLS)
|
||||
if s.tlsConfig == nil {
|
||||
return errNoCertOrKeyProvided
|
||||
}
|
||||
s.tlsConfig.BuildNameToCertificate()
|
||||
|
||||
return s.Serve(
|
||||
tls.NewListener(ln, s.tlsConfig),
|
||||
)
|
||||
}
|
||||
|
||||
// ServeTLSEmbed serves HTTPS requests from the given listener.
|
||||
//
|
||||
// certData and keyData must contain valid TLS certificate and key data.
|
||||
//
|
||||
// If the certFile or keyFile has not been provided the server structure,
|
||||
// the function will use previously added TLS configuration.
|
||||
func (s *Server) ServeTLSEmbed(ln net.Listener, certData, keyData []byte) error {
|
||||
lnTLS, err := newTLSListenerEmbed(ln, certData, keyData)
|
||||
if err != nil {
|
||||
err := s.AppendCertEmbed(certData, keyData)
|
||||
if err != nil && err != errNoCertOrKeyProvided {
|
||||
return err
|
||||
}
|
||||
return s.Serve(lnTLS)
|
||||
if s.tlsConfig == nil {
|
||||
return errNoCertOrKeyProvided
|
||||
}
|
||||
s.tlsConfig.BuildNameToCertificate()
|
||||
|
||||
return s.Serve(
|
||||
tls.NewListener(ln, s.tlsConfig),
|
||||
)
|
||||
}
|
||||
|
||||
func newTLSListener(ln net.Listener, certFile, keyFile string) (net.Listener, error) {
|
||||
// AppendCert appends certificate and keyfile to TLS Configuration.
|
||||
//
|
||||
// This function allows programmer to handle multiple domains
|
||||
// in one server structure. See examples/multidomain
|
||||
func (s *Server) AppendCert(certFile, keyFile string) error {
|
||||
if len(certFile) == 0 && len(keyFile) == 0 {
|
||||
return errNoCertOrKeyProvided
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot load TLS key pair from certFile=%q and keyFile=%q: %s", certFile, keyFile, err)
|
||||
return fmt.Errorf("cannot load TLS key pair from certFile=%q and keyFile=%q: %s", certFile, keyFile, err)
|
||||
}
|
||||
return newCertListener(ln, &cert), nil
|
||||
|
||||
if s.tlsConfig == nil {
|
||||
s.tlsConfig = &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
PreferServerCipherSuites: true,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert)
|
||||
return nil
|
||||
}
|
||||
|
||||
func newTLSListenerEmbed(ln net.Listener, certData, keyData []byte) (net.Listener, error) {
|
||||
// AppendCertEmbed does the same as AppendCert but using in-memory data.
|
||||
func (s *Server) AppendCertEmbed(certData, keyData []byte) error {
|
||||
if len(certData) == 0 && len(keyData) == 0 {
|
||||
return errNoCertOrKeyProvided
|
||||
}
|
||||
|
||||
cert, err := tls.X509KeyPair(certData, keyData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot load TLS key pair from the provided certData(%d) and keyData(%d): %s",
|
||||
return fmt.Errorf("cannot load TLS key pair from the provided certData(%d) and keyData(%d): %s",
|
||||
len(certData), len(keyData), err)
|
||||
}
|
||||
return newCertListener(ln, &cert), nil
|
||||
}
|
||||
|
||||
func newCertListener(ln net.Listener, cert *tls.Certificate) net.Listener {
|
||||
tlsConfig := &tls.Config{
|
||||
Certificates: []tls.Certificate{*cert},
|
||||
PreferServerCipherSuites: true,
|
||||
if s.tlsConfig == nil {
|
||||
s.tlsConfig = &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
PreferServerCipherSuites: true,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return tls.NewListener(ln, tlsConfig)
|
||||
|
||||
s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DefaultConcurrency is the maximum number of concurrent connections
|
||||
@ -1274,6 +1463,17 @@ func (s *Server) Serve(ln net.Listener) error {
|
||||
var c net.Conn
|
||||
var err error
|
||||
|
||||
s.mu.Lock()
|
||||
{
|
||||
if s.ln != nil {
|
||||
s.mu.Unlock()
|
||||
return ErrAlreadyServing
|
||||
}
|
||||
|
||||
s.ln = ln
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
maxWorkersCount := s.getConcurrency()
|
||||
s.concurrencyCh = make(chan struct{}, maxWorkersCount)
|
||||
wp := &workerPool{
|
||||
@ -1281,6 +1481,7 @@ func (s *Server) Serve(ln net.Listener) error {
|
||||
MaxWorkersCount: maxWorkersCount,
|
||||
LogAllErrors: s.LogAllErrors,
|
||||
Logger: s.logger(),
|
||||
connState: s.setState,
|
||||
}
|
||||
wp.Start()
|
||||
|
||||
@ -1292,14 +1493,18 @@ func (s *Server) Serve(ln net.Listener) error {
|
||||
}
|
||||
return err
|
||||
}
|
||||
s.setState(c, StateNew)
|
||||
atomic.AddInt32(&s.open, 1)
|
||||
if !wp.Serve(c) {
|
||||
atomic.AddInt32(&s.open, -1)
|
||||
s.writeFastError(c, StatusServiceUnavailable,
|
||||
"The connection cannot be served because Server.Concurrency limit exceeded")
|
||||
c.Close()
|
||||
s.setState(c, StateClosed)
|
||||
if time.Since(lastOverflowErrorTime) > time.Minute {
|
||||
s.logger().Printf("The incoming connection cannot be served, because %d concurrent connections are served. "+
|
||||
"Try increasing Server.Concurrency", maxWorkersCount)
|
||||
lastOverflowErrorTime = CoarseTimeNow()
|
||||
lastOverflowErrorTime = time.Now()
|
||||
}
|
||||
|
||||
// The current server reached concurrency limit,
|
||||
@ -1314,6 +1519,45 @@ func (s *Server) Serve(ln net.Listener) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown gracefully shuts down the server without interrupting any active connections.
|
||||
// Shutdown works by first closing all open listeners and then waiting indefinitely for all connections to return to idle and then shut down.
|
||||
//
|
||||
// When Shutdown is called, Serve, ListenAndServe, and ListenAndServeTLS immediately return nil.
|
||||
// Make sure the program doesn't exit and waits instead for Shutdown to return.
|
||||
//
|
||||
// Shutdown does not close keepalive connections so its recommended to set ReadTimeout to something else than 0.
|
||||
func (s *Server) Shutdown() error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
atomic.StoreInt32(&s.stop, 1)
|
||||
defer atomic.StoreInt32(&s.stop, 0)
|
||||
|
||||
if s.ln == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := s.ln.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Closing the listener will make Serve() call Stop on the worker pool.
|
||||
// Setting .stop to 1 will make serveConn() break out of its loop.
|
||||
// Now we just have to wait until all workers are done.
|
||||
for {
|
||||
if open := atomic.LoadInt32(&s.open); open == 0 {
|
||||
break
|
||||
}
|
||||
// This is not an optimal solution but using a sync.WaitGroup
|
||||
// here causes data races as it's hard to prevent Add() to be called
|
||||
// while Wait() is waiting.
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
|
||||
s.ln = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func acceptConn(s *Server, ln net.Listener, lastPerIPErrorTime *time.Time) (net.Conn, error) {
|
||||
for {
|
||||
c, err := ln.Accept()
|
||||
@ -1341,7 +1585,7 @@ func acceptConn(s *Server, ln net.Listener, lastPerIPErrorTime *time.Time) (net.
|
||||
if time.Since(*lastPerIPErrorTime) > time.Minute {
|
||||
s.logger().Printf("The number of connections from %s exceeds MaxConnsPerIP=%d",
|
||||
getConnIP4(c), s.MaxConnsPerIP)
|
||||
*lastPerIPErrorTime = CoarseTimeNow()
|
||||
*lastPerIPErrorTime = time.Now()
|
||||
}
|
||||
continue
|
||||
}
|
||||
@ -1381,8 +1625,8 @@ var (
|
||||
ErrPerIPConnLimit = errors.New("too many connections per ip")
|
||||
|
||||
// ErrConcurrencyLimit may be returned from ServeConn if the number
|
||||
// of concurrenty served connections exceeds Server.Concurrency.
|
||||
ErrConcurrencyLimit = errors.New("canot serve the connection because Server.Concurrency concurrent connections are served")
|
||||
// of concurrently served connections exceeds Server.Concurrency.
|
||||
ErrConcurrencyLimit = errors.New("cannot serve the connection because Server.Concurrency concurrent connections are served")
|
||||
|
||||
// ErrKeepaliveTimeout is returned from ServeConn
|
||||
// if the connection lifetime exceeds MaxKeepaliveDuration.
|
||||
@ -1415,17 +1659,21 @@ func (s *Server) ServeConn(c net.Conn) error {
|
||||
return ErrConcurrencyLimit
|
||||
}
|
||||
|
||||
atomic.AddInt32(&s.open, 1)
|
||||
|
||||
err := s.serveConn(c)
|
||||
|
||||
atomic.AddUint32(&s.concurrency, ^uint32(0))
|
||||
|
||||
if err != errHijacked {
|
||||
err1 := c.Close()
|
||||
s.setState(c, StateClosed)
|
||||
if err == nil {
|
||||
err = err1
|
||||
}
|
||||
} else {
|
||||
err = nil
|
||||
s.setState(c, StateHijacked)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -1453,10 +1701,15 @@ func nextConnID() uint64 {
|
||||
const DefaultMaxRequestBodySize = 4 * 1024 * 1024
|
||||
|
||||
func (s *Server) serveConn(c net.Conn) error {
|
||||
serverName := s.getServerName()
|
||||
defer atomic.AddInt32(&s.open, -1)
|
||||
|
||||
var serverName []byte
|
||||
if !s.NoDefaultServerHeader {
|
||||
serverName = s.getServerName()
|
||||
}
|
||||
connRequestNum := uint64(0)
|
||||
connID := nextConnID()
|
||||
currentTime := CoarseTimeNow()
|
||||
currentTime := time.Now()
|
||||
connTime := currentTime
|
||||
maxRequestBodySize := s.MaxRequestBodySize
|
||||
if maxRequestBodySize <= 0 {
|
||||
@ -1481,6 +1734,11 @@ func (s *Server) serveConn(c net.Conn) error {
|
||||
isHTTP11 bool
|
||||
)
|
||||
for {
|
||||
if atomic.LoadInt32(&s.stop) == 1 {
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
|
||||
connRequestNum++
|
||||
ctx.time = currentTime
|
||||
|
||||
@ -1500,34 +1758,40 @@ func (s *Server) serveConn(c net.Conn) error {
|
||||
br, err = acquireByteReader(&ctx)
|
||||
}
|
||||
ctx.Request.isTLS = isTLS
|
||||
ctx.Response.Header.noDefaultContentType = s.NoDefaultContentType
|
||||
|
||||
if err == nil {
|
||||
if s.DisableHeaderNamesNormalizing {
|
||||
ctx.Request.Header.DisableNormalizing()
|
||||
ctx.Response.Header.DisableNormalizing()
|
||||
}
|
||||
// reading Headers and Body
|
||||
err = ctx.Request.readLimitBody(br, maxRequestBodySize, s.GetOnly)
|
||||
if br.Buffered() > 0 {
|
||||
// If we read any bytes off the wire, we're active.
|
||||
s.setState(c, StateActive)
|
||||
}
|
||||
if br.Buffered() == 0 || err != nil {
|
||||
releaseReader(s, br)
|
||||
br = nil
|
||||
}
|
||||
}
|
||||
|
||||
currentTime = CoarseTimeNow()
|
||||
currentTime = time.Now()
|
||||
ctx.lastReadDuration = currentTime.Sub(ctx.time)
|
||||
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
} else {
|
||||
bw = writeErrorResponse(bw, ctx, err)
|
||||
bw = writeErrorResponse(bw, ctx, serverName, err)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// 'Expect: 100-continue' request handling.
|
||||
// See http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html for details.
|
||||
if !ctx.Request.Header.noBody() && ctx.Request.MayContinue() {
|
||||
if !ctx.Request.Header.ignoreBody() && ctx.Request.MayContinue() {
|
||||
// Send 'HTTP/1.1 100 Continue' response.
|
||||
if bw == nil {
|
||||
bw = acquireWriter(ctx)
|
||||
@ -1550,18 +1814,19 @@ func (s *Server) serveConn(c net.Conn) error {
|
||||
br = nil
|
||||
}
|
||||
if err != nil {
|
||||
bw = writeErrorResponse(bw, ctx, err)
|
||||
bw = writeErrorResponse(bw, ctx, serverName, err)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
connectionClose = s.DisableKeepalive || ctx.Request.Header.connectionCloseFast()
|
||||
connectionClose = s.DisableKeepalive || ctx.Request.Header.ConnectionClose()
|
||||
isHTTP11 = ctx.Request.Header.IsHTTP11()
|
||||
|
||||
ctx.Response.Header.SetServerBytes(serverName)
|
||||
if serverName != nil {
|
||||
ctx.Response.Header.SetServerBytes(serverName)
|
||||
}
|
||||
ctx.connID = connID
|
||||
ctx.connRequestNum = connRequestNum
|
||||
ctx.connTime = connTime
|
||||
ctx.time = currentTime
|
||||
s.Handler(ctx)
|
||||
|
||||
@ -1593,9 +1858,7 @@ func (s *Server) serveConn(c net.Conn) error {
|
||||
lastWriteDeadlineTime = s.updateWriteDeadline(c, ctx, lastWriteDeadlineTime)
|
||||
}
|
||||
|
||||
// Verify Request.Header.connectionCloseFast() again,
|
||||
// since request handler might trigger full headers' parsing.
|
||||
connectionClose = connectionClose || ctx.Request.Header.connectionCloseFast() || ctx.Response.ConnectionClose()
|
||||
connectionClose = connectionClose || ctx.Response.ConnectionClose()
|
||||
if connectionClose {
|
||||
ctx.Response.Header.SetCanonical(strConnection, strClose)
|
||||
} else if !isHTTP11 {
|
||||
@ -1605,7 +1868,7 @@ func (s *Server) serveConn(c net.Conn) error {
|
||||
ctx.Response.Header.SetCanonical(strConnection, strKeepAlive)
|
||||
}
|
||||
|
||||
if len(ctx.Response.Header.Server()) == 0 {
|
||||
if serverName != nil && len(ctx.Response.Header.Server()) == 0 {
|
||||
ctx.Response.Header.SetServerBytes(serverName)
|
||||
}
|
||||
|
||||
@ -1654,7 +1917,8 @@ func (s *Server) serveConn(c net.Conn) error {
|
||||
break
|
||||
}
|
||||
|
||||
currentTime = CoarseTimeNow()
|
||||
currentTime = time.Now()
|
||||
s.setState(c, StateIdle)
|
||||
}
|
||||
|
||||
if br != nil {
|
||||
@ -1667,6 +1931,12 @@ func (s *Server) serveConn(c net.Conn) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Server) setState(nc net.Conn, state ConnState) {
|
||||
if hook := s.ConnState; hook != nil {
|
||||
hook(nc, state)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) updateReadDeadline(c net.Conn, ctx *RequestCtx, lastDeadlineTime time.Time) time.Time {
|
||||
readTimeout := s.ReadTimeout
|
||||
currentTime := ctx.time
|
||||
@ -1710,7 +1980,7 @@ func (s *Server) updateWriteDeadline(c net.Conn, ctx *RequestCtx, lastDeadlineTi
|
||||
// Optimization: update write deadline only if more than 25%
|
||||
// of the last write deadline exceeded.
|
||||
// See https://github.com/golang/go/issues/15133 for details.
|
||||
currentTime := CoarseTimeNow()
|
||||
currentTime := time.Now()
|
||||
if currentTime.Sub(lastDeadlineTime) > (writeTimeout >> 2) {
|
||||
if err := c.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil {
|
||||
panic(fmt.Sprintf("BUG: error in SetWriteDeadline(%s): %s", writeTimeout, err))
|
||||
@ -1864,9 +2134,8 @@ func releaseWriter(s *Server, w *bufio.Writer) {
|
||||
s.writerPool.Put(w)
|
||||
}
|
||||
|
||||
func (s *Server) acquireCtx(c net.Conn) *RequestCtx {
|
||||
func (s *Server) acquireCtx(c net.Conn) (ctx *RequestCtx) {
|
||||
v := s.ctxPool.Get()
|
||||
var ctx *RequestCtx
|
||||
if v == nil {
|
||||
ctx = &RequestCtx{
|
||||
s: s,
|
||||
@ -1878,7 +2147,7 @@ func (s *Server) acquireCtx(c net.Conn) *RequestCtx {
|
||||
ctx = v.(*RequestCtx)
|
||||
}
|
||||
ctx.c = c
|
||||
return ctx
|
||||
return
|
||||
}
|
||||
|
||||
// Init2 prepares ctx for passing to RequestHandler.
|
||||
@ -1893,7 +2162,7 @@ func (ctx *RequestCtx) Init2(conn net.Conn, logger Logger, reduceMemoryUsage boo
|
||||
ctx.connID = nextConnID()
|
||||
ctx.s = fakeServer
|
||||
ctx.connRequestNum = 0
|
||||
ctx.connTime = CoarseTimeNow()
|
||||
ctx.connTime = time.Now()
|
||||
ctx.time = ctx.connTime
|
||||
|
||||
keepBodyBuffer := !reduceMemoryUsage
|
||||
@ -1978,22 +2247,31 @@ func (s *Server) getServerName() []byte {
|
||||
|
||||
func (s *Server) writeFastError(w io.Writer, statusCode int, msg string) {
|
||||
w.Write(statusLine(statusCode))
|
||||
|
||||
server := ""
|
||||
if !s.NoDefaultServerHeader {
|
||||
server = fmt.Sprintf("Server: %s\r\n", s.getServerName())
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "Connection: close\r\n"+
|
||||
"Server: %s\r\n"+
|
||||
server+
|
||||
"Date: %s\r\n"+
|
||||
"Content-Type: text/plain\r\n"+
|
||||
"Content-Length: %d\r\n"+
|
||||
"\r\n"+
|
||||
"%s",
|
||||
s.getServerName(), serverDate.Load(), len(msg), msg)
|
||||
serverDate.Load(), len(msg), msg)
|
||||
}
|
||||
|
||||
func writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, err error) *bufio.Writer {
|
||||
func writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, serverName []byte, err error) *bufio.Writer {
|
||||
if _, ok := err.(*ErrSmallBuffer); ok {
|
||||
ctx.Error("Too big request header", StatusRequestHeaderFieldsTooLarge)
|
||||
} else {
|
||||
ctx.Error("Error when parsing request", StatusBadRequest)
|
||||
}
|
||||
if serverName != nil {
|
||||
ctx.Response.Header.SetServerBytes(serverName)
|
||||
}
|
||||
ctx.SetConnectionClose()
|
||||
if bw == nil {
|
||||
bw = acquireWriter(ctx)
|
||||
@ -2002,3 +2280,55 @@ func writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, err error) *bufio.Wri
|
||||
bw.Flush()
|
||||
return bw
|
||||
}
|
||||
|
||||
// A ConnState represents the state of a client connection to a server.
|
||||
// It's used by the optional Server.ConnState hook.
|
||||
type ConnState int
|
||||
|
||||
const (
|
||||
// StateNew represents a new connection that is expected to
|
||||
// send a request immediately. Connections begin at this
|
||||
// state and then transition to either StateActive or
|
||||
// StateClosed.
|
||||
StateNew ConnState = iota
|
||||
|
||||
// StateActive represents a connection that has read 1 or more
|
||||
// bytes of a request. The Server.ConnState hook for
|
||||
// StateActive fires before the request has entered a handler
|
||||
// and doesn't fire again until the request has been
|
||||
// handled. After the request is handled, the state
|
||||
// transitions to StateClosed, StateHijacked, or StateIdle.
|
||||
// For HTTP/2, StateActive fires on the transition from zero
|
||||
// to one active request, and only transitions away once all
|
||||
// active requests are complete. That means that ConnState
|
||||
// cannot be used to do per-request work; ConnState only notes
|
||||
// the overall state of the connection.
|
||||
StateActive
|
||||
|
||||
// StateIdle represents a connection that has finished
|
||||
// handling a request and is in the keep-alive state, waiting
|
||||
// for a new request. Connections transition from StateIdle
|
||||
// to either StateActive or StateClosed.
|
||||
StateIdle
|
||||
|
||||
// StateHijacked represents a hijacked connection.
|
||||
// This is a terminal state. It does not transition to StateClosed.
|
||||
StateHijacked
|
||||
|
||||
// StateClosed represents a closed connection.
|
||||
// This is a terminal state. Hijacked connections do not
|
||||
// transition to StateClosed.
|
||||
StateClosed
|
||||
)
|
||||
|
||||
var stateName = map[ConnState]string{
|
||||
StateNew: "new",
|
||||
StateActive: "active",
|
||||
StateIdle: "idle",
|
||||
StateHijacked: "hijacked",
|
||||
StateClosed: "closed",
|
||||
}
|
||||
|
||||
func (c ConnState) String() string {
|
||||
return stateName[c]
|
||||
}
|
||||
|
3
vendor/github.com/valyala/fasthttp/stackless/writer.go
generated
vendored
3
vendor/github.com/valyala/fasthttp/stackless/writer.go
generated
vendored
@ -3,8 +3,9 @@ package stackless
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/valyala/bytebufferpool"
|
||||
"io"
|
||||
|
||||
"github.com/valyala/bytebufferpool"
|
||||
)
|
||||
|
||||
// Writer is an interface stackless writer must conform to.
|
||||
|
15
vendor/github.com/valyala/fasthttp/strings.go
generated
vendored
15
vendor/github.com/valyala/fasthttp/strings.go
generated
vendored
@ -22,11 +22,15 @@ var (
|
||||
|
||||
strResponseContinue = []byte("HTTP/1.1 100 Continue\r\n\r\n")
|
||||
|
||||
strGet = []byte("GET")
|
||||
strHead = []byte("HEAD")
|
||||
strPost = []byte("POST")
|
||||
strPut = []byte("PUT")
|
||||
strDelete = []byte("DELETE")
|
||||
strGet = []byte("GET")
|
||||
strHead = []byte("HEAD")
|
||||
strPost = []byte("POST")
|
||||
strPut = []byte("PUT")
|
||||
strDelete = []byte("DELETE")
|
||||
strConnect = []byte("CONNECT")
|
||||
strOptions = []byte("OPTIONS")
|
||||
strTrace = []byte("TRACE")
|
||||
strPatch = []byte("PATCH")
|
||||
|
||||
strExpect = []byte("Expect")
|
||||
strConnection = []byte("Connection")
|
||||
@ -54,6 +58,7 @@ var (
|
||||
strCookiePath = []byte("path")
|
||||
strCookieHTTPOnly = []byte("HttpOnly")
|
||||
strCookieSecure = []byte("secure")
|
||||
strCookieMaxAge = []byte("max-age")
|
||||
|
||||
strClose = []byte("close")
|
||||
strGzip = []byte("gzip")
|
||||
|
2
vendor/github.com/valyala/fasthttp/uri.go
generated
vendored
2
vendor/github.com/valyala/fasthttp/uri.go
generated
vendored
@ -180,7 +180,7 @@ func (u *URI) Reset() {
|
||||
u.parsedQueryArgs = false
|
||||
|
||||
// There is no need in u.fullURI = u.fullURI[:0], since full uri
|
||||
// is calucalted on each call to FullURI().
|
||||
// is calculated on each call to FullURI().
|
||||
|
||||
// There is no need in u.requestURI = u.requestURI[:0], since requestURI
|
||||
// is calculated on each call to RequestURI().
|
||||
|
10
vendor/github.com/valyala/fasthttp/workerpool.go
generated
vendored
10
vendor/github.com/valyala/fasthttp/workerpool.go
generated
vendored
@ -35,6 +35,8 @@ type workerPool struct {
|
||||
stopCh chan struct{}
|
||||
|
||||
workerChanPool sync.Pool
|
||||
|
||||
connState func(net.Conn, ConnState)
|
||||
}
|
||||
|
||||
type workerChan struct {
|
||||
@ -187,7 +189,7 @@ func (wp *workerPool) getCh() *workerChan {
|
||||
}
|
||||
|
||||
func (wp *workerPool) release(ch *workerChan) bool {
|
||||
ch.lastUseTime = CoarseTimeNow()
|
||||
ch.lastUseTime = time.Now()
|
||||
wp.lock.Lock()
|
||||
if wp.mustStop {
|
||||
wp.lock.Unlock()
|
||||
@ -211,12 +213,16 @@ func (wp *workerPool) workerFunc(ch *workerChan) {
|
||||
errStr := err.Error()
|
||||
if wp.LogAllErrors || !(strings.Contains(errStr, "broken pipe") ||
|
||||
strings.Contains(errStr, "reset by peer") ||
|
||||
strings.Contains(errStr, "request headers: small read buffer") ||
|
||||
strings.Contains(errStr, "i/o timeout")) {
|
||||
wp.Logger.Printf("error when serving connection %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err)
|
||||
}
|
||||
}
|
||||
if err != errHijacked {
|
||||
if err == errHijacked {
|
||||
wp.connState(c, StateHijacked)
|
||||
} else {
|
||||
c.Close()
|
||||
wp.connState(c, StateClosed)
|
||||
}
|
||||
c = nil
|
||||
|
||||
|
369
vendor/golang.org/x/text/internal/gen/code.go
generated
vendored
369
vendor/golang.org/x/text/internal/gen/code.go
generated
vendored
@ -1,369 +0,0 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// This file contains utilities for generating code.
|
||||
|
||||
// TODO: other write methods like:
|
||||
// - slices, maps, types, etc.
|
||||
|
||||
// CodeWriter is a utility for writing structured code. It computes the content
|
||||
// hash and size of written content. It ensures there are newlines between
|
||||
// written code blocks.
|
||||
type CodeWriter struct {
|
||||
buf bytes.Buffer
|
||||
Size int
|
||||
Hash hash.Hash32 // content hash
|
||||
gob *gob.Encoder
|
||||
// For comments we skip the usual one-line separator if they are followed by
|
||||
// a code block.
|
||||
skipSep bool
|
||||
}
|
||||
|
||||
func (w *CodeWriter) Write(p []byte) (n int, err error) {
|
||||
return w.buf.Write(p)
|
||||
}
|
||||
|
||||
// NewCodeWriter returns a new CodeWriter.
|
||||
func NewCodeWriter() *CodeWriter {
|
||||
h := fnv.New32()
|
||||
return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
|
||||
}
|
||||
|
||||
// WriteGoFile appends the buffer with the total size of all created structures
|
||||
// and writes it as a Go file to the the given file with the given package name.
|
||||
func (w *CodeWriter) WriteGoFile(filename, pkg string) {
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err = w.WriteGo(f, pkg, ""); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteVersionedGoFile appends the buffer with the total size of all created
|
||||
// structures and writes it as a Go file to the the given file with the given
|
||||
// package name and build tags for the current Unicode version,
|
||||
func (w *CodeWriter) WriteVersionedGoFile(filename, pkg string) {
|
||||
tags := buildTags()
|
||||
if tags != "" {
|
||||
filename = insertVersion(filename, UnicodeVersion())
|
||||
}
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err = w.WriteGo(f, pkg, tags); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteGo appends the buffer with the total size of all created structures and
|
||||
// writes it as a Go file to the the given writer with the given package name.
|
||||
func (w *CodeWriter) WriteGo(out io.Writer, pkg, tags string) (n int, err error) {
|
||||
sz := w.Size
|
||||
w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
|
||||
defer w.buf.Reset()
|
||||
return WriteGo(out, pkg, tags, w.buf.Bytes())
|
||||
}
|
||||
|
||||
func (w *CodeWriter) printf(f string, x ...interface{}) {
|
||||
fmt.Fprintf(w, f, x...)
|
||||
}
|
||||
|
||||
func (w *CodeWriter) insertSep() {
|
||||
if w.skipSep {
|
||||
w.skipSep = false
|
||||
return
|
||||
}
|
||||
// Use at least two newlines to ensure a blank space between the previous
|
||||
// block. WriteGoFile will remove extraneous newlines.
|
||||
w.printf("\n\n")
|
||||
}
|
||||
|
||||
// WriteComment writes a comment block. All line starts are prefixed with "//".
|
||||
// Initial empty lines are gobbled. The indentation for the first line is
|
||||
// stripped from consecutive lines.
|
||||
func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
|
||||
s := fmt.Sprintf(comment, args...)
|
||||
s = strings.Trim(s, "\n")
|
||||
|
||||
// Use at least two newlines to ensure a blank space between the previous
|
||||
// block. WriteGoFile will remove extraneous newlines.
|
||||
w.printf("\n\n// ")
|
||||
w.skipSep = true
|
||||
|
||||
// strip first indent level.
|
||||
sep := "\n"
|
||||
for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
|
||||
sep += s[:1]
|
||||
}
|
||||
|
||||
strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)
|
||||
|
||||
w.printf("\n")
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeSizeInfo(size int) {
|
||||
w.printf("// Size: %d bytes\n", size)
|
||||
}
|
||||
|
||||
// WriteConst writes a constant of the given name and value.
|
||||
func (w *CodeWriter) WriteConst(name string, x interface{}) {
|
||||
w.insertSep()
|
||||
v := reflect.ValueOf(x)
|
||||
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
w.printf("const %s %s = ", name, typeName(x))
|
||||
w.WriteString(v.String())
|
||||
w.printf("\n")
|
||||
default:
|
||||
w.printf("const %s = %#v\n", name, x)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteVar writes a variable of the given name and value.
|
||||
func (w *CodeWriter) WriteVar(name string, x interface{}) {
|
||||
w.insertSep()
|
||||
v := reflect.ValueOf(x)
|
||||
oldSize := w.Size
|
||||
sz := int(v.Type().Size())
|
||||
w.Size += sz
|
||||
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
w.printf("var %s %s = ", name, typeName(x))
|
||||
w.WriteString(v.String())
|
||||
case reflect.Struct:
|
||||
w.gob.Encode(x)
|
||||
fallthrough
|
||||
case reflect.Slice, reflect.Array:
|
||||
w.printf("var %s = ", name)
|
||||
w.writeValue(v)
|
||||
w.writeSizeInfo(w.Size - oldSize)
|
||||
default:
|
||||
w.printf("var %s %s = ", name, typeName(x))
|
||||
w.gob.Encode(x)
|
||||
w.writeValue(v)
|
||||
w.writeSizeInfo(w.Size - oldSize)
|
||||
}
|
||||
w.printf("\n")
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeValue(v reflect.Value) {
|
||||
x := v.Interface()
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
w.WriteString(v.String())
|
||||
case reflect.Array:
|
||||
// Don't double count: callers of WriteArray count on the size being
|
||||
// added, so we need to discount it here.
|
||||
w.Size -= int(v.Type().Size())
|
||||
w.writeSlice(x, true)
|
||||
case reflect.Slice:
|
||||
w.writeSlice(x, false)
|
||||
case reflect.Struct:
|
||||
w.printf("%s{\n", typeName(v.Interface()))
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
w.printf("%s: ", t.Field(i).Name)
|
||||
w.writeValue(v.Field(i))
|
||||
w.printf(",\n")
|
||||
}
|
||||
w.printf("}")
|
||||
default:
|
||||
w.printf("%#v", x)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteString writes a string literal.
|
||||
func (w *CodeWriter) WriteString(s string) {
|
||||
s = strings.Replace(s, `\`, `\\`, -1)
|
||||
io.WriteString(w.Hash, s) // content hash
|
||||
w.Size += len(s)
|
||||
|
||||
const maxInline = 40
|
||||
if len(s) <= maxInline {
|
||||
w.printf("%q", s)
|
||||
return
|
||||
}
|
||||
|
||||
// We will render the string as a multi-line string.
|
||||
const maxWidth = 80 - 4 - len(`"`) - len(`" +`)
|
||||
|
||||
// When starting on its own line, go fmt indents line 2+ an extra level.
|
||||
n, max := maxWidth, maxWidth-4
|
||||
|
||||
// As per https://golang.org/issue/18078, the compiler has trouble
|
||||
// compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
|
||||
// for large N. We insert redundant, explicit parentheses to work around
|
||||
// that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
|
||||
// ... + s127) + etc + (etc + ... + sN).
|
||||
explicitParens, extraComment := len(s) > 128*1024, ""
|
||||
if explicitParens {
|
||||
w.printf(`(`)
|
||||
extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
|
||||
}
|
||||
|
||||
// Print "" +\n, if a string does not start on its own line.
|
||||
b := w.buf.Bytes()
|
||||
if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
|
||||
w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
|
||||
n, max = maxWidth, maxWidth
|
||||
}
|
||||
|
||||
w.printf(`"`)
|
||||
|
||||
for sz, p, nLines := 0, 0, 0; p < len(s); {
|
||||
var r rune
|
||||
r, sz = utf8.DecodeRuneInString(s[p:])
|
||||
out := s[p : p+sz]
|
||||
chars := 1
|
||||
if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' {
|
||||
switch sz {
|
||||
case 1:
|
||||
out = fmt.Sprintf("\\x%02x", s[p])
|
||||
case 2, 3:
|
||||
out = fmt.Sprintf("\\u%04x", r)
|
||||
case 4:
|
||||
out = fmt.Sprintf("\\U%08x", r)
|
||||
}
|
||||
chars = len(out)
|
||||
}
|
||||
if n -= chars; n < 0 {
|
||||
nLines++
|
||||
if explicitParens && nLines&63 == 63 {
|
||||
w.printf("\") + (\"")
|
||||
}
|
||||
w.printf("\" +\n\"")
|
||||
n = max - len(out)
|
||||
}
|
||||
w.printf("%s", out)
|
||||
p += sz
|
||||
}
|
||||
w.printf(`"`)
|
||||
if explicitParens {
|
||||
w.printf(`)`)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteSlice writes a slice value.
|
||||
func (w *CodeWriter) WriteSlice(x interface{}) {
|
||||
w.writeSlice(x, false)
|
||||
}
|
||||
|
||||
// WriteArray writes an array value.
|
||||
func (w *CodeWriter) WriteArray(x interface{}) {
|
||||
w.writeSlice(x, true)
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeSlice(x interface{}, isArray bool) {
|
||||
v := reflect.ValueOf(x)
|
||||
w.gob.Encode(v.Len())
|
||||
w.Size += v.Len() * int(v.Type().Elem().Size())
|
||||
name := typeName(x)
|
||||
if isArray {
|
||||
name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:])
|
||||
}
|
||||
if isArray {
|
||||
w.printf("%s{\n", name)
|
||||
} else {
|
||||
w.printf("%s{ // %d elements\n", name, v.Len())
|
||||
}
|
||||
|
||||
switch kind := v.Type().Elem().Kind(); kind {
|
||||
case reflect.String:
|
||||
for _, s := range x.([]string) {
|
||||
w.WriteString(s)
|
||||
w.printf(",\n")
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
// nLine and nBlock are the number of elements per line and block.
|
||||
nLine, nBlock, format := 8, 64, "%d,"
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
format = "%#02x,"
|
||||
case reflect.Uint16:
|
||||
format = "%#04x,"
|
||||
case reflect.Uint32:
|
||||
nLine, nBlock, format = 4, 32, "%#08x,"
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
nLine, nBlock, format = 4, 32, "%#016x,"
|
||||
case reflect.Int8:
|
||||
nLine = 16
|
||||
}
|
||||
n := nLine
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i%nBlock == 0 && v.Len() > nBlock {
|
||||
w.printf("// Entry %X - %X\n", i, i+nBlock-1)
|
||||
}
|
||||
x := v.Index(i).Interface()
|
||||
w.gob.Encode(x)
|
||||
w.printf(format, x)
|
||||
if n--; n == 0 {
|
||||
n = nLine
|
||||
w.printf("\n")
|
||||
}
|
||||
}
|
||||
w.printf("\n")
|
||||
case reflect.Struct:
|
||||
zero := reflect.Zero(v.Type().Elem()).Interface()
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
x := v.Index(i).Interface()
|
||||
w.gob.EncodeValue(v)
|
||||
if !reflect.DeepEqual(zero, x) {
|
||||
line := fmt.Sprintf("%#v,\n", x)
|
||||
line = line[strings.IndexByte(line, '{'):]
|
||||
w.printf("%d: ", i)
|
||||
w.printf(line)
|
||||
}
|
||||
}
|
||||
case reflect.Array:
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
w.printf("%d: %#v,\n", i, v.Index(i).Interface())
|
||||
}
|
||||
default:
|
||||
panic("gen: slice elem type not supported")
|
||||
}
|
||||
w.printf("}")
|
||||
}
|
||||
|
||||
// WriteType writes a definition of the type of the given value and returns the
|
||||
// type name.
|
||||
func (w *CodeWriter) WriteType(x interface{}) string {
|
||||
t := reflect.TypeOf(x)
|
||||
w.printf("type %s struct {\n", t.Name())
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type)
|
||||
}
|
||||
w.printf("}\n")
|
||||
return t.Name()
|
||||
}
|
||||
|
||||
// typeName returns the name of the go type of x, with any "main." package
// qualifier stripped.
func typeName(x interface{}) string {
	name := fmt.Sprint(reflect.ValueOf(x).Type())
	return strings.Replace(name, "main.", "", 1)
}
|
333
vendor/golang.org/x/text/internal/gen/gen.go
generated
vendored
333
vendor/golang.org/x/text/internal/gen/gen.go
generated
vendored
@ -1,333 +0,0 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gen contains common code for the various code generation tools in the
|
||||
// text repository. Its usage ensures consistency between tools.
|
||||
//
|
||||
// This package defines command line flags that are common to most generation
|
||||
// tools. The flags allow for specifying specific Unicode and CLDR versions
|
||||
// in the public Unicode data repository (http://www.unicode.org/Public).
|
||||
//
|
||||
// A local Unicode data mirror can be set through the flag -local or the
|
||||
// environment variable UNICODE_DIR. The former takes precedence. The local
|
||||
// directory should follow the same structure as the public repository.
|
||||
//
|
||||
// IANA data can also optionally be mirrored by putting it in the iana directory
|
||||
// rooted at the top of the local mirror. Beware, though, that IANA data is not
|
||||
// versioned. So it is up to the developer to use the right version.
|
||||
package gen // import "golang.org/x/text/internal/gen"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/format"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/unicode/cldr"
|
||||
)
|
||||
|
||||
// Command-line flags shared by all generation tools. The Unicode and CLDR
// versions default to the versions the linked packages were built against,
// overridable via the UNICODE_VERSION / CLDR_VERSION environment variables.
var (
	url = flag.String("url",
		"http://www.unicode.org/Public",
		"URL of Unicode database directory")
	iana = flag.String("iana",
		"http://www.iana.org",
		"URL of the IANA repository")
	unicodeVersion = flag.String("unicode",
		getEnv("UNICODE_VERSION", unicode.Version),
		"unicode version to use")
	cldrVersion = flag.String("cldr",
		getEnv("CLDR_VERSION", cldr.Version),
		"cldr version to use")
)
|
||||
|
||||
// getEnv returns the value of the environment variable name, or def when the
// variable is unset or empty.
func getEnv(name, def string) string {
	v := os.Getenv(name)
	if v == "" {
		return def
	}
	return v
}
|
||||
|
||||
// Init performs common initialization for a gen command. It parses the flags
// and sets up the standard logging parameters.
func Init() {
	// Configure the standard logger before parsing so flag errors are
	// reported in a consistent format.
	log.SetFlags(log.Lshortfile)
	log.SetPrefix("")
	flag.Parse()
}
|
||||
|
||||
// header is the comment prepended to every generated file.
const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

`
|
||||
|
||||
// UnicodeVersion reports the requested Unicode version.
|
||||
func UnicodeVersion() string {
|
||||
return *unicodeVersion
|
||||
}
|
||||
|
||||
// CLDRVersion reports the requested CLDR version.
|
||||
func CLDRVersion() string {
|
||||
return *cldrVersion
|
||||
}
|
||||
|
||||
// tags pairs a Unicode version with the build tags gating files generated for
// it. Entries are ordered newest first; see buildTags for the selection rule.
var tags = []struct{ version, buildTags string }{
	{"10.0.0", "go1.10"},
	{"", "!go1.10"},
}
|
||||
|
||||
// buildTags reports the build tags used for the current Unicode version.
|
||||
func buildTags() string {
|
||||
v := UnicodeVersion()
|
||||
for _, x := range tags {
|
||||
// We should do a numeric comparison, but including the collate package
|
||||
// would create an import cycle. We approximate it by assuming that
|
||||
// longer version strings are later.
|
||||
if len(x.version) <= len(v) {
|
||||
return x.buildTags
|
||||
}
|
||||
if len(x.version) == len(v) && x.version <= v {
|
||||
return x.buildTags
|
||||
}
|
||||
}
|
||||
return tags[0].buildTags
|
||||
}
|
||||
|
||||
// IsLocal reports whether data files are available locally.
|
||||
func IsLocal() bool {
|
||||
dir, err := localReadmeFile()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, err = os.Stat(dir); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// OpenUCDFile opens the requested UCD file. The file is specified relative to
|
||||
// the public Unicode root directory. It will call log.Fatal if there are any
|
||||
// errors.
|
||||
func OpenUCDFile(file string) io.ReadCloser {
|
||||
return openUnicode(path.Join(*unicodeVersion, "ucd", file))
|
||||
}
|
||||
|
||||
// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
|
||||
// are any errors.
|
||||
func OpenCLDRCoreZip() io.ReadCloser {
|
||||
return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
|
||||
}
|
||||
|
||||
// OpenUnicodeFile opens the requested file of the requested category from the
|
||||
// root of the Unicode data archive. The file is specified relative to the
|
||||
// public Unicode root directory. If version is "", it will use the default
|
||||
// Unicode version. It will call log.Fatal if there are any errors.
|
||||
func OpenUnicodeFile(category, version, file string) io.ReadCloser {
|
||||
if version == "" {
|
||||
version = UnicodeVersion()
|
||||
}
|
||||
return openUnicode(path.Join(category, version, file))
|
||||
}
|
||||
|
||||
// OpenIANAFile opens the requested IANA file. The file is specified relative
|
||||
// to the IANA root, which is typically either http://www.iana.org or the
|
||||
// iana directory in the local mirror. It will call log.Fatal if there are any
|
||||
// errors.
|
||||
func OpenIANAFile(path string) io.ReadCloser {
|
||||
return Open(*iana, "iana", path)
|
||||
}
|
||||
|
||||
var (
	// dirMutex serializes lazy creation of the local data directory; see
	// getLocalDir.
	dirMutex sync.Mutex
	// localDir is not referenced in the code visible here — TODO confirm
	// whether it is still used elsewhere.
	localDir string
)

// permissions is the file mode used for cached data files and directories.
const permissions = 0755
|
||||
|
||||
// localReadmeFile returns the path of the README marker file inside the local
// data mirror, located relative to the golang.org/x/text package directory.
func localReadmeFile() (string, error) {
	pkg, err := build.Import("golang.org/x/text", "", build.FindOnly)
	if err != nil {
		return "", fmt.Errorf("Could not locate package: %v", err)
	}
	return filepath.Join(pkg.Dir, "DATA", "README"), nil
}
|
||||
|
||||
func getLocalDir() string {
|
||||
dirMutex.Lock()
|
||||
defer dirMutex.Unlock()
|
||||
|
||||
readme, err := localReadmeFile()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
dir := filepath.Dir(readme)
|
||||
if _, err := os.Stat(readme); err != nil {
|
||||
if err := os.MkdirAll(dir, permissions); err != nil {
|
||||
log.Fatalf("Could not create directory: %v", err)
|
||||
}
|
||||
ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
// readmeTxt is written verbatim into the local data directory on first use.
// NOTE(review): "all other times" below reads like a typo for "all other
// items"; left unchanged because this text is program output.
const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.

This directory contains downloaded files used to generate the various tables
in the golang.org/x/text subrepo.

Note that the language subtag repo (iana/assignments/language-subtag-registry)
and all other times in the iana subdirectory are not versioned and will need
to be periodically manually updated. The easiest way to do this is to remove
the entire iana directory. This is mostly of concern when updating the language
package.
`
|
||||
|
||||
// Open opens subdir/path if a local directory is specified and the file exists,
|
||||
// where subdir is a directory relative to the local root, or fetches it from
|
||||
// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
|
||||
func Open(urlRoot, subdir, path string) io.ReadCloser {
|
||||
file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
|
||||
return open(file, urlRoot, path)
|
||||
}
|
||||
|
||||
func openUnicode(path string) io.ReadCloser {
|
||||
file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
|
||||
return open(file, *url, path)
|
||||
}
|
||||
|
||||
// TODO: automatically periodically update non-versioned files.
|
||||
|
||||
func open(file, urlRoot, path string) io.ReadCloser {
|
||||
if f, err := os.Open(file); err == nil {
|
||||
return f
|
||||
}
|
||||
r := get(urlRoot, path)
|
||||
defer r.Close()
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not download file: %v", err)
|
||||
}
|
||||
os.MkdirAll(filepath.Dir(file), permissions)
|
||||
if err := ioutil.WriteFile(file, b, permissions); err != nil {
|
||||
log.Fatalf("Could not create file: %v", err)
|
||||
}
|
||||
return ioutil.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
|
||||
// get fetches root/path over HTTP and returns the response body. Any failure,
// including a non-200 status, is fatal. The caller must close the body.
func get(root, path string) io.ReadCloser {
	url := root + "/" + path
	fmt.Printf("Fetching %s...", url)
	defer fmt.Println(" done.")
	res, err := http.Get(url)
	if err != nil {
		log.Fatalf("HTTP GET: %v", err)
	}
	if res.StatusCode != 200 {
		log.Fatalf("Bad GET status for %q: %q", url, res.Status)
	}
	return res.Body
}
|
||||
|
||||
// TODO: use Write*Version in all applicable packages.
|
||||
|
||||
// WriteUnicodeVersion writes a constant for the Unicode version from which the
|
||||
// tables are generated.
|
||||
func WriteUnicodeVersion(w io.Writer) {
|
||||
fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
|
||||
fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
|
||||
}
|
||||
|
||||
// WriteCLDRVersion writes a constant for the CLDR version from which the
|
||||
// tables are generated.
|
||||
func WriteCLDRVersion(w io.Writer) {
|
||||
fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n")
|
||||
fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion())
|
||||
}
|
||||
|
||||
// WriteGoFile prepends a standard file comment and package statement to the
|
||||
// given bytes, applies gofmt, and writes them to a file with the given name.
|
||||
// It will call log.Fatal if there are any errors.
|
||||
func WriteGoFile(filename, pkg string, b []byte) {
|
||||
w, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer w.Close()
|
||||
if _, err = WriteGo(w, pkg, "", b); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// insertVersion returns filename with version spliced in just before its
// ".go" (or "_test.go") suffix.
func insertVersion(filename, version string) string {
	suffix := ".go"
	if strings.HasSuffix(filename, "_test.go") {
		suffix = "_test.go"
	}
	base := filename[:len(filename)-len(suffix)]
	return base + version + suffix
}
|
||||
|
||||
// WriteVersionedGoFile prepends a standard file comment, adds build tags to
|
||||
// version the file for the current Unicode version, and package statement to
|
||||
// the given bytes, applies gofmt, and writes them to a file with the given
|
||||
// name. It will call log.Fatal if there are any errors.
|
||||
func WriteVersionedGoFile(filename, pkg string, b []byte) {
|
||||
tags := buildTags()
|
||||
if tags != "" {
|
||||
filename = insertVersion(filename, UnicodeVersion())
|
||||
}
|
||||
w, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer w.Close()
|
||||
if _, err = WriteGo(w, pkg, tags, b); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteGo prepends a standard file comment and package statement to the given
|
||||
// bytes, applies gofmt, and writes them to w.
|
||||
func WriteGo(w io.Writer, pkg, tags string, b []byte) (n int, err error) {
|
||||
src := []byte(header)
|
||||
if tags != "" {
|
||||
src = append(src, fmt.Sprintf("// +build %s\n\n", tags)...)
|
||||
}
|
||||
src = append(src, fmt.Sprintf("package %s\n\n", pkg)...)
|
||||
src = append(src, b...)
|
||||
formatted, err := format.Source(src)
|
||||
if err != nil {
|
||||
// Print the generated code even in case of an error so that the
|
||||
// returned error can be meaningfully interpreted.
|
||||
n, _ = w.Write(src)
|
||||
return n, err
|
||||
}
|
||||
return w.Write(formatted)
|
||||
}
|
||||
|
||||
// Repackage rewrites a Go file from belonging to package main to belonging to
|
||||
// the given package.
|
||||
func Repackage(inFile, outFile, pkg string) {
|
||||
src, err := ioutil.ReadFile(inFile)
|
||||
if err != nil {
|
||||
log.Fatalf("reading %s: %v", inFile, err)
|
||||
}
|
||||
const toDelete = "package main\n\n"
|
||||
i := bytes.Index(src, []byte(toDelete))
|
||||
if i < 0 {
|
||||
log.Fatalf("Could not find %q in %s.", toDelete, inFile)
|
||||
}
|
||||
w := &bytes.Buffer{}
|
||||
w.Write(src[i+len(toDelete):])
|
||||
WriteGoFile(outFile, pkg, w.Bytes())
|
||||
}
|
105
vendor/golang.org/x/text/unicode/cldr/base.go
generated
vendored
105
vendor/golang.org/x/text/unicode/cldr/base.go
generated
vendored
@ -1,105 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Elem is implemented by every XML element.
type Elem interface {
	setEnclosing(Elem) // records the parent element
	setName(string)    // records the XML tag name
	enclosing() Elem   // returns the parent element

	GetCommon() *Common
}
|
||||
|
||||
// hidden holds child elements common to all XML elements that are kept out of
// the elements' exported structure (it is embedded in Common).
type hidden struct {
	CharData string `xml:",chardata"`
	// Alias captures an <alias> child referring to another element's data.
	Alias *struct {
		Common
		Source string `xml:"source,attr"`
		Path   string `xml:"path,attr"`
	} `xml:"alias"`
	// Def captures a <default> child; see (*Common).Default.
	Def *struct {
		Common
		Choice string `xml:"choice,attr,omitempty"`
		Type   string `xml:"type,attr,omitempty"`
	} `xml:"default"`
}
|
||||
|
||||
// Common holds several of the most common attributes and sub elements
// of an XML element.
type Common struct {
	XMLName         xml.Name
	name            string // XML tag name, set via setName
	enclElem        Elem   // parent element, set via setEnclosing
	Type            string `xml:"type,attr,omitempty"`
	Reference       string `xml:"reference,attr,omitempty"`
	Alt             string `xml:"alt,attr,omitempty"`
	ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
	Draft           string `xml:"draft,attr,omitempty"`
	hidden
}
|
||||
|
||||
// Default returns the default type to select from the enclosed list
|
||||
// or "" if no default value is specified.
|
||||
func (e *Common) Default() string {
|
||||
if e.Def == nil {
|
||||
return ""
|
||||
}
|
||||
if e.Def.Choice != "" {
|
||||
return e.Def.Choice
|
||||
} else if e.Def.Type != "" {
|
||||
// Type is still used by the default element in collation.
|
||||
return e.Def.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Element returns the XML element name.
|
||||
func (e *Common) Element() string {
|
||||
return e.name
|
||||
}
|
||||
|
||||
// GetCommon returns e. It is provided such that Common implements Elem.
|
||||
func (e *Common) GetCommon() *Common {
|
||||
return e
|
||||
}
|
||||
|
||||
// Data returns the character data accumulated for this element.
|
||||
func (e *Common) Data() string {
|
||||
e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
|
||||
return e.CharData
|
||||
}
|
||||
|
||||
func (e *Common) setName(s string) {
|
||||
e.name = s
|
||||
}
|
||||
|
||||
func (e *Common) enclosing() Elem {
|
||||
return e.enclElem
|
||||
}
|
||||
|
||||
func (e *Common) setEnclosing(en Elem) {
|
||||
e.enclElem = en
|
||||
}
|
||||
|
||||
// charRe matches the escape forms that can be expanded without further
// escaping the string: numeric character references (&#x...;), Go-style
// \u/\U/\x escapes, three-digit octal escapes, and single-character escapes.
var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
|
||||
|
||||
// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string.
// It assumes the input string is correctly formatted (a charRe match).
func replaceUnicode(s string) string {
	// Numeric character reference of the form "&#xNNNN;".
	if s[1] == '#' {
		code, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
		return string(rune(code))
	}
	// Otherwise a Go-style escape such as \uNNNN or \n.
	ch, _, _, _ := strconv.UnquoteChar(s, 0)
	return string(ch)
}
|
130
vendor/golang.org/x/text/unicode/cldr/cldr.go
generated
vendored
130
vendor/golang.org/x/text/unicode/cldr/cldr.go
generated
vendored
@ -1,130 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run makexml.go -output xml.go
|
||||
|
||||
// Package cldr provides a parser for LDML and related XML formats.
|
||||
// This package is intended to be used by the table generation tools
|
||||
// for the various internationalization-related packages.
|
||||
// As the XML types are generated from the CLDR DTD, and as the CLDR standard
|
||||
// is periodically amended, this package may change considerably over time.
|
||||
// This mostly means that data may appear and disappear between versions.
|
||||
// That is, old code should keep compiling for newer versions, but data
|
||||
// may have moved or changed.
|
||||
// CLDR version 22 is the first version supported by this package.
|
||||
// Older versions may not work.
|
||||
package cldr // import "golang.org/x/text/unicode/cldr"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
type CLDR struct {
	// parent presumably records locale parent/child relationships — confirm
	// direction against the resolve/parsing code.
	parent   map[string][]string
	locale   map[string]*LDML // raw, unresolved LDML per locale id
	resolved map[string]*LDML // cache of resolved LDML; see LDML and SetDraftLevel
	bcp47    *LDMLBCP47
	supp     *SupplementalData
}
|
||||
|
||||
func makeCLDR() *CLDR {
|
||||
return &CLDR{
|
||||
parent: make(map[string][]string),
|
||||
locale: make(map[string]*LDML),
|
||||
resolved: make(map[string]*LDML),
|
||||
bcp47: &LDMLBCP47{},
|
||||
supp: &SupplementalData{},
|
||||
}
|
||||
}
|
||||
|
||||
// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
func (cldr *CLDR) BCP47() *LDMLBCP47 {
	// NOTE(review): this always returns nil even though cldr.bcp47 is
	// initialized by makeCLDR — presumably BCP47 parsing is intentionally
	// unsupported here; confirm before returning cldr.bcp47.
	return nil
}
|
||||
|
||||
// Draft indicates the draft level of an element.
type Draft int

// Draft levels, ordered from most to least mature.
const (
	Approved Draft = iota
	Contributed
	Provisional
	Unconfirmed
)

// drafts lists the level names in reverse order of the Draft constants; the
// trailing empty string is the ParseDraft alias for Approved.
var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}

// ParseDraft returns the Draft value corresponding to the given string. The
// empty string corresponds to Approved.
func ParseDraft(level string) (Draft, error) {
	if level == "" {
		return Approved, nil
	}
	for i, s := range drafts {
		if level == s {
			return Unconfirmed - Draft(i), nil
		}
	}
	return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
}

// String returns the level name of d such that ParseDraft(d.String()) == d.
// Approved renders as the empty string, mirroring ParseDraft.
func (d Draft) String() string {
	if d == Approved {
		return ""
	}
	// Bug fix: the previous index, len(drafts)-1-int(d), was off by one
	// because of the trailing "" alias entry (e.g. it mapped Contributed to
	// "approved", breaking the round trip with ParseDraft). Unconfirmed-d
	// inverts ParseDraft's Unconfirmed-Draft(i).
	return drafts[Unconfirmed-d]
}
|
||||
|
||||
// SetDraftLevel sets which draft levels to include in the evaluated LDML.
// Any draft element for which the draft level is higher than lev will be excluded.
// If multiple draft levels are available for a single element, the one with the
// lowest draft level will be selected, unless preferDraft is true, in which case
// the highest draft will be chosen.
// It is assumed that the underlying LDML is canonicalized.
func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
	// TODO: implement
	// Currently only the resolution cache is invalidated; lev and preferDraft
	// are otherwise ignored.
	cldr.resolved = make(map[string]*LDML)
}
|
||||
|
||||
// RawLDML returns the LDML XML for id in unresolved form.
|
||||
// id must be one of the strings returned by Locales.
|
||||
func (cldr *CLDR) RawLDML(loc string) *LDML {
|
||||
return cldr.locale[loc]
|
||||
}
|
||||
|
||||
// LDML returns the fully resolved LDML XML for loc, which must be one of
|
||||
// the strings returned by Locales.
|
||||
func (cldr *CLDR) LDML(loc string) (*LDML, error) {
|
||||
return cldr.resolve(loc)
|
||||
}
|
||||
|
||||
// Supplemental returns the parsed supplemental data. If no such data was parsed,
|
||||
// nil is returned.
|
||||
func (cldr *CLDR) Supplemental() *SupplementalData {
|
||||
return cldr.supp
|
||||
}
|
||||
|
||||
// Locales returns the locales for which there exist files.
|
||||
// Valid sublocales for which there is no file are not included.
|
||||
// The root locale is always sorted first.
|
||||
func (cldr *CLDR) Locales() []string {
|
||||
loc := []string{"root"}
|
||||
hasRoot := false
|
||||
for l, _ := range cldr.locale {
|
||||
if l == "root" {
|
||||
hasRoot = true
|
||||
continue
|
||||
}
|
||||
loc = append(loc, l)
|
||||
}
|
||||
sort.Strings(loc[1:])
|
||||
if !hasRoot {
|
||||
return loc[1:]
|
||||
}
|
||||
return loc
|
||||
}
|
||||
|
||||
// Get fills in the fields of x based on the XPath path.
|
||||
func Get(e Elem, path string) (res Elem, err error) {
|
||||
return walkXPath(e, path)
|
||||
}
|
359
vendor/golang.org/x/text/unicode/cldr/collate.go
generated
vendored
359
vendor/golang.org/x/text/unicode/cldr/collate.go
generated
vendored
@ -1,359 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// RuleProcessor can be passed to Collator's Process method, which
// parses the rules and calls the respective method for each rule found.
type RuleProcessor interface {
	// Reset resets the collation state to the given anchor; before carries
	// the level of a "before" clause, or 0 when absent.
	Reset(anchor string, before int) error
	// Insert inserts str at the given level with optional context and
	// extension strings (empty when absent).
	Insert(level int, str, context, extend string) error
	// Index marks the start of an index group with the given id.
	Index(id string)
}
|
||||
|
||||
const (
	// cldrIndex is a Unicode-reserved sentinel value (noncharacter U+FDD0)
	// used to mark the start of a grouping within an index.
	// We ignore any rule that starts with this rune.
	// See http://unicode.org/reports/tr35/#Collation_Elements for details.
	cldrIndex = "\uFDD0"

	// specialAnchor is the format in which to represent logical reset positions,
	// such as "first tertiary ignorable".
	specialAnchor = "<%s/>"
)
|
||||
|
||||
// Process parses the rules for the tailorings of this collation
|
||||
// and calls the respective methods of p for each rule found.
|
||||
func (c Collation) Process(p RuleProcessor) (err error) {
|
||||
if len(c.Cr) > 0 {
|
||||
if len(c.Cr) > 1 {
|
||||
return fmt.Errorf("multiple cr elements, want 0 or 1")
|
||||
}
|
||||
return processRules(p, c.Cr[0].Data())
|
||||
}
|
||||
if c.Rules.Any != nil {
|
||||
return c.processXML(p)
|
||||
}
|
||||
return errors.New("no tailoring data")
|
||||
}
|
||||
|
||||
// processRules parses rules in the Collation Rule Syntax defined in
|
||||
// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
|
||||
func processRules(p RuleProcessor, s string) (err error) {
|
||||
chk := func(s string, e error) string {
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
return s
|
||||
}
|
||||
i := 0 // Save the line number for use after the loop.
|
||||
scanner := bufio.NewScanner(strings.NewReader(s))
|
||||
for ; scanner.Scan() && err == nil; i++ {
|
||||
for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
|
||||
level := 5
|
||||
var ch byte
|
||||
switch ch, s = s[0], s[1:]; ch {
|
||||
case '&': // followed by <anchor> or '[' <key> ']'
|
||||
if s = skipSpace(s); consume(&s, '[') {
|
||||
s = chk(parseSpecialAnchor(p, s))
|
||||
} else {
|
||||
s = chk(parseAnchor(p, 0, s))
|
||||
}
|
||||
case '<': // sort relation '<'{1,4}, optionally followed by '*'.
|
||||
for level = 1; consume(&s, '<'); level++ {
|
||||
}
|
||||
if level > 4 {
|
||||
err = fmt.Errorf("level %d > 4", level)
|
||||
}
|
||||
fallthrough
|
||||
case '=': // identity relation, optionally followed by *.
|
||||
if consume(&s, '*') {
|
||||
s = chk(parseSequence(p, level, s))
|
||||
} else {
|
||||
s = chk(parseOrder(p, level, s))
|
||||
}
|
||||
default:
|
||||
chk("", fmt.Errorf("illegal operator %q", ch))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if chk("", scanner.Err()); err != nil {
|
||||
return fmt.Errorf("%d: %v", i, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseSpecialAnchor parses the anchor syntax which is either of the form
//    ['before' <level>] <anchor>
// or
//    [<label>]
// The starting '[' should already be consumed.
func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {
	i := strings.IndexByte(s, ']')
	if i == -1 {
		return "", errors.New("unmatched bracket")
	}
	a := strings.TrimSpace(s[:i])
	s = s[i+1:]
	if strings.HasPrefix(a, "before ") {
		// "before" level is at most 3 bits wide (0-7); the level range
		// itself is validated downstream.
		l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3)
		if err != nil {
			return s, err
		}
		return parseAnchor(p, int(l), s)
	}
	// A logical reset position such as [first tertiary ignorable].
	return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)
}
|
||||
|
||||
func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) {
|
||||
anchor, s, err := scanString(s)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
return s, p.Reset(anchor, level)
|
||||
}
|
||||
|
||||
// parseOrder parses a single ordered element after a sort/identity operator:
// a value, an optional "|" context, and an optional "/" extension, and feeds
// it to p.Insert at the given level. Values starting with cldrIndex mark an
// index group instead.
func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) {
	var value, context, extend string
	if value, s, err = scanString(s); err != nil {
		return s, err
	}
	if strings.HasPrefix(value, cldrIndex) {
		p.Index(value[len(cldrIndex):])
		return
	}
	if consume(&s, '|') {
		if context, s, err = scanString(s); err != nil {
			return s, errors.New("missing string after context")
		}
	}
	if consume(&s, '/') {
		if extend, s, err = scanString(s); err != nil {
			return s, errors.New("missing string after extension")
		}
	}
	return s, p.Insert(level, value, context, extend)
}
|
||||
|
||||
// scanString scans a single input string: a run of characters terminated by
// white space or a syntax character (&, <, =, #), with single-quoted sections
// taken literally ('' denotes a literal quote). It returns the string, the
// remaining input with leading space skipped, and any error.
func scanString(s string) (str, tail string, err error) {
	if s = skipSpace(s); s == "" {
		return s, s, errors.New("missing string")
	}
	buf := [16]byte{} // small but enough to hold most cases.
	value := buf[:0]
	for s != "" {
		if consume(&s, '\'') {
			i := strings.IndexByte(s, '\'')
			if i == -1 {
				return "", "", errors.New(`unmatched single quote`)
			}
			if i == 0 {
				// '' is an escaped single quote.
				value = append(value, '\'')
			} else {
				value = append(value, s[:i]...)
			}
			s = s[i+1:]
			continue
		}
		r, sz := utf8.DecodeRuneInString(s)
		if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) {
			break
		}
		value = append(value, s[:sz]...)
		s = s[sz:]
	}
	return string(value), skipSpace(s), nil
}
|
||||
|
||||
// parseSequence parses an abbreviated sequence following an operator with '*':
// each rune is inserted individually at the given level, and "a-b" expands to
// the inclusive range of code points between the endpoints.
func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) {
	if s = skipSpace(s); s == "" {
		return s, errors.New("empty sequence")
	}
	last := rune(0) // previous rune; 0 means no valid range starter
	for s != "" {
		r, sz := utf8.DecodeRuneInString(s)
		s = s[sz:]

		if r == '-' {
			// We have a range. The first element was already written.
			if last == 0 {
				return s, errors.New("range without starter value")
			}
			r, sz = utf8.DecodeRuneInString(s)
			s = s[sz:]
			if r == utf8.RuneError || r < last {
				return s, fmt.Errorf("invalid range %q-%q", last, r)
			}
			for i := last + 1; i <= r; i++ {
				if err := p.Insert(level, string(i), "", ""); err != nil {
					return s, err
				}
			}
			// A completed range cannot start a new one.
			last = 0
			continue
		}

		if unicode.IsSpace(r) || unicode.IsPunct(r) {
			break
		}

		// normal case
		if err := p.Insert(level, string(r), "", ""); err != nil {
			return s, err
		}
		last = r
	}
	return s, nil
}
|
||||
|
||||
// skipSpace returns s with any leading Unicode white space removed.
func skipSpace(s string) string {
	trimmed := strings.TrimLeftFunc(s, unicode.IsSpace)
	return trimmed
}
|
||||
|
||||
// consume reports whether the next byte of *s is ch and, if so, gobbles it by
// advancing *s past it.
func consume(s *string, ch byte) (ok bool) {
	str := *s
	if str == "" || str[0] != ch {
		return false
	}
	*s = str[1:]
	return true
}
|
||||
|
||||
// The following code parses Collation rules of CLDR version 24 and before.

// lmap maps a legacy rule-tag initial ('p', 's', 't', 'i') to the collation
// level passed to RuleProcessor.Insert; see (rule).process.
var lmap = map[byte]int{
	'p': 1,
	's': 2,
	't': 3,
	'i': 5,
}
|
||||
|
||||
// rulesElem mirrors the legacy <rules> XML element; child elements are
// captured generically so their tag names can be dispatched on.
type rulesElem struct {
	Rules struct {
		Common
		Any []*struct {
			XMLName xml.Name
			rule
		} `xml:",any"`
	} `xml:"rules"`
}
|
||||
|
||||
// rule is a single legacy collation rule element: its character data, an
// optional "before" attribute, and any nested child rules.
type rule struct {
	Value  string `xml:",chardata"`
	Before string `xml:"before,attr"`
	Any    []*struct {
		XMLName xml.Name
		rule
	} `xml:",any"`
}
|
||||
|
||||
// emptyValueError is returned by (*rule).value for a rule with neither an
// inline value nor exactly one child element.
// NOTE(review): Go convention would name this errEmptyValue; renaming would
// touch callers elsewhere in the file.
var emptyValueError = errors.New("cldr: empty rule value")
|
||||
|
||||
// value returns the rule's textual value with escape notation expanded,
// caching the expansion back into r.Value. A rule with no text but exactly
// one child element yields that element's name as a special anchor.
func (r *rule) value() (string, error) {
	// Convert hexadecimal Unicode codepoint notation to a string.
	s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode)
	r.Value = s
	if s == "" {
		if len(r.Any) != 1 {
			return "", emptyValueError
		}
		r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local)
		r.Any = nil
	} else if len(r.Any) != 0 {
		return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any)
	}
	return r.Value, nil
}
|
||||
|
||||
// process reports the rule to p. name is the rule's XML tag ("p".."i"
// for single entries, "pc".."ic" for runs of consecutive entries);
// context and extend are passed through from an enclosing <x> element.
func (r rule) process(p RuleProcessor, name, context, extend string) error {
	v, err := r.value()
	if err != nil {
		return err
	}
	switch name {
	case "p", "s", "t", "i":
		// A value with the cldrIndex prefix marks an index boundary
		// rather than an ordinary rule.
		if strings.HasPrefix(v, cldrIndex) {
			p.Index(v[len(cldrIndex):])
			return nil
		}
		if err := p.Insert(lmap[name[0]], v, context, extend); err != nil {
			return err
		}
	case "pc", "sc", "tc", "ic":
		// The "c" variants insert each rune of the value individually,
		// all at the same strength level.
		level := lmap[name[0]]
		for _, s := range v {
			if err := p.Insert(level, string(s), context, extend); err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("cldr: unsupported tag: %q", name)
	}
	return nil
}
|
||||
|
||||
// processXML parses the format of CLDR versions 24 and older.
|
||||
func (c Collation) processXML(p RuleProcessor) (err error) {
|
||||
// Collation is generated and defined in xml.go.
|
||||
var v string
|
||||
for _, r := range c.Rules.Any {
|
||||
switch r.XMLName.Local {
|
||||
case "reset":
|
||||
level := 0
|
||||
switch r.Before {
|
||||
case "primary", "1":
|
||||
level = 1
|
||||
case "secondary", "2":
|
||||
level = 2
|
||||
case "tertiary", "3":
|
||||
level = 3
|
||||
case "":
|
||||
default:
|
||||
return fmt.Errorf("cldr: unknown level %q", r.Before)
|
||||
}
|
||||
v, err = r.value()
|
||||
if err == nil {
|
||||
err = p.Reset(v, level)
|
||||
}
|
||||
case "x":
|
||||
var context, extend string
|
||||
for _, r1 := range r.Any {
|
||||
v, err = r1.value()
|
||||
switch r1.XMLName.Local {
|
||||
case "context":
|
||||
context = v
|
||||
case "extend":
|
||||
extend = v
|
||||
}
|
||||
}
|
||||
for _, r1 := range r.Any {
|
||||
if t := r1.XMLName.Local; t == "context" || t == "extend" {
|
||||
continue
|
||||
}
|
||||
r1.rule.process(p, r1.XMLName.Local, context, extend)
|
||||
}
|
||||
default:
|
||||
err = r.rule.process(p, r.XMLName.Local, "", "")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
171
vendor/golang.org/x/text/unicode/cldr/decode.go
generated
vendored
171
vendor/golang.org/x/text/unicode/cldr/decode.go
generated
vendored
@ -1,171 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// A Decoder loads an archive of CLDR data.
type Decoder struct {
	dirFilter     []string // if non-empty, only these directories are loaded
	sectionFilter []string // if non-empty, only these top-level LDML sections are kept
	loader        Loader
	cldr          *CLDR
	curLocale     string
}

// SetSectionFilter takes a list of top-level LDML element names to which
// evaluation of LDML should be limited. It is intended to set the dir
// filter automatically as well; that is not yet implemented (see TODO).
func (d *Decoder) SetSectionFilter(filter ...string) {
	d.sectionFilter = filter
	// TODO: automatically set dir filter
}

// SetDirFilter limits the loading of LDML XML files to the specified
// directories. Note that sections may be split across directories
// differently for different CLDR versions. For more robust code, use
// SetSectionFilter.
func (d *Decoder) SetDirFilter(dir ...string) {
	d.dirFilter = dir
}

// A Loader provides access to the files of a CLDR archive.
type Loader interface {
	Len() int
	Path(i int) string
	Reader(i int) (io.ReadCloser, error)
}

// fileRe extracts the directory and base name of the XML files served by
// a Loader; paths may use either slash style.
var fileRe = regexp.MustCompile(`.*[/\\](.*)[/\\](.*)\.xml`)
|
||||
|
||||
// Decode loads and decodes the files represented by l.
|
||||
func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) {
|
||||
d.cldr = makeCLDR()
|
||||
for i := 0; i < l.Len(); i++ {
|
||||
fname := l.Path(i)
|
||||
if m := fileRe.FindStringSubmatch(fname); m != nil {
|
||||
if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) {
|
||||
continue
|
||||
}
|
||||
var r io.Reader
|
||||
if r, err = l.Reader(i); err == nil {
|
||||
err = d.decode(m[1], m[2], r)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
d.cldr.finalize(d.sectionFilter)
|
||||
return d.cldr, nil
|
||||
}
|
||||
|
||||
// decode parses the XML file for directory dir and identifier id from r
// into the appropriate part of d.cldr. For locale files it also checks
// that an identity element is present.
func (d *Decoder) decode(dir, id string, r io.Reader) error {
	var v interface{}
	var l *LDML
	cldr := d.cldr
	switch {
	case dir == "supplemental":
		v = cldr.supp
	case dir == "transforms":
		// Transforms are not supported; skip silently.
		return nil
	case dir == "bcp47":
		v = cldr.bcp47
	case dir == "validity":
		// Validity data is not supported; skip silently.
		return nil
	default:
		// All other directories contain locale files; files for the same
		// id are merged into a single LDML value.
		ok := false
		if v, ok = cldr.locale[id]; !ok {
			l = &LDML{}
			v, cldr.locale[id] = l, l
		}
	}
	x := xml.NewDecoder(r)
	if err := x.Decode(v); err != nil {
		log.Printf("%s/%s: %v", dir, id, err)
		return err
	}
	// l is non-nil only for newly created locale entries; validate those.
	if l != nil {
		if l.Identity == nil {
			return fmt.Errorf("%s/%s: missing identity element", dir, id)
		}
		// TODO: verify when CLDR bug http://unicode.org/cldr/trac/ticket/8970
		// is resolved.
		// path := strings.Split(id, "_")
		// if lang := l.Identity.Language.Type; lang != path[0] {
		// 	return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
		// }
	}
	return nil
}
|
||||
|
||||
// pathLoader is a Loader serving the entries found under a directory tree.
type pathLoader []string

// makePathLoader walks path recursively and records every visited entry.
// Non-XML paths are filtered out later by the caller's file-name matching.
func makePathLoader(path string) (pl pathLoader, err error) {
	err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error {
		pl = append(pl, path)
		return err
	})
	return pl, err
}

// Len implements Loader.
func (pl pathLoader) Len() int {
	return len(pl)
}

// Path implements Loader.
func (pl pathLoader) Path(i int) string {
	return pl[i]
}

// Reader implements Loader by opening the i-th file.
func (pl pathLoader) Reader(i int) (io.ReadCloser, error) {
	return os.Open(pl[i])
}
|
||||
|
||||
// DecodePath loads CLDR data from the given path.
|
||||
func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) {
|
||||
loader, err := makePathLoader(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Decode(loader)
|
||||
}
|
||||
|
||||
// zipLoader is a Loader serving the files contained in a zip archive.
type zipLoader struct {
	r *zip.Reader
}

// Len implements Loader.
func (zl zipLoader) Len() int {
	return len(zl.r.File)
}

// Path implements Loader.
func (zl zipLoader) Path(i int) string {
	return zl.r.File[i].Name
}

// Reader implements Loader by opening the i-th archive member.
func (zl zipLoader) Reader(i int) (io.ReadCloser, error) {
	return zl.r.File[i].Open()
}
|
||||
|
||||
// DecodeZip loads CLDR data from the zip archive for which r is the source.
|
||||
func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) {
|
||||
buffer, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Decode(zipLoader{archive})
|
||||
}
|
400
vendor/golang.org/x/text/unicode/cldr/makexml.go
generated
vendored
400
vendor/golang.org/x/text/unicode/cldr/makexml.go
generated
vendored
@ -1,400 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// This tool generates types for the various XML formats of CLDR.
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
)
|
||||
|
||||
var outputFile = flag.String("output", "xml.go", "output file name")
|
||||
|
||||
func main() {
	flag.Parse()

	// Read the entire CLDR core zip into memory so it can be scanned
	// once per configured DTD.
	r := gen.OpenCLDRCoreZip()
	buffer, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal("Could not read zip file")
	}
	r.Close()
	z, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
	if err != nil {
		log.Fatalf("Could not read zip archive: %v", err)
	}

	var buf bytes.Buffer

	version := gen.CLDRVersion()

	// For each configured DTD, locate its .dtd file in the archive,
	// parse it, and generate the corresponding Go types into buf.
	for _, dtd := range files {
		for _, f := range z.File {
			if strings.HasSuffix(f.Name, dtd.file+".dtd") {
				r, err := f.Open()
				failOnError(err)

				b := makeBuilder(&buf, dtd)
				b.parseDTD(r)
				b.resolve(b.index[dtd.top[0]])
				b.write()
				// All DTDs must agree on the embedded CLDR version.
				if b.version != "" && version != b.version {
					println(f.Name)
					log.Fatalf("main: inconsistent versions: found %s; want %s", b.version, version)
				}
				break
			}
		}
	}
	fmt.Fprintln(&buf, "// Version is the version of CLDR from which the XML definitions are generated.")
	fmt.Fprintf(&buf, "const Version = %q\n", version)

	gen.WriteGoFile(*outputFile, "cldr", buf.Bytes())
}
|
||||
|
||||
// failOnError logs err with the caller's file and line and exits the
// process with a non-zero status; it is a no-op for a nil error.
func failOnError(err error) {
	if err == nil {
		return
	}
	// Output depth 2 attributes the message to failOnError's caller.
	log.New(os.Stderr, "", log.Lshortfile).Output(2, err.Error())
	os.Exit(1)
}
|
||||
|
||||
// configuration data per DTD type
type dtd struct {
	file string   // base file name
	root string   // Go name of the root XML element
	top  []string // create a different type for this section

	skipElem    []string // hard-coded or deprecated elements
	skipAttr    []string // attributes to exclude
	predefined  []string // hard-coded elements exist of the form <name>Elem
	forceRepeat []string // elements to make slices despite DTD
}

// files lists the DTDs processed by this generator, with per-DTD tweaks.
var files = []dtd{
	{
		file: "ldmlBCP47",
		root: "LDMLBCP47",
		top:  []string{"ldmlBCP47"},
		skipElem: []string{
			"cldrVersion", // deprecated, not used
		},
	},
	{
		file: "ldmlSupplemental",
		root: "SupplementalData",
		top:  []string{"supplementalData"},
		skipElem: []string{
			"cldrVersion", // deprecated, not used
		},
		forceRepeat: []string{
			"plurals", // data defined in plurals.xml and ordinals.xml
		},
	},
	{
		file: "ldml",
		root: "LDML",
		top: []string{
			"ldml", "collation", "calendar", "timeZoneNames", "localeDisplayNames", "numbers",
		},
		skipElem: []string{
			"cp",       // not used anywhere
			"special",  // not used anywhere
			"fallback", // deprecated, not used
			"alias",    // in Common
			"default",  // in Common
		},
		skipAttr: []string{
			"hiraganaQuarternary", // typo in DTD, correct version included as well
		},
		predefined: []string{"rules"},
	},
}

// comments maps a top-level section name to the doc comment emitted
// before its generated type.
var comments = map[string]string{
	"ldmlBCP47": `
// LDMLBCP47 holds information on allowable values for various variables in LDML.
`,
	"supplementalData": `
// SupplementalData holds information relevant for internationalization
// and proper use of CLDR, but that is not contained in the locale hierarchy.
`,
	"ldml": `
// LDML is the top-level type for locale-specific data.
`,
	"collation": `
// Collation contains rules that specify a certain sort-order,
// as a tailoring of the root order.
// The parsed rules are obtained by passing a RuleProcessor to Collation's
// Process method.
`,
	"calendar": `
// Calendar specifies the fields used for formatting and parsing dates and times.
// The month and quarter names are identified numerically, starting at 1.
// The day (of the week) names are identified with short strings, since there is
// no universally-accepted numeric designation.
`,
	"dates": `
// Dates contains information regarding the format and parsing of dates and times.
`,
	"localeDisplayNames": `
// LocaleDisplayNames specifies localized display names for for scripts, languages,
// countries, currencies, and variants.
`,
	"numbers": `
// Numbers supplies information for formatting and parsing numbers and currencies.
`,
}

// element describes one parsed DTD element.
type element struct {
	name      string // XML element name
	category  string // elements contained by this element
	signature string // category + attrKey*

	attr []*attribute // attributes supported by this element.
	sub  []struct {   // parsed and evaluated sub elements of this element.
		e      *element
		repeat bool // true if the element needs to be a slice
	}

	resolved bool // prevent multiple resolutions of this element.
}

// attribute describes one parsed DTD attribute.
type attribute struct {
	name string
	key  string
	list []string // enumerated values, when the DTD declares any

	tag string // Go tag
}

// Regular expressions used to scan the DTD directives.
var (
	reHead  = regexp.MustCompile(` *(\w+) +([\w\-]+)`)
	reAttr  = regexp.MustCompile(` *(\w+) *(?:(\w+)|\(([\w\- \|]+)\)) *(?:#([A-Z]*) *(?:\"([\.\d+])\")?)? *("[\w\-:]*")?`)
	reElem  = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`)
	reToken = regexp.MustCompile(`\w\-`)
)

// builder is used to read in the DTD files from CLDR and generate Go code
// to be used with the encoding/xml package.
type builder struct {
	w       io.Writer
	index   map[string]*element
	elem    []*element
	info    dtd
	version string // CLDR version found in the DTD's FIXED attribute
}

// makeBuilder returns a builder that writes code generated for d to w.
func makeBuilder(w io.Writer, d dtd) builder {
	return builder{
		w:     w,
		index: make(map[string]*element),
		elem:  []*element{},
		info:  d,
	}
}
|
||||
|
||||
// parseDTD parses a DTD file.
|
||||
func (b *builder) parseDTD(r io.Reader) {
|
||||
for d := xml.NewDecoder(r); ; {
|
||||
t, err := d.Token()
|
||||
if t == nil {
|
||||
break
|
||||
}
|
||||
failOnError(err)
|
||||
dir, ok := t.(xml.Directive)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
m := reHead.FindSubmatch(dir)
|
||||
dir = dir[len(m[0]):]
|
||||
ename := string(m[2])
|
||||
el, elementFound := b.index[ename]
|
||||
switch string(m[1]) {
|
||||
case "ELEMENT":
|
||||
if elementFound {
|
||||
log.Fatal("parseDTD: duplicate entry for element %q", ename)
|
||||
}
|
||||
m := reElem.FindSubmatch(dir)
|
||||
if m == nil {
|
||||
log.Fatalf("parseDTD: invalid element %q", string(dir))
|
||||
}
|
||||
if len(m[0]) != len(dir) {
|
||||
log.Fatal("parseDTD: invalid element %q", string(dir), len(dir), len(m[0]), string(m[0]))
|
||||
}
|
||||
s := string(m[1])
|
||||
el = &element{
|
||||
name: ename,
|
||||
category: s,
|
||||
}
|
||||
b.index[ename] = el
|
||||
case "ATTLIST":
|
||||
if !elementFound {
|
||||
log.Fatalf("parseDTD: unknown element %q", ename)
|
||||
}
|
||||
s := string(dir)
|
||||
m := reAttr.FindStringSubmatch(s)
|
||||
if m == nil {
|
||||
log.Fatal(fmt.Errorf("parseDTD: invalid attribute %q", string(dir)))
|
||||
}
|
||||
if m[4] == "FIXED" {
|
||||
b.version = m[5]
|
||||
} else {
|
||||
switch m[1] {
|
||||
case "draft", "references", "alt", "validSubLocales", "standard" /* in Common */ :
|
||||
case "type", "choice":
|
||||
default:
|
||||
el.attr = append(el.attr, &attribute{
|
||||
name: m[1],
|
||||
key: s,
|
||||
list: reToken.FindAllString(m[3], -1),
|
||||
})
|
||||
el.signature = fmt.Sprintf("%s=%s+%s", el.signature, m[1], m[2])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// reCat tokenizes one DTD content-model string: parentheses, element
// names (including #PCDATA), and an optional repetition marker.
var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`)

// resolve takes a parsed element and converts it into structured data
// that can be used to generate the XML code.
func (b *builder) resolve(e *element) {
	if e.resolved {
		return
	}
	b.elem = append(b.elem, e)
	e.resolved = true
	s := e.category
	found := make(map[string]bool)
	sequenceStart := []int{}
	for len(s) > 0 {
		m := reCat.FindStringSubmatch(s)
		if m == nil {
			log.Fatalf("%s: invalid category string %q", e.name, s)
		}
		// A '*' or '+' marker (or a forced override) makes the sub-element
		// a slice in the generated type.
		repeat := m[2] == "*" || m[2] == "+" || in(b.info.forceRepeat, m[1])
		switch m[1] {
		case "":
		case "(":
			// Remember where this group starts so a trailing repetition
			// marker can be applied to all of its members.
			sequenceStart = append(sequenceStart, len(e.sub))
		case ")":
			if len(sequenceStart) == 0 {
				log.Fatalf("%s: unmatched closing parenthesis", e.name)
			}
			for i := sequenceStart[len(sequenceStart)-1]; i < len(e.sub); i++ {
				e.sub[i].repeat = e.sub[i].repeat || repeat
			}
			sequenceStart = sequenceStart[:len(sequenceStart)-1]
		default:
			if in(b.info.skipElem, m[1]) {
				// Skipped elements are dropped entirely.
			} else if sub, ok := b.index[m[1]]; ok {
				// Record each sub-element once and resolve it recursively.
				if !found[sub.name] {
					e.sub = append(e.sub, struct {
						e      *element
						repeat bool
					}{sub, repeat})
					found[sub.name] = true
					b.resolve(sub)
				}
			} else if m[1] == "#PCDATA" || m[1] == "ANY" {
				// Text content; nothing to generate.
			} else if m[1] != "EMPTY" {
				log.Fatalf("resolve:%s: element %q not found", e.name, m[1])
			}
		}
		s = s[len(m[0]):]
	}
}
|
||||
|
||||
// in reports whether s is contained in set.
func in(set []string, s string) bool {
	for i := range set {
		if set[i] == s {
			return true
		}
	}
	return false
}
|
||||
|
||||
// repl maps '-' and '_' to spaces so that strings.Title can capitalize
// each component.
var repl = strings.NewReplacer("-", " ", "_", " ")

// title puts the first character of each '-'/'_'-separated component in
// title case and removes the separators.
func title(s string) string {
	spaced := repl.Replace(s)
	titled := strings.Title(spaced)
	return strings.Replace(titled, " ", "", -1)
}
|
||||
|
||||
// writeElem generates Go code for a single element, recursively. tab is
// the indentation depth of the emitted code.
func (b *builder) writeElem(tab int, e *element) {
	// p prints with the current indentation applied after each newline.
	p := func(f string, x ...interface{}) {
		f = strings.Replace(f, "\n", "\n"+strings.Repeat("\t", tab), -1)
		fmt.Fprintf(b.w, f, x...)
	}
	if len(e.sub) == 0 && len(e.attr) == 0 {
		// Leaf elements collapse to the shared Common type.
		p("Common")
		return
	}
	p("struct {")
	tab++
	p("\nCommon")
	for _, attr := range e.attr {
		if !in(b.info.skipAttr, attr.name) {
			p("\n%s string `xml:\"%s,attr\"`", title(attr.name), attr.name)
		}
	}
	for _, sub := range e.sub {
		if in(b.info.predefined, sub.e.name) {
			// Hard-coded elements are referenced as <name>Elem.
			p("\n%sElem", sub.e.name)
			continue
		}
		if in(b.info.skipElem, sub.e.name) {
			continue
		}
		p("\n%s ", title(sub.e.name))
		if sub.repeat {
			p("[]")
		}
		p("*")
		if in(b.info.top, sub.e.name) {
			// Top-level sections get their own named type; everything
			// else is generated inline.
			p(title(sub.e.name))
		} else {
			b.writeElem(tab, sub.e)
		}
		p(" `xml:\"%s\"`", sub.e.name)
	}
	tab--
	p("\n}")
}
|
||||
|
||||
// write generates the Go XML code.
|
||||
func (b *builder) write() {
|
||||
for i, name := range b.info.top {
|
||||
e := b.index[name]
|
||||
if e != nil {
|
||||
fmt.Fprintf(b.w, comments[name])
|
||||
name := title(e.name)
|
||||
if i == 0 {
|
||||
name = b.info.root
|
||||
}
|
||||
fmt.Fprintf(b.w, "type %s ", name)
|
||||
b.writeElem(0, e)
|
||||
fmt.Fprint(b.w, "\n")
|
||||
}
|
||||
}
|
||||
}
|
602
vendor/golang.org/x/text/unicode/cldr/resolve.go
generated
vendored
602
vendor/golang.org/x/text/unicode/cldr/resolve.go
generated
vendored
@ -1,602 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
// This file implements the various inheritance constructs defined by LDML.
|
||||
// See http://www.unicode.org/reports/tr35/#Inheritance_and_Validity
|
||||
// for more details.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// fieldIter iterates over fields in a struct. It includes
// fields of embedded structs.
type fieldIter struct {
	v        reflect.Value
	index, n []int // per-nesting-level field index and field count
}

// iter returns a fieldIter positioned at the first leaf field of v.
// v must be a struct value.
func iter(v reflect.Value) fieldIter {
	if v.Kind() != reflect.Struct {
		log.Panicf("value %v must be a struct", v)
	}
	i := fieldIter{
		v:     v,
		index: []int{0},
		n:     []int{v.NumField()},
	}
	i.descent()
	return i
}

// descent pushes into embedded (anonymous) structs until the current
// field is a leaf.
func (i *fieldIter) descent() {
	for f := i.field(); f.Anonymous && f.Type.NumField() > 0; f = i.field() {
		i.index = append(i.index, 0)
		i.n = append(i.n, f.Type.NumField())
	}
}

// done reports whether the iterator has moved past the last field.
func (i *fieldIter) done() bool {
	return len(i.index) == 1 && i.index[0] >= i.n[0]
}

// skip reports whether f is an unexported, non-embedded field.
func skip(f reflect.StructField) bool {
	return !f.Anonymous && (f.Name[0] < 'A' || f.Name[0] > 'Z')
}

// next advances the iterator to the next non-skipped field, popping out
// of exhausted embedded structs and descending into newly reached ones.
func (i *fieldIter) next() {
	for {
		k := len(i.index) - 1
		i.index[k]++
		if i.index[k] < i.n[k] {
			if !skip(i.field()) {
				break
			}
		} else {
			if k == 0 {
				return
			}
			i.index = i.index[:k]
			i.n = i.n[:k]
		}
	}
	i.descent()
}

// value returns the value of the current field.
func (i *fieldIter) value() reflect.Value {
	return i.v.FieldByIndex(i.index)
}

// field returns the StructField describing the current field.
func (i *fieldIter) field() reflect.StructField {
	return i.v.Type().FieldByIndex(i.index)
}

// visitor is a callback applied to each node reached by visitRec.
type visitor func(v reflect.Value) error

// stopDescent may be returned by a visitor to prune recursion below the
// current node without reporting an error.
var stopDescent = fmt.Errorf("do not recurse")

// visit applies f to x and, recursively, to everything reachable from it.
func (f visitor) visit(x interface{}) error {
	return f.visitRec(reflect.ValueOf(x))
}

// visitRec recursively calls f on all nodes in v, following non-nil
// pointers, struct fields (including embedded ones), and slice elements.
func (f visitor) visitRec(v reflect.Value) error {
	if v.Kind() == reflect.Ptr {
		if v.IsNil() {
			return nil
		}
		return f.visitRec(v.Elem())
	}
	if err := f(v); err != nil {
		if err == stopDescent {
			return nil
		}
		return err
	}
	switch v.Kind() {
	case reflect.Struct:
		for i := iter(v); !i.done(); i.next() {
			if err := f.visitRec(i.value()); err != nil {
				return err
			}
		}
	case reflect.Slice:
		for i := 0; i < v.Len(); i++ {
			if err := f.visitRec(v.Index(i)); err != nil {
				return err
			}
		}
	}
	return nil
}
|
||||
|
||||
// getPath is used for error reporting purposes only.
|
||||
func getPath(e Elem) string {
|
||||
if e == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
if e.enclosing() == nil {
|
||||
return e.GetCommon().name
|
||||
}
|
||||
if e.GetCommon().Type == "" {
|
||||
return fmt.Sprintf("%s.%s", getPath(e.enclosing()), e.GetCommon().name)
|
||||
}
|
||||
return fmt.Sprintf("%s.%s[type=%s]", getPath(e.enclosing()), e.GetCommon().name, e.GetCommon().Type)
|
||||
}
|
||||
|
||||
// xmlName returns the xml name of the element or attribute
|
||||
func xmlName(f reflect.StructField) (name string, attr bool) {
|
||||
tags := strings.Split(f.Tag.Get("xml"), ",")
|
||||
for _, s := range tags {
|
||||
attr = attr || s == "attr"
|
||||
}
|
||||
return tags[0], attr
|
||||
}
|
||||
|
||||
// findField returns the field of v (dereferenced if a pointer) whose
// XML name is key, searching embedded structs as well.
func findField(v reflect.Value, key string) (reflect.Value, error) {
	v = reflect.Indirect(v)
	for i := iter(v); !i.done(); i.next() {
		if n, _ := xmlName(i.field()); n == key {
			return i.value(), nil
		}
	}
	return reflect.Value{}, fmt.Errorf("cldr: no field %q in element %#v", key, v.Interface())
}
|
||||
|
||||
var xpathPart = regexp.MustCompile(`(\pL+)(?:\[@(\pL+)='([\w-]+)'\])?`)
|
||||
|
||||
func walkXPath(e Elem, path string) (res Elem, err error) {
|
||||
for _, c := range strings.Split(path, "/") {
|
||||
if c == ".." {
|
||||
if e = e.enclosing(); e == nil {
|
||||
panic("path ..")
|
||||
return nil, fmt.Errorf(`cldr: ".." moves past root in path %q`, path)
|
||||
}
|
||||
continue
|
||||
} else if c == "" {
|
||||
continue
|
||||
}
|
||||
m := xpathPart.FindStringSubmatch(c)
|
||||
if len(m) == 0 || len(m[0]) != len(c) {
|
||||
return nil, fmt.Errorf("cldr: syntax error in path component %q", c)
|
||||
}
|
||||
v, err := findField(reflect.ValueOf(e), m[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
i := 0
|
||||
if m[2] != "" || v.Len() > 1 {
|
||||
if m[2] == "" {
|
||||
m[2] = "type"
|
||||
if m[3] = e.GetCommon().Default(); m[3] == "" {
|
||||
return nil, fmt.Errorf("cldr: type selector or default value needed for element %s", m[1])
|
||||
}
|
||||
}
|
||||
for ; i < v.Len(); i++ {
|
||||
vi := v.Index(i)
|
||||
key, err := findField(vi.Elem(), m[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key = reflect.Indirect(key)
|
||||
if key.Kind() == reflect.String && key.String() == m[3] {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if i == v.Len() || v.Index(i).IsNil() {
|
||||
return nil, fmt.Errorf("no %s found with %s==%s", m[1], m[2], m[3])
|
||||
}
|
||||
e = v.Index(i).Interface().(Elem)
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return nil, fmt.Errorf("cldr: element %q not found within element %q", m[1], e.GetCommon().name)
|
||||
}
|
||||
var ok bool
|
||||
if e, ok = v.Interface().(Elem); !ok {
|
||||
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
|
||||
} else if m[2] != "" || m[3] != "" {
|
||||
return nil, fmt.Errorf("cldr: no type selector allowed for element %s", m[1])
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
|
||||
}
|
||||
}
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// absPrefix is the required prefix of alias paths whose source is not
// the current locale.
const absPrefix = "//ldml/"

// resolveAlias evaluates an alias: for source "locale" it walks path
// relative to e itself; otherwise it resolves src as a locale and walks
// the absolute path from that locale's root.
func (cldr *CLDR) resolveAlias(e Elem, src, path string) (res Elem, err error) {
	if src != "locale" {
		if !strings.HasPrefix(path, absPrefix) {
			return nil, fmt.Errorf("cldr: expected absolute path, found %q", path)
		}
		path = path[len(absPrefix):]
		if e, err = cldr.resolve(src); err != nil {
			return nil, err
		}
	}
	return walkXPath(e, path)
}

// resolveAndMergeAlias merges the non-attribute fields of the element
// aliased by e (if any) into e itself.
func (cldr *CLDR) resolveAndMergeAlias(e Elem) error {
	alias := e.GetCommon().Alias
	if alias == nil {
		return nil
	}
	a, err := cldr.resolveAlias(e, alias.Source, alias.Path)
	if err != nil {
		return fmt.Errorf("%v: error evaluating path %q: %v", getPath(e), alias.Path, err)
	}
	// Ensure alias node was already evaluated. TODO: avoid double evaluation.
	err = cldr.resolveAndMergeAlias(a)
	v := reflect.ValueOf(e).Elem()
	for i := iter(reflect.ValueOf(a).Elem()); !i.done(); i.next() {
		if vv := i.value(); vv.Kind() != reflect.Ptr || !vv.IsNil() {
			// Only non-attribute (element) fields are inherited.
			if _, attr := xmlName(i.field()); !attr {
				v.FieldByIndex(i.index).Set(vv)
			}
		}
	}
	return err
}

// aliasResolver returns a visitor that merges aliases into each visited
// element and stops descending into blocking elements.
func (cldr *CLDR) aliasResolver() visitor {
	return func(v reflect.Value) (err error) {
		if e, ok := v.Addr().Interface().(Elem); ok {
			err = cldr.resolveAndMergeAlias(e)
			if err == nil && blocking[e.GetCommon().name] {
				return stopDescent
			}
		}
		return err
	}
}
|
||||
|
||||
// elements within blocking elements do not inherit.
// Taken from CLDR's supplementalMetaData.xml.
var blocking = map[string]bool{
	"identity":         true,
	"supplementalData": true,
	"cldrTest":         true,
	"collation":        true,
	"transform":        true,
}
|
||||
|
||||
// Distinguishing attributes affect inheritance; two elements with different
// distinguishing attributes are treated as different for purposes of inheritance,
// except when such attributes occur in the indicated elements.
// Taken from CLDR's supplementalMetaData.xml.
var distinguishing = map[string][]string{
	"key":        nil,
	"request_id": nil,
	"id":         nil,
	"registry":   nil,
	"alt":        nil,
	"iso4217":    nil,
	"iso3166":    nil,
	"mzone":      nil,
	"from":       nil,
	"to":         nil,
	// gofmt -s: the element type may be elided in a map composite literal.
	"type": {
		"abbreviationFallback",
		"default",
		"mapping",
		"measurementSystem",
		"preferenceOrdering",
	},
	"numberSystem": nil,
}
|
||||
|
||||
// in reports whether s occurs in set.
func in(set []string, s string) bool {
	found := false
	for _, candidate := range set {
		if candidate == s {
			found = true
			break
		}
	}
	return found
}
|
||||
|
||||
// attrKey computes a key based on the distinguishable attributes of
|
||||
// an element and it's values.
|
||||
func attrKey(v reflect.Value, exclude ...string) string {
|
||||
parts := []string{}
|
||||
ename := v.Interface().(Elem).GetCommon().name
|
||||
v = v.Elem()
|
||||
for i := iter(v); !i.done(); i.next() {
|
||||
if name, attr := xmlName(i.field()); attr {
|
||||
if except, ok := distinguishing[name]; ok && !in(exclude, name) && !in(except, ename) {
|
||||
v := i.value()
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
if v.IsValid() {
|
||||
parts = append(parts, fmt.Sprintf("%s=%s", name, v.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Strings(parts)
|
||||
return strings.Join(parts, ";")
|
||||
}
|
||||
|
||||
// Key returns a key for e derived from all distinguishing attributes
|
||||
// except those specified by exclude.
|
||||
func Key(e Elem, exclude ...string) string {
|
||||
return attrKey(reflect.ValueOf(e), exclude...)
|
||||
}
|
||||
|
||||
// linkEnclosing sets the enclosing element as well as the name
|
||||
// for all sub-elements of child, recursively.
|
||||
func linkEnclosing(parent, child Elem) {
|
||||
child.setEnclosing(parent)
|
||||
v := reflect.ValueOf(child).Elem()
|
||||
for i := iter(v); !i.done(); i.next() {
|
||||
vf := i.value()
|
||||
if vf.Kind() == reflect.Slice {
|
||||
for j := 0; j < vf.Len(); j++ {
|
||||
linkEnclosing(child, vf.Index(j).Interface().(Elem))
|
||||
}
|
||||
} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
|
||||
linkEnclosing(child, vf.Interface().(Elem))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func setNames(e Elem, name string) {
|
||||
e.setName(name)
|
||||
v := reflect.ValueOf(e).Elem()
|
||||
for i := iter(v); !i.done(); i.next() {
|
||||
vf := i.value()
|
||||
name, _ = xmlName(i.field())
|
||||
if vf.Kind() == reflect.Slice {
|
||||
for j := 0; j < vf.Len(); j++ {
|
||||
setNames(vf.Index(j).Interface().(Elem), name)
|
||||
}
|
||||
} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
|
||||
setNames(vf.Interface().(Elem), name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// deepCopy copies elements of v recursively. All elements of v that may
|
||||
// be modified by inheritance are explicitly copied.
|
||||
func deepCopy(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() || v.Elem().Kind() != reflect.Struct {
|
||||
return v
|
||||
}
|
||||
nv := reflect.New(v.Elem().Type())
|
||||
nv.Elem().Set(v.Elem())
|
||||
deepCopyRec(nv.Elem(), v.Elem())
|
||||
return nv
|
||||
case reflect.Slice:
|
||||
nv := reflect.MakeSlice(v.Type(), v.Len(), v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
deepCopyRec(nv.Index(i), v.Index(i))
|
||||
}
|
||||
return nv
|
||||
}
|
||||
panic("deepCopy: must be called with pointer or slice")
|
||||
}
|
||||
|
||||
// deepCopyRec is only called by deepCopy.
|
||||
func deepCopyRec(nv, v reflect.Value) {
|
||||
if v.Kind() == reflect.Struct {
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
if name, attr := xmlName(t.Field(i)); name != "" && !attr {
|
||||
deepCopyRec(nv.Field(i), v.Field(i))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
nv.Set(deepCopy(v))
|
||||
}
|
||||
}
|
||||
|
||||
// newNode is used to insert a missing node during inheritance.
// It allocates a fresh value of v's struct type, copies only v's
// attribute fields (and fields without an XML name) into it, and sets
// enc as the new node's enclosing element. Non-attribute sub-elements
// are deliberately left zero so they can be filled in by inheritance.
func (cldr *CLDR) newNode(v, enc reflect.Value) reflect.Value {
	n := reflect.New(v.Type())
	for i := iter(v); !i.done(); i.next() {
		// Copy attributes only; name == "" covers untagged fields.
		if name, attr := xmlName(i.field()); name == "" || attr {
			n.Elem().FieldByIndex(i.index).Set(i.value())
		}
	}
	n.Interface().(Elem).GetCommon().setEnclosing(enc.Addr().Interface().(Elem))
	return n
}
|
||||
|
||||
// v, parent must be pointers to struct
// inheritFields returns a newly allocated copy of v in which each
// non-attribute field inherits, per CLDR rules, from the corresponding
// field of parent.
// NOTE(review): callers visible in this file pass struct values
// (x.Elem()), not pointers — confirm the comment above is accurate.
func (cldr *CLDR) inheritFields(v, parent reflect.Value) (res reflect.Value, err error) {
	t := v.Type()
	nv := reflect.New(t)
	// Start from a shallow copy of v; attribute fields stay as-is.
	nv.Elem().Set(v)
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		f := i.field()
		name, attr := xmlName(f)
		// Attributes and untagged fields were covered by the copy above.
		if name == "" || attr {
			continue
		}
		pf := parent.FieldByIndex(i.index)
		// Elements in the blocking set stop inheritance: take v's value
		// if present, else the parent's, deep-copied so later mutation
		// does not leak between locales.
		if blocking[name] {
			if vf.IsNil() {
				vf = pf
			}
			nv.Elem().FieldByIndex(i.index).Set(deepCopy(vf))
			continue
		}
		switch f.Type.Kind() {
		case reflect.Ptr:
			if f.Type.Elem().Kind() == reflect.Struct {
				if !vf.IsNil() {
					// Present in both: merge the child with the parent's child.
					if vf, err = cldr.inheritStructPtr(vf, pf); err != nil {
						return reflect.Value{}, err
					}
					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
					nv.Elem().FieldByIndex(i.index).Set(vf)
				} else if !pf.IsNil() {
					// Missing in v: synthesize a node carrying the parent's
					// attributes, then inherit its contents from the parent.
					n := cldr.newNode(pf.Elem(), v)
					if vf, err = cldr.inheritStructPtr(n, pf); err != nil {
						return reflect.Value{}, err
					}
					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
					nv.Elem().FieldByIndex(i.index).Set(vf)
				}
			}
		case reflect.Slice:
			// Merge element slices keyed by distinguishing attributes.
			vf, err := cldr.inheritSlice(nv.Elem(), vf, pf)
			if err != nil {
				// NOTE(review): this branch returns reflect.Zero(t) while the
				// Ptr branches return reflect.Value{} — inconsistent but
				// preserved; callers only use res when err == nil.
				return reflect.Zero(t), err
			}
			nv.Elem().FieldByIndex(i.index).Set(vf)
		}
	}
	return nv, nil
}
|
||||
|
||||
func root(e Elem) *LDML {
|
||||
for ; e.enclosing() != nil; e = e.enclosing() {
|
||||
}
|
||||
return e.(*LDML)
|
||||
}
|
||||
|
||||
// inheritStructPtr first merges possible aliases in with v and then inherits
// any underspecified elements from parent.
// v and parent are pointers to struct; at least one must be non-nil.
func (cldr *CLDR) inheritStructPtr(v, parent reflect.Value) (r reflect.Value, err error) {
	if !v.IsNil() {
		e := v.Interface().(Elem).GetCommon()
		alias := e.Alias
		// Fall back to the parent's alias when v itself has none.
		if alias == nil && !parent.IsNil() {
			alias = parent.Interface().(Elem).GetCommon().Alias
		}
		if alias != nil {
			// NOTE(review): if resolveAlias fails and returns a nil element,
			// the error declared here is silently dropped and processing
			// continues — confirm this best-effort behavior is intended.
			a, err := cldr.resolveAlias(v.Interface().(Elem), alias.Source, alias.Path)
			if a != nil {
				// Merge the alias target into v before parent inheritance.
				if v, err = cldr.inheritFields(v.Elem(), reflect.ValueOf(a).Elem()); err != nil {
					return reflect.Value{}, err
				}
			}
		}
		if !parent.IsNil() {
			return cldr.inheritFields(v.Elem(), parent.Elem())
		}
	} else if parent.IsNil() {
		// Callers guarantee v and parent are not both nil.
		panic("should not reach here")
	}
	return v, nil
}
|
||||
|
||||
// Must be slice of struct pointers.
|
||||
func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value, err error) {
|
||||
t := v.Type()
|
||||
index := make(map[string]reflect.Value)
|
||||
if !v.IsNil() {
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
vi := v.Index(i)
|
||||
key := attrKey(vi)
|
||||
index[key] = vi
|
||||
}
|
||||
}
|
||||
if !parent.IsNil() {
|
||||
for i := 0; i < parent.Len(); i++ {
|
||||
vi := parent.Index(i)
|
||||
key := attrKey(vi)
|
||||
if w, ok := index[key]; ok {
|
||||
index[key], err = cldr.inheritStructPtr(w, vi)
|
||||
} else {
|
||||
n := cldr.newNode(vi.Elem(), enc)
|
||||
index[key], err = cldr.inheritStructPtr(n, vi)
|
||||
}
|
||||
index[key].Interface().(Elem).setEnclosing(enc.Addr().Interface().(Elem))
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
}
|
||||
}
|
||||
keys := make([]string, 0, len(index))
|
||||
for k, _ := range index {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
sl := reflect.MakeSlice(t, len(index), len(index))
|
||||
for i, k := range keys {
|
||||
sl.Index(i).Set(index[k])
|
||||
}
|
||||
return sl, nil
|
||||
}
|
||||
|
||||
// parentLocale returns the locale ID obtained by stripping the final
// underscore-separated subtag from loc; a single-subtag locale falls
// back to "root".
func parentLocale(loc string) string {
	parts := strings.Split(loc, "_")
	if len(parts) == 1 {
		return "root"
	}
	return strings.Join(parts[:len(parts)-1], "_")
}
|
||||
|
||||
// resolve returns the LDML tree for loc with inheritance (root → parent
// chain → loc) and aliases applied. Results are memoized in cldr.resolved.
func (cldr *CLDR) resolve(loc string) (res *LDML, err error) {
	// Return a previously resolved tree if available.
	if r := cldr.resolved[loc]; r != nil {
		return r, nil
	}
	x := cldr.RawLDML(loc)
	if x == nil {
		return nil, fmt.Errorf("cldr: unknown locale %q", loc)
	}
	var v reflect.Value
	if loc == "root" {
		// root has no parent: deep-copy so alias resolution cannot
		// mutate the raw data, then resolve aliases in place.
		x = deepCopy(reflect.ValueOf(x)).Interface().(*LDML)
		linkEnclosing(nil, x)
		err = cldr.aliasResolver().visit(x)
	} else {
		key := parentLocale(loc)
		var parent *LDML
		// Walk up the parent-locale chain to the nearest loaded locale.
		// NOTE(review): assumes the chain always reaches a loaded locale
		// (ultimately "root"); otherwise this loop would not terminate.
		for ; cldr.locale[key] == nil; key = parentLocale(key) {
		}
		if parent, err = cldr.resolve(key); err != nil {
			return nil, err
		}
		// Inherit unspecified fields from the resolved parent, then
		// re-link enclosing pointers on the newly built tree.
		v, err = cldr.inheritFields(reflect.ValueOf(x).Elem(), reflect.ValueOf(parent).Elem())
		x = v.Interface().(*LDML)
		linkEnclosing(nil, x)
	}
	if err != nil {
		return nil, err
	}
	cldr.resolved[loc] = x
	return x, err
}
|
||||
|
||||
// finalize finalizes the initialization of the raw LDML structs. It also
|
||||
// removed unwanted fields, as specified by filter, so that they will not
|
||||
// be unnecessarily evaluated.
|
||||
func (cldr *CLDR) finalize(filter []string) {
|
||||
for _, x := range cldr.locale {
|
||||
if filter != nil {
|
||||
v := reflect.ValueOf(x).Elem()
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
name, _ := xmlName(f)
|
||||
if name != "" && name != "identity" && !in(filter, name) {
|
||||
v.Field(i).Set(reflect.Zero(f.Type))
|
||||
}
|
||||
}
|
||||
}
|
||||
linkEnclosing(nil, x) // for resolving aliases and paths
|
||||
setNames(x, "ldml")
|
||||
}
|
||||
}
|
144
vendor/golang.org/x/text/unicode/cldr/slice.go
generated
vendored
144
vendor/golang.org/x/text/unicode/cldr/slice.go
generated
vendored
@ -1,144 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Slice provides utilities for modifying slices of elements.
// It can be wrapped around any slice of which the element type implements
// interface Elem.
type Slice struct {
	ptr reflect.Value // pointer to the underlying slice (set by MakeSlice)
	typ reflect.Type  // struct type pointed to by the slice's element type
}
|
||||
|
||||
// Value returns the reflect.Value of the underlying slice.
// (s.ptr holds a pointer to the slice; Elem dereferences it.)
func (s *Slice) Value() reflect.Value {
	return s.ptr.Elem()
}
|
||||
|
||||
// MakeSlice wraps a pointer to a slice of Elems.
|
||||
// It replaces the array pointed to by the slice so that subsequent modifications
|
||||
// do not alter the data in a CLDR type.
|
||||
// It panics if an incorrect type is passed.
|
||||
func MakeSlice(slicePtr interface{}) Slice {
|
||||
ptr := reflect.ValueOf(slicePtr)
|
||||
if ptr.Kind() != reflect.Ptr {
|
||||
panic(fmt.Sprintf("MakeSlice: argument must be pointer to slice, found %v", ptr.Type()))
|
||||
}
|
||||
sl := ptr.Elem()
|
||||
if sl.Kind() != reflect.Slice {
|
||||
panic(fmt.Sprintf("MakeSlice: argument must point to a slice, found %v", sl.Type()))
|
||||
}
|
||||
intf := reflect.TypeOf((*Elem)(nil)).Elem()
|
||||
if !sl.Type().Elem().Implements(intf) {
|
||||
panic(fmt.Sprintf("MakeSlice: element type of slice (%v) does not implement Elem", sl.Type().Elem()))
|
||||
}
|
||||
nsl := reflect.MakeSlice(sl.Type(), sl.Len(), sl.Len())
|
||||
reflect.Copy(nsl, sl)
|
||||
sl.Set(nsl)
|
||||
return Slice{
|
||||
ptr: ptr,
|
||||
typ: sl.Type().Elem().Elem(),
|
||||
}
|
||||
}
|
||||
|
||||
// indexForAttr returns the field index (suitable for reflect's
// FieldByIndex) of the element-struct field whose XML name is a.
// It panics if the element type has no such field.
func (s Slice) indexForAttr(a string) []int {
	for i := iter(reflect.Zero(s.typ)); !i.done(); i.next() {
		if n, _ := xmlName(i.field()); n == a {
			return i.index
		}
	}
	panic(fmt.Sprintf("MakeSlice: no attribute %q for type %v", a, s.typ))
}
|
||||
|
||||
// Filter filters s to only include elements for which fn returns true.
|
||||
func (s Slice) Filter(fn func(e Elem) bool) {
|
||||
k := 0
|
||||
sl := s.Value()
|
||||
for i := 0; i < sl.Len(); i++ {
|
||||
vi := sl.Index(i)
|
||||
if fn(vi.Interface().(Elem)) {
|
||||
sl.Index(k).Set(vi)
|
||||
k++
|
||||
}
|
||||
}
|
||||
sl.Set(sl.Slice(0, k))
|
||||
}
|
||||
|
||||
// Group finds elements in s for which fn returns the same value and groups
|
||||
// them in a new Slice.
|
||||
func (s Slice) Group(fn func(e Elem) string) []Slice {
|
||||
m := make(map[string][]reflect.Value)
|
||||
sl := s.Value()
|
||||
for i := 0; i < sl.Len(); i++ {
|
||||
vi := sl.Index(i)
|
||||
key := fn(vi.Interface().(Elem))
|
||||
m[key] = append(m[key], vi)
|
||||
}
|
||||
keys := []string{}
|
||||
for k, _ := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
res := []Slice{}
|
||||
for _, k := range keys {
|
||||
nsl := reflect.New(sl.Type())
|
||||
nsl.Elem().Set(reflect.Append(nsl.Elem(), m[k]...))
|
||||
res = append(res, MakeSlice(nsl.Interface()))
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// SelectAnyOf filters s to contain only elements for which attr matches
|
||||
// any of the values.
|
||||
func (s Slice) SelectAnyOf(attr string, values ...string) {
|
||||
index := s.indexForAttr(attr)
|
||||
s.Filter(func(e Elem) bool {
|
||||
vf := reflect.ValueOf(e).Elem().FieldByIndex(index)
|
||||
return in(values, vf.String())
|
||||
})
|
||||
}
|
||||
|
||||
// SelectOnePerGroup filters s to include at most one element e per group of
// elements matching Key(attr), where e has an attribute a that matches any
// the values in v.
// If more than one element in a group matches a value in v preference
// is given to the element that matches the first value in v.
func (s Slice) SelectOnePerGroup(a string, v []string) {
	index := s.indexForAttr(a)
	// Group by all distinguishing attributes except a.
	grouped := s.Group(func(e Elem) string { return Key(e, a) })
	sl := s.Value()
	sl.Set(sl.Slice(0, 0)) // rebuild the slice in place
	for _, g := range grouped {
		e := reflect.Value{}
		found := len(v) // position in v of the best match so far; len(v) = none
		gsl := g.Value()
		for i := 0; i < gsl.Len(); i++ {
			vi := gsl.Index(i).Elem().FieldByIndex(index)
			// Linear scan for the position of this element's value in v.
			j := 0
			for ; j < len(v) && v[j] != vi.String(); j++ {
			}
			// An earlier position in v wins.
			if j < found {
				found = j
				e = gsl.Index(i)
			}
		}
		// Keep the group's winner only if some value in v actually matched.
		if found < len(v) {
			sl.Set(reflect.Append(sl, e))
		}
	}
}
|
||||
|
||||
// SelectDraft drops all elements from the list with a draft level smaller than d
// and selects the highest draft level of the remaining.
// This method assumes that the input CLDR is canonicalized.
func (s Slice) SelectDraft(d Draft) {
	// The tail of the package-level drafts list selects all levels at or
	// above d. NOTE(review): the drafts declaration and its ordering are
	// not visible here — confirm the -2 offset against it.
	s.SelectOnePerGroup("draft", drafts[len(drafts)-2-int(d):])
}
|
1494
vendor/golang.org/x/text/unicode/cldr/xml.go
generated
vendored
1494
vendor/golang.org/x/text/unicode/cldr/xml.go
generated
vendored
File diff suppressed because it is too large
Load Diff
44
vendor/modules.txt
vendored
Normal file
44
vendor/modules.txt
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
# github.com/davecgh/go-spew v1.1.1
|
||||
github.com/davecgh/go-spew/spew
|
||||
# github.com/kirillDanshin/dlog v0.0.0-20170728000807-97d876b12bf9
|
||||
github.com/kirillDanshin/dlog
|
||||
# github.com/kirillDanshin/myutils v0.0.0-20160713214838-182269b1fbcc
|
||||
github.com/kirillDanshin/myutils
|
||||
# github.com/klauspost/compress v1.4.0
|
||||
github.com/klauspost/compress/flate
|
||||
github.com/klauspost/compress/gzip
|
||||
github.com/klauspost/compress/zlib
|
||||
# github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e
|
||||
github.com/klauspost/cpuid
|
||||
# github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
|
||||
github.com/pquerna/ffjson/ffjson
|
||||
github.com/pquerna/ffjson/fflib/v1
|
||||
github.com/pquerna/ffjson/fflib/v1/internal
|
||||
# github.com/rs/zerolog v1.11.0
|
||||
github.com/rs/zerolog
|
||||
github.com/rs/zerolog/internal/cbor
|
||||
github.com/rs/zerolog/internal/json
|
||||
# github.com/valyala/bytebufferpool v1.0.0
|
||||
github.com/valyala/bytebufferpool
|
||||
# github.com/valyala/fasthttp v1.0.0
|
||||
github.com/valyala/fasthttp
|
||||
github.com/valyala/fasthttp/fasthttputil
|
||||
github.com/valyala/fasthttp/stackless
|
||||
# gitlab.com/toby3d/telegram v0.0.0-20181012114749-b3f324e1b3aa
|
||||
gitlab.com/toby3d/telegram
|
||||
# golang.org/x/net v0.0.0-20181220203305-927f97764cc3
|
||||
golang.org/x/net/proxy
|
||||
golang.org/x/net/internal/socks
|
||||
# golang.org/x/text v0.3.0
|
||||
golang.org/x/text/language
|
||||
golang.org/x/text/message
|
||||
golang.org/x/text/internal/tag
|
||||
golang.org/x/text/feature/plural
|
||||
golang.org/x/text/internal/format
|
||||
golang.org/x/text/internal/number
|
||||
golang.org/x/text/message/catalog
|
||||
golang.org/x/text/internal/catmsg
|
||||
golang.org/x/text/internal
|
||||
golang.org/x/text/internal/stringset
|
||||
# gopkg.in/yaml.v2 v2.2.2
|
||||
gopkg.in/yaml.v2
|
Reference in New Issue
Block a user