Initial commit
This commit is contained in:
		
							
								
								
									
										1
									
								
								.gitignore
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1
									
								
								.gitignore
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1 @@ | ||||
| fw_zookeeper.yaml | ||||
							
								
								
									
										151
									
								
								Gopkg.lock
									
									
									
										generated
									
									
									
										Normal file
									
								
							
							
						
						
									
										151
									
								
								Gopkg.lock
									
									
									
										generated
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,151 @@ | ||||
| # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. | ||||
|  | ||||
|  | ||||
| [[projects]] | ||||
|   digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" | ||||
|   name = "github.com/davecgh/go-spew" | ||||
|   packages = ["spew"] | ||||
|   pruneopts = "UT" | ||||
|   revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" | ||||
|   version = "v1.1.1" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   digest = "1:5d27f9572f69e11b238ffd362a0e20ff8fc075cf33f3c147b281a077980616f3" | ||||
|   name = "github.com/kirillDanshin/dlog" | ||||
|   packages = ["."] | ||||
|   pruneopts = "UT" | ||||
|   revision = "97d876b12bf9f9e11dca34779fedbf017c636e87" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   digest = "1:e4cca162f88bcb8b428a0c2a0bc529196575e5c860f4ce4f61871c288c798c24" | ||||
|   name = "github.com/kirillDanshin/myutils" | ||||
|   packages = ["."] | ||||
|   pruneopts = "UT" | ||||
|   revision = "182269b1fbcc91a4bbed900124a49c92baa5b9d6" | ||||
|  | ||||
| [[projects]] | ||||
|   digest = "1:aaa8e0e7e35d92e21daed3f241832cee73d15ca1cd3302ba3843159a959a7eac" | ||||
|   name = "github.com/klauspost/compress" | ||||
|   packages = [ | ||||
|     "flate", | ||||
|     "gzip", | ||||
|     "zlib", | ||||
|   ] | ||||
|   pruneopts = "UT" | ||||
|   revision = "30be6041bed523c18e269a700ebd9c2ea9328574" | ||||
|   version = "v1.4.1" | ||||
|  | ||||
| [[projects]] | ||||
|   digest = "1:2d643962fac133904694fffa959bc3c5dcfdcee38c6f5ffdd99a3c93eb9c835c" | ||||
|   name = "github.com/klauspost/cpuid" | ||||
|   packages = ["."] | ||||
|   pruneopts = "UT" | ||||
|   revision = "e7e905edc00ea8827e58662220139109efea09db" | ||||
|   version = "v1.2.0" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   digest = "1:63987b971c0f3240a0f1eed70b80b360e6a57d7a8c85ed7daa8bf770319ab308" | ||||
|   name = "github.com/pquerna/ffjson" | ||||
|   packages = [ | ||||
|     "ffjson", | ||||
|     "fflib/v1", | ||||
|     "fflib/v1/internal", | ||||
|   ] | ||||
|   pruneopts = "UT" | ||||
|   revision = "e517b90714f7c0eabe6d2e570a5886ae077d6db6" | ||||
|  | ||||
| [[projects]] | ||||
|   digest = "1:6112a5eaec2ec65df289ccbb7a730aaf03e3c5cce6c906d367ccf9b7ac567604" | ||||
|   name = "github.com/rs/zerolog" | ||||
|   packages = [ | ||||
|     ".", | ||||
|     "internal/cbor", | ||||
|     "internal/json", | ||||
|   ] | ||||
|   pruneopts = "UT" | ||||
|   revision = "8747b7b3a51b5d08ee7ac50eaf4869edaf9f714a" | ||||
|   version = "v1.11.0" | ||||
|  | ||||
| [[projects]] | ||||
|   digest = "1:c468422f334a6b46a19448ad59aaffdfc0a36b08fdcc1c749a0b29b6453d7e59" | ||||
|   name = "github.com/valyala/bytebufferpool" | ||||
|   packages = ["."] | ||||
|   pruneopts = "UT" | ||||
|   revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7" | ||||
|   version = "v1.0.0" | ||||
|  | ||||
| [[projects]] | ||||
|   digest = "1:15ad8a80098fcc7a194b9db6b26d74072a852e4faa957848c8118193d3c69230" | ||||
|   name = "github.com/valyala/fasthttp" | ||||
|   packages = [ | ||||
|     ".", | ||||
|     "fasthttputil", | ||||
|     "stackless", | ||||
|   ] | ||||
|   pruneopts = "UT" | ||||
|   revision = "e5f51c11919d4f66400334047b897ef0a94c6f3c" | ||||
|   version = "v20180529" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "develop" | ||||
|   digest = "1:962c8f9e7e2c60f1f991a6f2f9090d315da010ec91361cfa14d4fdcf3ff92232" | ||||
|   name = "gitlab.com/toby3d/telegram" | ||||
|   packages = ["."] | ||||
|   pruneopts = "UT" | ||||
|   revision = "b3f324e1b3aa692425c23bc87df428ff7d2a492d" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   digest = "1:3fa70ba3ba75f47646d2a6ff518f46f3c4a215912eb6f9c26b6e956918038f01" | ||||
|   name = "golang.org/x/net" | ||||
|   packages = [ | ||||
|     "internal/socks", | ||||
|     "proxy", | ||||
|   ] | ||||
|   pruneopts = "UT" | ||||
|   revision = "fae4c4e3ad76c295c3d6d259f898136b4bf833a8" | ||||
|  | ||||
| [[projects]] | ||||
|   digest = "1:b154eb17b54cec56332bb76d6b5cf1b23f96beaf19468d0da5e94fc737a9093d" | ||||
|   name = "golang.org/x/text" | ||||
|   packages = [ | ||||
|     "feature/plural", | ||||
|     "internal", | ||||
|     "internal/catmsg", | ||||
|     "internal/format", | ||||
|     "internal/gen", | ||||
|     "internal/number", | ||||
|     "internal/stringset", | ||||
|     "internal/tag", | ||||
|     "language", | ||||
|     "message", | ||||
|     "message/catalog", | ||||
|     "unicode/cldr", | ||||
|   ] | ||||
|   pruneopts = "UT" | ||||
|   revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" | ||||
|   version = "v0.3.0" | ||||
|  | ||||
| [[projects]] | ||||
|   digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" | ||||
|   name = "gopkg.in/yaml.v2" | ||||
|   packages = ["."] | ||||
|   pruneopts = "UT" | ||||
|   revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" | ||||
|   version = "v2.2.2" | ||||
|  | ||||
| [solve-meta] | ||||
|   analyzer-name = "dep" | ||||
|   analyzer-version = 1 | ||||
|   input-imports = [ | ||||
|     "github.com/rs/zerolog", | ||||
|     "github.com/valyala/fasthttp", | ||||
|     "gitlab.com/toby3d/telegram", | ||||
|     "golang.org/x/net/proxy", | ||||
|     "gopkg.in/yaml.v2", | ||||
|   ] | ||||
|   solver-name = "gps-cdcl" | ||||
|   solver-version = 1 | ||||
							
								
								
									
										34
									
								
								Gopkg.toml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										34
									
								
								Gopkg.toml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,34 @@ | ||||
| # Gopkg.toml example | ||||
| # | ||||
| # Refer to https://golang.github.io/dep/docs/Gopkg.toml.html | ||||
| # for detailed Gopkg.toml documentation. | ||||
| # | ||||
| # required = ["github.com/user/thing/cmd/thing"] | ||||
| # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] | ||||
| # | ||||
| # [[constraint]] | ||||
| #   name = "github.com/user/project" | ||||
| #   version = "1.0.0" | ||||
| # | ||||
| # [[constraint]] | ||||
| #   name = "github.com/user/project2" | ||||
| #   branch = "dev" | ||||
| #   source = "github.com/myfork/project2" | ||||
| # | ||||
| # [[override]] | ||||
| #   name = "github.com/x/y" | ||||
| #   version = "2.4.0" | ||||
| # | ||||
| # [prune] | ||||
| #   non-go = false | ||||
| #   go-tests = true | ||||
| #   unused-packages = true | ||||
|  | ||||
|  | ||||
| [[constraint]] | ||||
|   name = "github.com/rs/zerolog" | ||||
|   version = "1.11.0" | ||||
|  | ||||
| [prune] | ||||
|   go-tests = true | ||||
|   unused-packages = true | ||||
							
								
								
									
										3
									
								
								README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										3
									
								
								README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,3 @@ | ||||
| # Смотритель зоопарка Fantasy World | ||||
|  | ||||
| https://t.me/fw_zookeper_bot | ||||
							
								
								
									
										64
									
								
								context/context.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										64
									
								
								context/context.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,64 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package context | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"github.com/rs/zerolog" | ||||
| 	"gopkg.in/yaml.v2" | ||||
| 	"io/ioutil" | ||||
| 	"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/config" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| ) | ||||
|  | ||||
| // getMemoryUsage returns memory usage for logger. | ||||
| func (c *Context) getMemoryUsage(e *zerolog.Event, level zerolog.Level, message string) { | ||||
| 	var m runtime.MemStats | ||||
| 	runtime.ReadMemStats(&m) | ||||
|  | ||||
| 	e.Str("memalloc", fmt.Sprintf("%dMB", m.Alloc/1024/1024)) | ||||
| 	e.Str("memsys", fmt.Sprintf("%dMB", m.Sys/1024/1024)) | ||||
| 	e.Str("numgc", fmt.Sprintf("%d", m.NumGC)) | ||||
| } | ||||
|  | ||||
| // Init is an initialization function for core context | ||||
| // Without these parts of the application we can't start at all | ||||
| func (c *Context) Init() { | ||||
| 	c.Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stdout}).With().Timestamp().Logger() | ||||
| 	c.Logger = c.Logger.Hook(zerolog.HookFunc(c.getMemoryUsage)) | ||||
|  | ||||
| 	c.Logger.Info().Msgf("fw_zookeeper v. %s is starting...", VERSION) | ||||
| } | ||||
|  | ||||
| // InitConfiguration reads configuration from YAML and parses it in | ||||
| // config.Struct. | ||||
| func (c *Context) InitConfiguration() bool { | ||||
| 	c.Logger.Info().Msg("Loading configuration files...") | ||||
|  | ||||
| 	configPath := os.Getenv("BOT_CONFIG") | ||||
| 	if configPath == "" { | ||||
| 		configPath = "./example/fw_zookeeper.yaml" | ||||
| 	} | ||||
| 	normalizedConfigPath, _ := filepath.Abs(configPath) | ||||
| 	c.Logger.Debug().Msgf("Configuration file path: %s", normalizedConfigPath) | ||||
|  | ||||
| 	// Read configuration file into []byte. | ||||
| 	fileData, err := ioutil.ReadFile(normalizedConfigPath) | ||||
| 	if err != nil { | ||||
| 		c.Logger.Error().Err(err).Msg("Failed to read configuration file") | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	c.Config = &config.Struct{} | ||||
| 	err = yaml.Unmarshal(fileData, c.Config) | ||||
| 	if err != nil { | ||||
| 		c.Logger.Error().Err(err).Msg("Failed to parse configuration file") | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	c.Logger.Info().Msg("Configuration file parsed successfully") | ||||
| 	return true | ||||
| } | ||||
							
								
								
									
										24
									
								
								context/exported.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								context/exported.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package context | ||||
|  | ||||
| import ( | ||||
| 	"github.com/rs/zerolog" | ||||
| 	"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/config" | ||||
| ) | ||||
|  | ||||
// VERSION is the current bot's version
const VERSION = "0.0.1"

// Context is the main application context.
// It carries the parsed configuration and the application-wide logger and
// is handed to every subpackage through their New() initializers.
type Context struct {
	Config *config.Struct // parsed YAML configuration; populated by InitConfiguration
	Logger zerolog.Logger // console logger; populated by Init
}
|  | ||||
| // NewContext is an initialization function for Context | ||||
| func NewContext() *Context { | ||||
| 	c := &Context{} | ||||
| 	return c | ||||
| } | ||||
							
								
								
									
										12
									
								
								internal/config/proxy.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										12
									
								
								internal/config/proxy.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,12 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package config | ||||
|  | ||||
// Proxy handles settings for Telegram SOCKS5 proxy
type Proxy struct {
	Enabled  bool   `yaml:"enabled"`  // when false the bot connects to Telegram directly
	Address  string `yaml:"address,omitempty"`  // SOCKS5 server address, passed verbatim to the dialer
	Username string `yaml:"username,omitempty"` // optional proxy credentials
	Password string `yaml:"password,omitempty"` // only used when Username is non-empty
}
							
								
								
									
										10
									
								
								internal/config/struct.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								internal/config/struct.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,10 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package config | ||||
|  | ||||
// Struct is a main configuration structure that holds all other
// structs within.
type Struct struct {
	Telegram Telegram `yaml:"telegram"` // bot token, webhook and proxy settings
}
							
								
								
									
										11
									
								
								internal/config/telegram.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								internal/config/telegram.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,11 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package config | ||||
|  | ||||
// Telegram represents bot's Telegram configuration
type Telegram struct {
	Token   string  `yaml:"token"`   // Bot API access token
	Webhook Webhook `yaml:"webhook"` // webhook settings; long polling is used when disabled
	Proxy   Proxy   `yaml:"proxy"`   // optional SOCKS5 proxy for reaching Telegram
}
							
								
								
									
										11
									
								
								internal/config/webhook.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								internal/config/webhook.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,11 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package config | ||||
|  | ||||
// Webhook handles settings for Telegram webhook
type Webhook struct {
	Enabled bool   `yaml:"enabled"` // when false the bot falls back to long polling
	Domain  string `yaml:"domain,omitempty"` // public URL base used when registering the webhook
	Listen  string `yaml:"listen,omitempty"` // local address the webhook listener binds to
}
							
								
								
									
										45
									
								
								internal/router/exported.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										45
									
								
								internal/router/exported.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,45 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package router | ||||
|  | ||||
| import ( | ||||
| 	"github.com/rs/zerolog" | ||||
| 	"gitlab.com/toby3d/telegram" | ||||
| 	"lab.wtfteam.pro/fat0troll/fw_zookeeper/context" | ||||
| 	"regexp" | ||||
| ) | ||||
|  | ||||
var (
	c   *context.Context // package-wide application context, set by New
	log zerolog.Logger   // package-scoped logger tagged with domain "router"

	// Requests is a pointer to initialized Router object
	Requests *Router
)

// Router is a struct which handles router functions.
// Each map binds a command name or a compiled regular expression to the
// handler that processes a matching Telegram update; private and group
// chats keep separate handler tables.
type Router struct {
	privateCommands map[string]func(update *telegram.Update)
	groupCommands   map[string]func(update *telegram.Update)
	privateRegulars map[*regexp.Regexp]func(update *telegram.Update)
	groupRegulars   map[*regexp.Regexp]func(update *telegram.Update)
	inlineQueries   map[*regexp.Regexp]func(update *telegram.Update)
}
|  | ||||
| // New initializes package | ||||
| func New(cc *context.Context) { | ||||
| 	c = cc | ||||
| 	log = c.Logger.With().Str("domain", "router").Int("version", 1).Logger() | ||||
| 	r := &Router{} | ||||
|  | ||||
| 	r.privateCommands = make(map[string]func(update *telegram.Update)) | ||||
| 	r.groupCommands = make(map[string]func(update *telegram.Update)) | ||||
| 	r.privateRegulars = make(map[*regexp.Regexp]func(update *telegram.Update)) | ||||
| 	r.groupRegulars = make(map[*regexp.Regexp]func(update *telegram.Update)) | ||||
| 	r.inlineQueries = make(map[*regexp.Regexp]func(update *telegram.Update)) | ||||
|  | ||||
| 	log.Info().Msg("Initialized requests router") | ||||
|  | ||||
| 	Requests = r | ||||
| } | ||||
							
								
								
									
										146
									
								
								internal/router/router.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										146
									
								
								internal/router/router.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,146 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package router | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
|  | ||||
| 	"gitlab.com/toby3d/telegram" | ||||
| ) | ||||
|  | ||||
var (
	// acceptingForwardsFrom defines list of users which messages can be used as
	// forwarded
	// In case of this bot, this is @FWorldBot
	// NOTE(review): the list is currently empty, so checkForward rejects
	// every forwarded message until at least one ID is added here.
	acceptingForwardsFrom = []int{}
)
|  | ||||
| func (r *Router) checkForward(update *telegram.Update) error { | ||||
| 	if update.Message.ForwardFrom != nil { | ||||
| 		log.Debug().Msgf("Processing forward from Telegram ID = %d", update.Message.ForwardFrom.ID) | ||||
| 		for i := range acceptingForwardsFrom { | ||||
| 			if acceptingForwardsFrom[i] == update.Message.ForwardFrom.ID { | ||||
| 				return nil | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return errors.New("Can't handle forward from Telegram user with ID =" + strconv.Itoa(update.Message.ForwardFrom.ID)) | ||||
| } | ||||
|  | ||||
| func (r *Router) handleInlineQuery(update *telegram.Update) { | ||||
| 	rxpMatched := false | ||||
| 	for rxp, function := range r.inlineQueries { | ||||
| 		if rxp.MatchString(update.InlineQuery.Query) { | ||||
| 			if rxpMatched { | ||||
| 				log.Warn().Msgf("The message handled more than once: %s, %s", update.InlineQuery.Query, strings.Replace(rxp.String(), "\n", "\\n", -1)) | ||||
| 			} else { | ||||
| 				rxpMatched = true | ||||
| 				function(update) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	if !rxpMatched { | ||||
| 		log.Debug().Msgf("There is no handler for inline: %s", update.InlineQuery.Query) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (r *Router) handleRequest(update *telegram.Update, commands map[string]func(*telegram.Update), rxps map[*regexp.Regexp]func(*telegram.Update)) { | ||||
| 	switch { | ||||
| 	case update.Message.IsCommand(): | ||||
| 		if commands[update.Message.Command()] != nil { | ||||
| 			commands[update.Message.Command()](update) | ||||
| 		} else { | ||||
| 			log.Warn().Msgf("There is no handler for command /%s", update.Message.Command()) | ||||
| 		} | ||||
| 	default: | ||||
| 		rxpMatched := false | ||||
| 		for rxp, function := range rxps { | ||||
| 			if rxp.MatchString(update.Message.Text) { | ||||
| 				if rxpMatched { | ||||
| 					log.Warn().Msgf("The message handled more than once: %s, %s", update.Message.Text, strings.Replace(rxp.String(), "\n", "\\n", -1)) | ||||
| 				} else { | ||||
| 					rxpMatched = true | ||||
| 					function(update) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		if !rxpMatched { | ||||
| 			log.Debug().Msgf("There is no handler for message: %s", update.Message.Text) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// handleGroupRequest routes a group-chat update through the group-specific
// command and regexp handler tables.
func (r *Router) handleGroupRequest(update *telegram.Update) {
	r.handleRequest(update, r.groupCommands, r.groupRegulars)
}
|  | ||||
// handlePrivateRequest routes a private-chat update through the
// private-specific command and regexp handler tables.
func (r *Router) handlePrivateRequest(update *telegram.Update) {
	r.handleRequest(update, r.privateCommands, r.privateRegulars)
}
|  | ||||
// RegisterPrivateCommand adds function to private commands list.
// The handler fires when a private chat receives /<command>.
func (r *Router) RegisterPrivateCommand(command string, handleFunc func(update *telegram.Update)) {
	log.Debug().Msgf("Registering handler for private command /%s", command)
	r.privateCommands[command] = handleFunc
}
|  | ||||
| // RegisterPrivateRegexp adds function to private regexp list | ||||
| func (r *Router) RegisterPrivateRegexp(rxp *regexp.Regexp, handleFunc func(update *telegram.Update)) { | ||||
| 	log.Debug().Msgf("Registering handler for regular expresson: %s", strings.Replace(rxp.String(), "\n", "\\n", -1)) | ||||
| 	r.privateRegulars[rxp] = handleFunc | ||||
| } | ||||
|  | ||||
// RegisterGroupCommand adds function to group commands list.
// The handler fires when a group or supergroup chat receives /<command>.
func (r *Router) RegisterGroupCommand(command string, handleFunc func(update *telegram.Update)) {
	log.Debug().Msgf("Registering handler for group command /%s", command)
	r.groupCommands[command] = handleFunc
}
|  | ||||
| // RegisterGroupRegexp adds function to group regexp list | ||||
| func (r *Router) RegisterGroupRegexp(rxp *regexp.Regexp, handleFunc func(update *telegram.Update)) { | ||||
| 	log.Debug().Msgf("Registering handler for regular expresson: %s", strings.Replace(rxp.String(), "\n", "\\n", -1)) | ||||
| 	r.groupRegulars[rxp] = handleFunc | ||||
| } | ||||
|  | ||||
| // RegisterInlineQueryResult adds function to list of inline queries | ||||
| func (r *Router) RegisterInlineQueryResult(rxp *regexp.Regexp, handleFunc func(update *telegram.Update)) { | ||||
| 	log.Debug().Msgf("Registering handler for inline regular expresson: %s", strings.Replace(rxp.String(), "\n", "\\n", -1)) | ||||
| 	r.inlineQueries[rxp] = handleFunc | ||||
| } | ||||
|  | ||||
| // Respond searches for appropriative answer to the request and passes request to found function | ||||
| // If none of the functions can handle this request, it will be warned in log file | ||||
| func (r *Router) Respond(update telegram.Update) { | ||||
| 	switch { | ||||
| 	case update.Message != nil: | ||||
| 		if update.Message.Text != "" { | ||||
| 			if update.Message.ForwardFrom != nil { | ||||
| 				err := r.checkForward(&update) | ||||
| 				if err != nil { | ||||
| 					log.Warn().Err(err) | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			if update.Message.Chat.IsPrivate() { | ||||
| 				r.handlePrivateRequest(&update) | ||||
| 			} else if update.Message.Chat.IsGroup() || update.Message.Chat.IsSuperGroup() { | ||||
| 				r.handleGroupRequest(&update) | ||||
| 			} else { | ||||
| 				log.Debug().Msg("Can't handle update") | ||||
| 			} | ||||
| 		} else { | ||||
| 			log.Debug().Msg("Can't handle empty Message for now") | ||||
| 		} | ||||
| 	case update.InlineQuery != nil: | ||||
| 		if update.InlineQuery.Query != "" { | ||||
| 			r.handleInlineQuery(&update) | ||||
| 		} | ||||
| 	default: | ||||
| 		log.Debug().Msg("Can't handle empty Message for now") | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										31
									
								
								internal/telegram/exported.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										31
									
								
								internal/telegram/exported.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,31 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package telegram | ||||
|  | ||||
| import ( | ||||
| 	"github.com/rs/zerolog" | ||||
| 	"gitlab.com/toby3d/telegram" | ||||
| 	"lab.wtfteam.pro/fat0troll/fw_zookeeper/context" | ||||
| ) | ||||
|  | ||||
var (
	c   *context.Context // package-wide application context, set by New
	log zerolog.Logger   // package-scoped logger tagged with domain "telegram"
)

// Telegram is a struct which handles Telegram instance handling functions.
type Telegram struct {
	bot *telegram.Bot // underlying Bot API client; set up by StartBot
}
|  | ||||
| // New initializes package | ||||
| func New(cc *context.Context) { | ||||
| 	c = cc | ||||
| 	log = c.Logger.With().Str("domain", "telegram").Int("version", 1).Logger() | ||||
| 	t := &Telegram{} | ||||
|  | ||||
| 	log.Info().Msg("Starting Telegram instance") | ||||
|  | ||||
| 	t.StartBot() | ||||
| } | ||||
							
								
								
									
										97
									
								
								internal/telegram/telegram.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										97
									
								
								internal/telegram/telegram.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,97 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package telegram | ||||
|  | ||||
| import ( | ||||
| 	"net" | ||||
|  | ||||
| 	http "github.com/valyala/fasthttp" | ||||
| 	"gitlab.com/toby3d/telegram" | ||||
| 	"golang.org/x/net/proxy" | ||||
| 	"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/router" | ||||
| ) | ||||
|  | ||||
| func (t *Telegram) proxyDialer(addr string) (net.Conn, error) { | ||||
| 	log.Debug().Msgf("Proxy used: %s", c.Config.Telegram.Proxy.Address) | ||||
| 	proxyAuth := proxy.Auth{} | ||||
| 	if c.Config.Telegram.Proxy.Username != "" { | ||||
| 		proxyAuth.User = c.Config.Telegram.Proxy.Username | ||||
| 		proxyAuth.Password = c.Config.Telegram.Proxy.Password | ||||
| 	} | ||||
| 	var dialProxy proxy.Dialer | ||||
| 	var err error | ||||
| 	dialProxy, err = proxy.SOCKS5("tcp", c.Config.Telegram.Proxy.Address, &proxyAuth, proxy.Direct) | ||||
| 	if err != nil { | ||||
| 		log.Fatal().Err(err).Msg("Failed to dial proxy") | ||||
| 	} | ||||
|  | ||||
| 	return dialProxy.Dial("tcp", addr) | ||||
| } | ||||
|  | ||||
// Bot returns Telegram instance (the underlying Bot API client created by
// StartBot; nil before StartBot has run).
func (t *Telegram) Bot() *telegram.Bot {
	return t.bot
}
|  | ||||
| // StartBot starts connection with Telegram | ||||
| func (t *Telegram) StartBot() { | ||||
| 	// Any errors here considered fatal, because main purpose of this app is Telegram interactions | ||||
| 	var err error | ||||
| 	var updates telegram.UpdatesChannel | ||||
| 	if c.Config.Telegram.Proxy.Enabled { | ||||
| 		t.bot = new(telegram.Bot) | ||||
| 		client := new(http.Client) | ||||
| 		client.Dial = t.proxyDialer | ||||
| 		t.bot.SetClient(client) | ||||
| 		t.bot.AccessToken = c.Config.Telegram.Token | ||||
| 		t.bot.User, err = t.bot.GetMe() | ||||
| 	} else { | ||||
| 		t.bot, err = telegram.New(c.Config.Telegram.Token) | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		log.Fatal().Err(err) | ||||
| 	} | ||||
|  | ||||
| 	if c.Config.Telegram.Webhook.Enabled { | ||||
| 		var url *http.URI | ||||
| 		url.Parse(nil, []byte(c.Config.Telegram.Webhook.Domain)) | ||||
| 		if len(url.Host()) == 0 { | ||||
| 			log.Fatal().Msg("Can't parse webhook URL: got empty host") | ||||
| 		} | ||||
| 		log.Info().Msg("Trying to set webhook: " + url.String() + t.bot.AccessToken) | ||||
|  | ||||
| 		webhook := telegram.NewWebhook(url.String()+t.bot.AccessToken, nil) | ||||
| 		webhook.MaxConnections = 40 | ||||
|  | ||||
| 		updates = t.bot.NewWebhookChannel(url, webhook, "", "", c.Config.Telegram.Webhook.Listen) | ||||
| 	} else { | ||||
| 		log.Warn().Msg("Using long-polling for updates (not recommended)") | ||||
| 		var info *telegram.WebhookInfo | ||||
| 		info, err = t.bot.GetWebhookInfo() | ||||
| 		if err != nil { | ||||
| 			log.Fatal().Err(err) | ||||
| 		} | ||||
| 		if info != nil && info.URL != "" { | ||||
| 			log.Info().Msg("Deleting old webhook...") | ||||
| 			_, err := t.bot.DeleteWebhook() | ||||
| 			if err != nil { | ||||
| 				log.Fatal().Err(err) | ||||
| 			} | ||||
| 		} | ||||
| 		updatesParams := telegram.GetUpdatesParameters{ | ||||
| 			Offset:  0, | ||||
| 			Limit:   100, | ||||
| 			Timeout: 60, | ||||
| 		} | ||||
| 		updates = t.bot.NewLongPollingChannel(&updatesParams) | ||||
| 	} | ||||
|  | ||||
| 	log.Info().Msg("Connection with Telegram established") | ||||
|  | ||||
| 	for update := range updates { | ||||
| 		log.Debug().Msgf("%+v", update) | ||||
| 		go router.Requests.Respond(update) | ||||
| 	} | ||||
|  | ||||
| } | ||||
							
								
								
									
										45
									
								
								main.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										45
									
								
								main.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,45 @@ | ||||
| // Fantasy World Zookeeper Bot | ||||
| // Copyright (c) 2018 Vladimir "fat0troll" Hodakov | ||||
|  | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"lab.wtfteam.pro/fat0troll/fw_zookeeper/context" | ||||
| 	"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/router" | ||||
| 	"lab.wtfteam.pro/fat0troll/fw_zookeeper/internal/telegram" | ||||
| 	"os" | ||||
| 	"os/signal" | ||||
| 	"runtime" | ||||
| 	"syscall" | ||||
| ) | ||||
|  | ||||
| func main() { | ||||
| 	// Before any real work - lock to OS thread. We shouldn't leave it until | ||||
| 	// shutdown | ||||
| 	runtime.LockOSThread() | ||||
|  | ||||
| 	// Initializing context | ||||
|  | ||||
| 	c := context.NewContext() | ||||
| 	c.Init() | ||||
| 	c.InitConfiguration() | ||||
|  | ||||
| 	router.New(c) | ||||
| 	telegram.New(c) | ||||
|  | ||||
| 	// CTRL+C handler. | ||||
| 	interrupt := make(chan os.Signal, 1) | ||||
| 	signal.Notify(interrupt) | ||||
| 	shutdownDone := make(chan bool, 1) | ||||
| 	go func() { | ||||
| 		signalThing := <-interrupt | ||||
| 		if signalThing == syscall.SIGTERM || signalThing == syscall.SIGINT { | ||||
| 			c.Logger.Info().Msg("Got " + signalThing.String() + " signal, shutting down...") | ||||
| 			shutdownDone <- true | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	<-shutdownDone | ||||
| 	os.Exit(0) | ||||
|  | ||||
| } | ||||
							
								
								
									
										15
									
								
								vendor/github.com/davecgh/go-spew/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								vendor/github.com/davecgh/go-spew/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,15 @@ | ||||
| ISC License | ||||
|  | ||||
| Copyright (c) 2012-2016 Dave Collins <dave@davec.name> | ||||
|  | ||||
| Permission to use, copy, modify, and/or distribute this software for any | ||||
| purpose with or without fee is hereby granted, provided that the above | ||||
| copyright notice and this permission notice appear in all copies. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||||
| WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||||
| MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||||
| ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||||
| WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||||
| ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||||
| OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||||
							
								
								
									
										145
									
								
								vendor/github.com/davecgh/go-spew/spew/bypass.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										145
									
								
								vendor/github.com/davecgh/go-spew/spew/bypass.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,145 @@ | ||||
| // Copyright (c) 2015-2016 Dave Collins <dave@davec.name> | ||||
| // | ||||
| // Permission to use, copy, modify, and distribute this software for any | ||||
| // purpose with or without fee is hereby granted, provided that the above | ||||
| // copyright notice and this permission notice appear in all copies. | ||||
| // | ||||
| // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||||
| // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||||
| // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||||
| // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||||
| // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||||
| // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||||
| // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||||
|  | ||||
| // NOTE: Due to the following build constraints, this file will only be compiled | ||||
| // when the code is not running on Google App Engine, compiled by GopherJS, and | ||||
| // "-tags safe" is not added to the go build command line.  The "disableunsafe" | ||||
| // tag is deprecated and thus should not be used. | ||||
| // Go versions prior to 1.4 are disabled because they use a different layout | ||||
| // for interfaces which make the implementation of unsafeReflectValue more complex. | ||||
| // +build !js,!appengine,!safe,!disableunsafe,go1.4 | ||||
|  | ||||
| package spew | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.
	UnsafeDisabled = false

	// ptrSize is the size of a pointer on the current arch.
	ptrSize = unsafe.Sizeof((*byte)(nil))
)

// flag mirrors the type of reflect.Value's unexported "flag" field: a
// uintptr bit set carrying the kind, read-only, and addressability bits.
type flag uintptr

var (
	// flagRO indicates whether the value field of a reflect.Value
	// is read-only.
	flagRO flag

	// flagAddr indicates whether the address of the reflect.Value's
	// value may be taken.
	flagAddr flag
)

// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)

// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
	ro, addr flag
}{{
	// From Go 1.4 to 1.5
	ro:   1 << 5,
	addr: 1 << 7,
}, {
	// Up to Go tip.
	ro:   1<<5 | 1<<6,
	addr: 1 << 8,
}}

// flagValOffset is the byte offset of the unexported "flag" field within
// reflect.Value, discovered once at package load time. Its existence and
// layout are further sanity-checked in init below.
var flagValOffset = func() uintptr {
	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	return field.Offset
}()
|  | ||||
// flagField returns a pointer to the flag field of a reflect.Value.
// It adds flagValOffset to the Value's own address, so the Value passed in
// must stay alive (and not be copied) while the returned pointer is in use.
func flagField(v *reflect.Value) *flag {
	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
|  | ||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data.  It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	// Invalid values, and values that are already fully accessible, need
	// no bypass.
	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
		return v
	}
	flagFieldPtr := flagField(&v)
	*flagFieldPtr &^= flagRO  // clear the read-only bits
	*flagFieldPtr |= flagAddr // mark the value addressable
	return v
}
|  | ||||
// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
// It infers the actual flagRO and flagAddr bit values at runtime and
// panics if they do not match any known Go version's layout (okFlags).
func init() {
	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
		panic("reflect.Value flag field has changed kind")
	}
	type t0 int
	var t struct {
		A t0
		// t0 will have flagEmbedRO set.
		t0
		// a will have flagStickyRO set
		a t0
	}
	vA := reflect.ValueOf(t).FieldByName("A")
	va := reflect.ValueOf(t).FieldByName("a")
	vt0 := reflect.ValueOf(t).FieldByName("t0")

	// Infer flagRO from the difference between the flags
	// for the (otherwise identical) fields in t.
	flagPublic := *flagField(&vA)
	flagWithRO := *flagField(&va) | *flagField(&vt0)
	flagRO = flagPublic ^ flagWithRO

	// Infer flagAddr from the difference between a value
	// taken from a pointer and not.
	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
	flagNoPtr := *flagField(&vA)
	flagPtr := *flagField(&vPtrA)
	flagAddr = flagNoPtr ^ flagPtr

	// Check that the inferred flags tally with one of the known versions.
	for _, f := range okFlags {
		if flagRO == f.ro && flagAddr == f.addr {
			return
		}
	}
	panic("reflect.Value read-only flag has changed semantics")
}
							
								
								
									
										38
									
								
								vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										38
									
								
								vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,38 @@ | ||||
| // Copyright (c) 2015-2016 Dave Collins <dave@davec.name> | ||||
| // | ||||
| // Permission to use, copy, modify, and distribute this software for any | ||||
| // purpose with or without fee is hereby granted, provided that the above | ||||
| // copyright notice and this permission notice appear in all copies. | ||||
| // | ||||
| // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||||
| // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||||
| // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||||
| // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||||
| // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||||
| // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||||
| // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||||
|  | ||||
| // NOTE: Due to the following build constraints, this file will only be compiled | ||||
| // when the code is running on Google App Engine, compiled by GopherJS, or | ||||
| // "-tags safe" is added to the go build command line.  The "disableunsafe" | ||||
| // tag is deprecated and thus should not be used. | ||||
| // +build js appengine safe disableunsafe !go1.4 | ||||
|  | ||||
| package spew | ||||
|  | ||||
| import "reflect" | ||||
|  | ||||
const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.  In this "safe"
	// build variant (js/appengine/safe tags or Go < 1.4) it is always true.
	UnsafeDisabled = true
)
|  | ||||
// unsafeReflectValue typically converts the passed reflect.Value into a one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data.  However, doing this relies on access to
// the unsafe package.  This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	return v
}
							
								
								
									
										341
									
								
								vendor/github.com/davecgh/go-spew/spew/common.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										341
									
								
								vendor/github.com/davecgh/go-spew/spew/common.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,341 @@ | ||||
| /* | ||||
|  * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> | ||||
|  * | ||||
|  * Permission to use, copy, modify, and distribute this software for any | ||||
|  * purpose with or without fee is hereby granted, provided that the above | ||||
|  * copyright notice and this permission notice appear in all copies. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||||
|  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||||
|  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||||
|  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||||
|  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||||
|  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||||
|  */ | ||||
|  | ||||
| package spew | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"reflect" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| ) | ||||
|  | ||||
// Some constants in the form of bytes to avoid string overhead.  This mirrors
// the technique used in the fmt package.
var (
	panicBytes            = []byte("(PANIC=")
	plusBytes             = []byte("+")
	iBytes                = []byte("i")
	trueBytes             = []byte("true")
	falseBytes            = []byte("false")
	interfaceBytes        = []byte("(interface {})")
	commaNewlineBytes     = []byte(",\n")
	newlineBytes          = []byte("\n")
	openBraceBytes        = []byte("{")
	openBraceNewlineBytes = []byte("{\n")
	closeBraceBytes       = []byte("}")
	asteriskBytes         = []byte("*")
	colonBytes            = []byte(":")
	colonSpaceBytes       = []byte(": ")
	openParenBytes        = []byte("(")
	closeParenBytes       = []byte(")")
	spaceBytes            = []byte(" ")
	pointerChainBytes     = []byte("->")
	nilAngleBytes         = []byte("<nil>")
	maxNewlineBytes       = []byte("<max depth reached>\n")
	maxShortBytes         = []byte("<max>")
	circularBytes         = []byte("<already shown>")
	circularShortBytes    = []byte("<shown>")
	invalidAngleBytes     = []byte("<invalid>")
	openBracketBytes      = []byte("[")
	closeBracketBytes     = []byte("]")
	percentBytes          = []byte("%")
	precisionBytes        = []byte(".")
	openAngleBytes        = []byte("<")
	closeAngleBytes       = []byte(">")
	openMapBytes          = []byte("map[")
	closeMapBytes         = []byte("]")
	lenEqualsBytes        = []byte("len=")
	capEqualsBytes        = []byte("cap=")
)

// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
|  | ||||
// catchPanic handles any panics that might occur during the handleMethods
// calls.  It must be invoked via defer (as handleMethods does) for recover
// to take effect; a recovered panic is rendered as "(PANIC=<value>)".
func catchPanic(w io.Writer, v reflect.Value) {
	if err := recover(); err != nil {
		w.Write(panicBytes)
		fmt.Fprintf(w, "%v", err)
		w.Write(closeParenBytes)
	}
}
|  | ||||
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
// It reports whether the value was fully handled (true) or whether the caller
// should continue formatting it (false).
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
	// We need an interface to check if the type implements the error or
	// Stringer interface.  However, the reflect package won't give us an
	// interface on certain things like unexported struct fields in order
	// to enforce visibility rules.  We use unsafe, when it's available,
	// to bypass these restrictions since this package does not mutate the
	// values.
	if !v.CanInterface() {
		if UnsafeDisabled {
			return false
		}

		v = unsafeReflectValue(v)
	}

	// Choose whether or not to do error and Stringer interface lookups against
	// the base type or a pointer to the base type depending on settings.
	// Technically calling one of these methods with a pointer receiver can
	// mutate the value, however, types which choose to satisfy an error or
	// Stringer interface with a pointer receiver should not be mutating their
	// state inside these interface methods.
	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
		v = unsafeReflectValue(v)
	}
	if v.CanAddr() {
		v = v.Addr()
	}

	// Is it an error or Stringer?
	switch iface := v.Interface().(type) {
	case error:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			// Parenthesize the method output and keep formatting.
			w.Write(openParenBytes)
			w.Write([]byte(iface.Error()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}

		w.Write([]byte(iface.Error()))
		return true

	case fmt.Stringer:
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			// Parenthesize the method output and keep formatting.
			w.Write(openParenBytes)
			w.Write([]byte(iface.String()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}
		w.Write([]byte(iface.String()))
		return true
	}
	return false
}
|  | ||||
| // printBool outputs a boolean value as true or false to Writer w. | ||||
| func printBool(w io.Writer, val bool) { | ||||
| 	if val { | ||||
| 		w.Write(trueBytes) | ||||
| 	} else { | ||||
| 		w.Write(falseBytes) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// printInt writes the signed integer val, rendered in the given base, to w.
func printInt(w io.Writer, val int64, base int) {
	formatted := strconv.FormatInt(val, base)
	w.Write([]byte(formatted))
}
|  | ||||
// printUint writes the unsigned integer val, rendered in the given base, to w.
func printUint(w io.Writer, val uint64, base int) {
	formatted := strconv.FormatUint(val, base)
	w.Write([]byte(formatted))
}
|  | ||||
// printFloat writes the floating point value val to w using the 'g' format
// and the smallest representation that round-trips at the given precision
// (expected to be 32 or 64 bit).
func printFloat(w io.Writer, val float64, precision int) {
	formatted := strconv.FormatFloat(val, 'g', -1, precision)
	w.Write([]byte(formatted))
}
|  | ||||
| // printComplex outputs a complex value using the specified float precision | ||||
| // for the real and imaginary parts to Writer w. | ||||
| func printComplex(w io.Writer, c complex128, floatPrecision int) { | ||||
| 	r := real(c) | ||||
| 	w.Write(openParenBytes) | ||||
| 	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) | ||||
| 	i := imag(c) | ||||
| 	if i >= 0 { | ||||
| 		w.Write(plusBytes) | ||||
| 	} | ||||
| 	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) | ||||
| 	w.Write(iBytes) | ||||
| 	w.Write(closeParenBytes) | ||||
| } | ||||
|  | ||||
| // printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' | ||||
| // prefix to Writer w. | ||||
| func printHexPtr(w io.Writer, p uintptr) { | ||||
| 	// Null pointer. | ||||
| 	num := uint64(p) | ||||
| 	if num == 0 { | ||||
| 		w.Write(nilAngleBytes) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix | ||||
| 	buf := make([]byte, 18) | ||||
|  | ||||
| 	// It's simpler to construct the hex string right to left. | ||||
| 	base := uint64(16) | ||||
| 	i := len(buf) - 1 | ||||
| 	for num >= base { | ||||
| 		buf[i] = hexDigits[num%base] | ||||
| 		num /= base | ||||
| 		i-- | ||||
| 	} | ||||
| 	buf[i] = hexDigits[num] | ||||
|  | ||||
| 	// Add '0x' prefix. | ||||
| 	i-- | ||||
| 	buf[i] = 'x' | ||||
| 	i-- | ||||
| 	buf[i] = '0' | ||||
|  | ||||
| 	// Strip unused leading bytes. | ||||
| 	buf = buf[i:] | ||||
| 	w.Write(buf) | ||||
| } | ||||
|  | ||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
	values  []reflect.Value
	strings []string // surrogate sort keys; either nil or same len as values
	cs      *ConfigState
}
|  | ||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted.  It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
//
// The fallback chain: natively comparable kinds are sorted directly; else
// error/Stringer output is tried (unless DisableMethods); else, when SpewKeys
// is set, the spewed "%#v" representation is used as a last resort.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
	vs := &valuesSorter{values: values, cs: cs}
	if canSortSimply(vs.values[0].Kind()) {
		return vs
	}
	if !cs.DisableMethods {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			b := bytes.Buffer{}
			if !handleMethods(cs, &b, vs.values[i]) {
				// One value without a method invalidates the
				// whole surrogate set.
				vs.strings = nil
				break
			}
			vs.strings[i] = b.String()
		}
	}
	if vs.strings == nil && cs.SpewKeys {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
		}
	}
	return vs
}
|  | ||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
	// This case list parallels valueSortLess.
	switch kind {
	case reflect.Bool,
		reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int,
		reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint,
		reflect.Float32, reflect.Float64,
		reflect.String,
		reflect.Uintptr,
		reflect.Array:
		return true
	default:
		return false
	}
}
|  | ||||
// Len returns the number of values in the slice.  It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
	return len(s.values)
}
|  | ||||
// Swap swaps the values at the passed indices.  It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
	s.values[i], s.values[j] = s.values[j], s.values[i]
	if s.strings != nil {
		// Keep the surrogate sort keys aligned with their values.
		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
	}
}
|  | ||||
// valueSortLess returns whether the first value should sort before the second
// value.  It is used by valuesSorter.Less as part of the sort.Interface
// implementation.  Kinds without a natural order fall back to comparing the
// reflect.Value.String() representations for display stability.
func valueSortLess(a, b reflect.Value) bool {
	switch a.Kind() {
	case reflect.Bool:
		return !a.Bool() && b.Bool()
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return a.Int() < b.Int()
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint,
		reflect.Uintptr:
		return a.Uint() < b.Uint()
	case reflect.Float32, reflect.Float64:
		return a.Float() < b.Float()
	case reflect.String:
		return a.String() < b.String()
	case reflect.Array:
		// The first differing element pair decides the order.
		for i, n := 0, a.Len(); i < n; i++ {
			av, bv := a.Index(i), b.Index(i)
			if av.Interface() != bv.Interface() {
				return valueSortLess(av, bv)
			}
		}
	}
	return a.String() < b.String()
}
|  | ||||
// Less returns whether the value at index i should sort before the
// value at index j.  It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
	if s.strings == nil {
		// No surrogate keys were built; compare the values directly.
		return valueSortLess(s.values[i], s.values[j])
	}
	return s.strings[i] < s.strings[j]
}
|  | ||||
| // sortValues is a sort function that handles both native types and any type that | ||||
| // can be converted to error or Stringer.  Other inputs are sorted according to | ||||
| // their Value.String() value to ensure display stability. | ||||
| func sortValues(values []reflect.Value, cs *ConfigState) { | ||||
| 	if len(values) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	sort.Sort(newValuesSorter(values, cs)) | ||||
| } | ||||
							
								
								
									
										306
									
								
								vendor/github.com/davecgh/go-spew/spew/config.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										306
									
								
								vendor/github.com/davecgh/go-spew/spew/config.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,306 @@ | ||||
| /* | ||||
|  * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> | ||||
|  * | ||||
|  * Permission to use, copy, modify, and distribute this software for any | ||||
|  * purpose with or without fee is hereby granted, provided that the above | ||||
|  * copyright notice and this permission notice appear in all copies. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||||
|  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||||
|  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||||
|  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||||
|  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||||
|  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||||
|  */ | ||||
|  | ||||
| package spew | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"os" | ||||
| ) | ||||
|  | ||||
// ConfigState houses the configuration options used by spew to format and
// display values.  There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality.  Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation.  You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings.  See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
	// Indent specifies the string to use for each indentation level.  The
	// global config instance that all top-level functions use set this to a
	// single space by default.  If you would like more indentation, you might
	// set this to a tab with "\t" or perhaps two spaces with "  ".
	Indent string

	// MaxDepth controls the maximum number of levels to descend into nested
	// data structures.  The default, 0, means there is no limit.
	//
	// NOTE: Circular data structures are properly detected, so it is not
	// necessary to set this value unless you specifically want to limit deeply
	// nested data structures.
	MaxDepth int

	// DisableMethods specifies whether or not error and Stringer interfaces are
	// invoked for types that implement them.
	DisableMethods bool

	// DisablePointerMethods specifies whether or not to check for and invoke
	// error and Stringer interfaces on types which only accept a pointer
	// receiver when the current type is not a pointer.
	//
	// NOTE: This might be an unsafe action since calling one of these methods
	// with a pointer receiver could technically mutate the value, however,
	// in practice, types which choose to satisfy an error or Stringer
	// interface with a pointer receiver should not be mutating their state
	// inside these interface methods.  As a result, this option relies on
	// access to the unsafe package, so it will not have any effect when
	// running in environments without access to the unsafe package such as
	// Google App Engine or with the "safe" build tag specified.
	DisablePointerMethods bool

	// DisablePointerAddresses specifies whether to disable the printing of
	// pointer addresses. This is useful when diffing data structures in tests.
	DisablePointerAddresses bool

	// DisableCapacities specifies whether to disable the printing of capacities
	// for arrays, slices, maps and channels. This is useful when diffing
	// data structures in tests.
	DisableCapacities bool

	// ContinueOnMethod specifies whether or not recursion should continue once
	// a custom error or Stringer interface is invoked.  The default, false,
	// means it will print the results of invoking the custom error or Stringer
	// interface and return immediately instead of continuing to recurse into
	// the internals of the data type.
	//
	// NOTE: This flag does not have any effect if method invocation is disabled
	// via the DisableMethods or DisablePointerMethods options.
	ContinueOnMethod bool

	// SortKeys specifies map keys should be sorted before being printed. Use
	// this to have a more deterministic, diffable output.  Note that only
	// native types (bool, int, uint, floats, uintptr and string) and types
	// that support the error or Stringer interfaces (if methods are
	// enabled) are supported, with other types sorted according to the
	// reflect.Value.String() output which guarantees display stability.
	SortKeys bool

	// SpewKeys specifies that, as a last resort attempt, map keys should
	// be spewed to strings and sorted by those strings.  This is only
	// considered if SortKeys is true.
	SpewKeys bool
}
|  | ||||
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
// The default indents each level with a single space.
var Config = ConfigState{Indent: " "}
|  | ||||
| // Errorf is a wrapper for fmt.Errorf that treats each argument as if it were | ||||
| // passed with a Formatter interface returned by c.NewFormatter.  It returns | ||||
| // the formatted string as a value that satisfies error.  See NewFormatter | ||||
| // for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) | ||||
| func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { | ||||
| 	return fmt.Errorf(format, c.convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Fprint is a wrapper for fmt.Fprint that treats each argument as if it were | ||||
| // passed with a Formatter interface returned by c.NewFormatter.  It returns | ||||
| // the number of bytes written and any write error encountered.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) | ||||
| func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Fprint(w, c.convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were | ||||
| // passed with a Formatter interface returned by c.NewFormatter.  It returns | ||||
| // the number of bytes written and any write error encountered.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) | ||||
| func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Fprintf(w, format, c.convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it | ||||
| // passed with a Formatter interface returned by c.NewFormatter.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) | ||||
| func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Fprintln(w, c.convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Print is a wrapper for fmt.Print that treats each argument as if it were | ||||
| // passed with a Formatter interface returned by c.NewFormatter.  It returns | ||||
| // the number of bytes written and any write error encountered.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) | ||||
| func (c *ConfigState) Print(a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Print(c.convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Printf is a wrapper for fmt.Printf that treats each argument as if it were | ||||
| // passed with a Formatter interface returned by c.NewFormatter.  It returns | ||||
| // the number of bytes written and any write error encountered.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) | ||||
| func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Printf(format, c.convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Println is a wrapper for fmt.Println that treats each argument as if it were | ||||
| // passed with a Formatter interface returned by c.NewFormatter.  It returns | ||||
| // the number of bytes written and any write error encountered.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) | ||||
| func (c *ConfigState) Println(a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Println(c.convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Sprint is a wrapper for fmt.Sprint that treats each argument as if it were | ||||
| // passed with a Formatter interface returned by c.NewFormatter.  It returns | ||||
| // the resulting string.  See NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) | ||||
| func (c *ConfigState) Sprint(a ...interface{}) string { | ||||
| 	return fmt.Sprint(c.convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were | ||||
| // passed with a Formatter interface returned by c.NewFormatter.  It returns | ||||
| // the resulting string.  See NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) | ||||
| func (c *ConfigState) Sprintf(format string, a ...interface{}) string { | ||||
| 	return fmt.Sprintf(format, c.convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it | ||||
| // were passed with a Formatter interface returned by c.NewFormatter.  It | ||||
| // returns the resulting string.  See NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) | ||||
| func (c *ConfigState) Sprintln(a ...interface{}) string { | ||||
| 	return fmt.Sprintln(c.convertArgs(a)...) | ||||
| } | ||||
|  | ||||
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface.  As a result, it integrates cleanly with standard fmt package
printing functions.  The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations.  Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting.  In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly.  It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Fprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(c, v)
}
|  | ||||
// Fdump formats and displays the passed arguments to io.Writer w.  It formats
// exactly the same as Dump.  All heavy lifting is delegated to the package's
// internal fdump helper, parameterized by this ConfigState.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
	fdump(c, w, a...)
}
|  | ||||
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value.  It provides the
following features over the built-in printing facilities provided by the fmt
package:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by modifying the public members
of c.  See ConfigState for options documentation.

See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
	// Identical to Fdump with os.Stdout as the destination writer.
	fdump(c, os.Stdout, a...)
}
|  | ||||
| // Sdump returns a string with the passed arguments formatted exactly the same | ||||
| // as Dump. | ||||
| func (c *ConfigState) Sdump(a ...interface{}) string { | ||||
| 	var buf bytes.Buffer | ||||
| 	fdump(c, &buf, a...) | ||||
| 	return buf.String() | ||||
| } | ||||
|  | ||||
| // convertArgs accepts a slice of arguments and returns a slice of the same | ||||
| // length with each argument converted to a spew Formatter interface using | ||||
| // the ConfigState associated with s. | ||||
| func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { | ||||
| 	formatters = make([]interface{}, len(args)) | ||||
| 	for index, arg := range args { | ||||
| 		formatters[index] = newFormatter(c, arg) | ||||
| 	} | ||||
| 	return formatters | ||||
| } | ||||
|  | ||||
| // NewDefaultConfig returns a ConfigState with the following default settings. | ||||
| // | ||||
| // 	Indent: " " | ||||
| // 	MaxDepth: 0 | ||||
| // 	DisableMethods: false | ||||
| // 	DisablePointerMethods: false | ||||
| // 	ContinueOnMethod: false | ||||
| // 	SortKeys: false | ||||
| func NewDefaultConfig() *ConfigState { | ||||
| 	return &ConfigState{Indent: " "} | ||||
| } | ||||
							
								
								
									
										211
									
								
								vendor/github.com/davecgh/go-spew/spew/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										211
									
								
								vendor/github.com/davecgh/go-spew/spew/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,211 @@ | ||||
| /* | ||||
|  * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> | ||||
|  * | ||||
|  * Permission to use, copy, modify, and distribute this software for any | ||||
|  * purpose with or without fee is hereby granted, provided that the above | ||||
|  * copyright notice and this permission notice appear in all copies. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||||
|  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||||
|  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||||
|  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||||
|  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||||
|  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||||
|  */ | ||||
|  | ||||
| /* | ||||
| Package spew implements a deep pretty printer for Go data structures to aid in | ||||
| debugging. | ||||
|  | ||||
| A quick overview of the additional features spew provides over the built-in | ||||
| printing facilities for Go data types are as follows: | ||||
|  | ||||
| 	* Pointers are dereferenced and followed | ||||
| 	* Circular data structures are detected and handled properly | ||||
| 	* Custom Stringer/error interfaces are optionally invoked, including | ||||
| 	  on unexported types | ||||
| 	* Custom types which only implement the Stringer/error interfaces via | ||||
| 	  a pointer receiver are optionally invoked when passing non-pointer | ||||
| 	  variables | ||||
| 	* Byte arrays and slices are dumped like the hexdump -C command which | ||||
| 	  includes offsets, byte values in hex, and ASCII output (only when using | ||||
| 	  Dump style) | ||||
|  | ||||
| There are two different approaches spew allows for dumping Go data structures: | ||||
|  | ||||
| 	* Dump style which prints with newlines, customizable indentation, | ||||
| 	  and additional debug information such as types and all pointer addresses | ||||
| 	  used to indirect to the final value | ||||
| 	* A custom Formatter interface that integrates cleanly with the standard fmt | ||||
| 	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing | ||||
| 	  similar to the default %v while providing the additional functionality | ||||
| 	  outlined above and passing unsupported format verbs such as %x and %q | ||||
| 	  along to fmt | ||||
|  | ||||
| Quick Start | ||||
|  | ||||
| This section demonstrates how to quickly get started with spew.  See the | ||||
| sections below for further details on formatting and configuration options. | ||||
|  | ||||
| To dump a variable with full newlines, indentation, type, and pointer | ||||
| information use Dump, Fdump, or Sdump: | ||||
| 	spew.Dump(myVar1, myVar2, ...) | ||||
| 	spew.Fdump(someWriter, myVar1, myVar2, ...) | ||||
| 	str := spew.Sdump(myVar1, myVar2, ...) | ||||
|  | ||||
| Alternatively, if you would prefer to use format strings with a compacted inline | ||||
| printing style, use the convenience wrappers Printf, Fprintf, etc with | ||||
| %v (most compact), %+v (adds pointer addresses), %#v (adds types), or | ||||
| %#+v (adds types and pointer addresses): | ||||
| 	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) | ||||
| 	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) | ||||
| 	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) | ||||
| 	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) | ||||
|  | ||||
| Configuration Options | ||||
|  | ||||
| Configuration of spew is handled by fields in the ConfigState type.  For | ||||
| convenience, all of the top-level functions use a global state available | ||||
| via the spew.Config global. | ||||
|  | ||||
| It is also possible to create a ConfigState instance that provides methods | ||||
| equivalent to the top-level functions.  This allows concurrent configuration | ||||
| options.  See the ConfigState documentation for more details. | ||||
|  | ||||
| The following configuration options are available: | ||||
| 	* Indent | ||||
| 		String to use for each indentation level for Dump functions. | ||||
| 		It is a single space by default.  A popular alternative is "\t". | ||||
|  | ||||
| 	* MaxDepth | ||||
| 		Maximum number of levels to descend into nested data structures. | ||||
| 		There is no limit by default. | ||||
|  | ||||
| 	* DisableMethods | ||||
| 		Disables invocation of error and Stringer interface methods. | ||||
| 		Method invocation is enabled by default. | ||||
|  | ||||
| 	* DisablePointerMethods | ||||
| 		Disables invocation of error and Stringer interface methods on types | ||||
| 		which only accept pointer receivers from non-pointer variables. | ||||
| 		Pointer method invocation is enabled by default. | ||||
|  | ||||
| 	* DisablePointerAddresses | ||||
| 		DisablePointerAddresses specifies whether to disable the printing of | ||||
| 		pointer addresses. This is useful when diffing data structures in tests. | ||||
|  | ||||
| 	* DisableCapacities | ||||
| 		DisableCapacities specifies whether to disable the printing of | ||||
| 		capacities for arrays, slices, maps and channels. This is useful when | ||||
| 		diffing data structures in tests. | ||||
|  | ||||
| 	* ContinueOnMethod | ||||
| 		Enables recursion into types after invoking error and Stringer interface | ||||
| 		methods. Recursion after method invocation is disabled by default. | ||||
|  | ||||
| 	* SortKeys | ||||
| 		Specifies map keys should be sorted before being printed. Use | ||||
| 		this to have a more deterministic, diffable output.  Note that | ||||
| 		only native types (bool, int, uint, floats, uintptr and string) | ||||
| 		and types which implement error or Stringer interfaces are | ||||
| 		supported with other types sorted according to the | ||||
| 		reflect.Value.String() output which guarantees display | ||||
| 		stability.  Natural map order is used by default. | ||||
|  | ||||
| 	* SpewKeys | ||||
| 		Specifies that, as a last resort attempt, map keys should be | ||||
| 		spewed to strings and sorted by those strings.  This is only | ||||
| 		considered if SortKeys is true. | ||||
|  | ||||
| Dump Usage | ||||
|  | ||||
| Simply call spew.Dump with a list of variables you want to dump: | ||||
|  | ||||
| 	spew.Dump(myVar1, myVar2, ...) | ||||
|  | ||||
| You may also call spew.Fdump if you would prefer to output to an arbitrary | ||||
| io.Writer.  For example, to dump to standard error: | ||||
|  | ||||
| 	spew.Fdump(os.Stderr, myVar1, myVar2, ...) | ||||
|  | ||||
| A third option is to call spew.Sdump to get the formatted output as a string: | ||||
|  | ||||
| 	str := spew.Sdump(myVar1, myVar2, ...) | ||||
|  | ||||
| Sample Dump Output | ||||
|  | ||||
| See the Dump example for details on the setup of the types and variables being | ||||
| shown here. | ||||
|  | ||||
| 	(main.Foo) { | ||||
| 	 unexportedField: (*main.Bar)(0xf84002e210)({ | ||||
| 	  flag: (main.Flag) flagTwo, | ||||
| 	  data: (uintptr) <nil> | ||||
| 	 }), | ||||
| 	 ExportedField: (map[interface {}]interface {}) (len=1) { | ||||
| 	  (string) (len=3) "one": (bool) true | ||||
| 	 } | ||||
| 	} | ||||
|  | ||||
| Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C | ||||
| command as shown. | ||||
| 	([]uint8) (len=32 cap=32) { | ||||
| 	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... | | ||||
| 	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0| | ||||
| 	 00000020  31 32                                             |12| | ||||
| 	} | ||||
|  | ||||
| Custom Formatter | ||||
|  | ||||
| Spew provides a custom formatter that implements the fmt.Formatter interface | ||||
| so that it integrates cleanly with standard fmt package printing functions. The | ||||
| formatter is useful for inline printing of smaller data types similar to the | ||||
| standard %v format specifier. | ||||
|  | ||||
| The custom formatter only responds to the %v (most compact), %+v (adds pointer | ||||
| addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb | ||||
| combinations.  Any other verbs such as %x and %q will be sent to the | ||||
| standard fmt package for formatting.  In addition, the custom formatter ignores | ||||
| the width and precision arguments (however they will still work on the format | ||||
| specifiers not handled by the custom formatter). | ||||
|  | ||||
| Custom Formatter Usage | ||||
|  | ||||
| The simplest way to make use of the spew custom formatter is to call one of the | ||||
| convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.  The | ||||
| functions have syntax you are most likely already familiar with: | ||||
|  | ||||
| 	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) | ||||
| 	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) | ||||
| 	spew.Println(myVar, myVar2) | ||||
| 	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) | ||||
| 	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) | ||||
|  | ||||
| See the Index for the full list of convenience functions. | ||||
|  | ||||
| Sample Formatter Output | ||||
|  | ||||
| Double pointer to a uint8: | ||||
| 	  %v: <**>5 | ||||
| 	 %+v: <**>(0xf8400420d0->0xf8400420c8)5 | ||||
| 	 %#v: (**uint8)5 | ||||
| 	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 | ||||
|  | ||||
| Pointer to circular struct with a uint8 field and a pointer to itself: | ||||
| 	  %v: <*>{1 <*><shown>} | ||||
| 	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>} | ||||
| 	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>} | ||||
| 	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>} | ||||
|  | ||||
| See the Printf example for details on the setup of variables being shown | ||||
| here. | ||||
|  | ||||
| Errors | ||||
|  | ||||
| Since it is possible for custom Stringer/error interfaces to panic, spew | ||||
| detects them and handles them internally by printing the panic information | ||||
| inline with the output.  Since spew is intended to provide deep pretty printing | ||||
| capabilities on structures, it intentionally does not return any errors. | ||||
| */ | ||||
| package spew | ||||
							
								
								
									
										509
									
								
								vendor/github.com/davecgh/go-spew/spew/dump.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										509
									
								
								vendor/github.com/davecgh/go-spew/spew/dump.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,509 @@ | ||||
| /* | ||||
|  * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> | ||||
|  * | ||||
|  * Permission to use, copy, modify, and distribute this software for any | ||||
|  * purpose with or without fee is hereby granted, provided that the above | ||||
|  * copyright notice and this permission notice appear in all copies. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||||
|  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||||
|  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||||
|  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||||
|  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||||
|  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||||
|  */ | ||||
|  | ||||
| package spew | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/hex" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"reflect" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
// Package-level helpers used by dumpSlice to recognize byte-like data,
// including cgo character types, so it can render them hexdump-style.
var (
	// uint8Type is a reflect.Type representing a uint8.  It is used to
	// convert cgo types to uint8 slices for hexdumping.
	uint8Type = reflect.TypeOf(uint8(0))

	// cCharRE is a regular expression that matches a cgo char.
	// It is used to detect character arrays to hexdump them.
	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)

	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
	// char.  It is used to detect unsigned character arrays to hexdump
	// them.
	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)

	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
	// It is used to detect uint8_t arrays to hexdump them.
	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
|  | ||||
// dumpState contains information about the state of a dump operation.
type dumpState struct {
	w                io.Writer       // destination for all dump output
	depth            int             // current nesting level, drives indentation
	pointers         map[uintptr]int // addr -> depth first seen, for cycle detection
	ignoreNextType   bool            // suppress type prefix once (set after dumpPtr)
	ignoreNextIndent bool            // suppress indentation once (set for map values)
	cs               *ConfigState    // configuration governing this dump
}
|  | ||||
| // indent performs indentation according to the depth level and cs.Indent | ||||
| // option. | ||||
| func (d *dumpState) indent() { | ||||
| 	if d.ignoreNextIndent { | ||||
| 		d.ignoreNextIndent = false | ||||
| 		return | ||||
| 	} | ||||
| 	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) | ||||
| } | ||||
|  | ||||
| // unpackValue returns values inside of non-nil interfaces when possible. | ||||
| // This is useful for data types like structs, arrays, slices, and maps which | ||||
| // can contain varying types packed inside an interface. | ||||
| func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { | ||||
| 	if v.Kind() == reflect.Interface && !v.IsNil() { | ||||
| 		v = v.Elem() | ||||
| 	} | ||||
| 	return v | ||||
| } | ||||
|  | ||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
// It walks the full chain of indirection, records every address for display,
// detects nil pointers and circular references, and finally dumps the value
// at the end of the chain (or a <nil>/<shown> marker).
func (d *dumpState) dumpPtr(v reflect.Value) {
	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range d.pointers {
		if depth >= d.depth {
			delete(d.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		// An address already seen at a shallower depth means we have
		// looped back onto ourselves: stop and mark the cycle.
		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
			cycleFound = true
			indirects--
			break
		}
		d.pointers[addr] = d.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type information.
	// Note: ve here is the fully-dereferenced value, so its type is printed
	// with one asterisk per level of indirection counted above.
	d.w.Write(openParenBytes)
	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
	d.w.Write([]byte(ve.Type().String()))
	d.w.Write(closeParenBytes)

	// Display pointer information.
	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
		d.w.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				d.w.Write(pointerChainBytes)
			}
			printHexPtr(d.w, addr)
		}
		d.w.Write(closeParenBytes)
	}

	// Display dereferenced value.
	d.w.Write(openParenBytes)
	switch {
	case nilFound:
		d.w.Write(nilAngleBytes)

	case cycleFound:
		d.w.Write(circularBytes)

	default:
		// Type was already printed above, so suppress it in the
		// recursive dump of the pointed-to value.
		d.ignoreNextType = true
		d.dump(ve)
	}
	d.w.Write(closeParenBytes)
}
|  | ||||
// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
	// Determine whether this type should be hex dumped or not.  Also,
	// for types which should be hexdumped, try to use the underlying data
	// first, then fall back to trying to convert them to a uint8 slice.
	var buf []uint8
	doConvert := false
	doHexDump := false
	numEntries := v.Len()
	if numEntries > 0 {
		vt := v.Index(0).Type()
		vts := vt.String()
		switch {
		// C types that need to be converted.
		case cCharRE.MatchString(vts):
			fallthrough
		case cUnsignedCharRE.MatchString(vts):
			fallthrough
		case cUint8tCharRE.MatchString(vts):
			doConvert = true

		// Try to use existing uint8 slices and fall back to converting
		// and copying if that fails.
		case vt.Kind() == reflect.Uint8:
			// We need an addressable interface to convert the type
			// to a byte slice.  However, the reflect package won't
			// give us an interface on certain things like
			// unexported struct fields in order to enforce
			// visibility rules.  We use unsafe, when available, to
			// bypass these restrictions since this package does not
			// mutate the values.
			vs := v
			if !vs.CanInterface() || !vs.CanAddr() {
				vs = unsafeReflectValue(vs)
			}
			if !UnsafeDisabled {
				vs = vs.Slice(0, numEntries)

				// Use the existing uint8 slice if it can be
				// type asserted.
				iface := vs.Interface()
				if slice, ok := iface.([]uint8); ok {
					buf = slice
					doHexDump = true
					break
				}
			}

			// The underlying data needs to be converted if it can't
			// be type asserted to a uint8 slice.
			doConvert = true
		}

		// Copy and convert the underlying type if needed.
		if doConvert && vt.ConvertibleTo(uint8Type) {
			// Convert and copy each element into a uint8 byte
			// slice.
			buf = make([]uint8, numEntries)
			for i := 0; i < numEntries; i++ {
				vv := v.Index(i)
				buf[i] = uint8(vv.Convert(uint8Type).Uint())
			}
			doHexDump = true
		}
	}

	// Hexdump the entire slice as needed.
	if doHexDump {
		// Re-indent hex.Dump's multi-line output to the current depth
		// and drop the indent that would trail the final newline.
		indent := strings.Repeat(d.cs.Indent, d.depth)
		str := indent + hex.Dump(buf)
		str = strings.Replace(str, "\n", "\n"+indent, -1)
		str = strings.TrimRight(str, d.cs.Indent)
		d.w.Write([]byte(str))
		return
	}

	// Recursively call dump for each item.
	for i := 0; i < numEntries; i++ {
		d.dump(d.unpackValue(v.Index(i)))
		if i < (numEntries - 1) {
			d.w.Write(commaNewlineBytes)
		} else {
			d.w.Write(newlineBytes)
		}
	}
}
|  | ||||
| // dump is the main workhorse for dumping a value.  It uses the passed reflect | ||||
| // value to figure out what kind of object we are dealing with and formats it | ||||
| // appropriately.  It is a recursive function, however circular data structures | ||||
| // are detected and handled properly. | ||||
| func (d *dumpState) dump(v reflect.Value) { | ||||
| 	// Handle invalid reflect values immediately. | ||||
| 	kind := v.Kind() | ||||
| 	if kind == reflect.Invalid { | ||||
| 		d.w.Write(invalidAngleBytes) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// Handle pointers specially. | ||||
| 	if kind == reflect.Ptr { | ||||
| 		d.indent() | ||||
| 		d.dumpPtr(v) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// Print type information unless already handled elsewhere. | ||||
| 	if !d.ignoreNextType { | ||||
| 		d.indent() | ||||
| 		d.w.Write(openParenBytes) | ||||
| 		d.w.Write([]byte(v.Type().String())) | ||||
| 		d.w.Write(closeParenBytes) | ||||
| 		d.w.Write(spaceBytes) | ||||
| 	} | ||||
| 	d.ignoreNextType = false | ||||
|  | ||||
| 	// Display length and capacity if the built-in len and cap functions | ||||
| 	// work with the value's kind and the len/cap itself is non-zero. | ||||
| 	valueLen, valueCap := 0, 0 | ||||
| 	switch v.Kind() { | ||||
| 	case reflect.Array, reflect.Slice, reflect.Chan: | ||||
| 		valueLen, valueCap = v.Len(), v.Cap() | ||||
| 	case reflect.Map, reflect.String: | ||||
| 		valueLen = v.Len() | ||||
| 	} | ||||
| 	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { | ||||
| 		d.w.Write(openParenBytes) | ||||
| 		if valueLen != 0 { | ||||
| 			d.w.Write(lenEqualsBytes) | ||||
| 			printInt(d.w, int64(valueLen), 10) | ||||
| 		} | ||||
| 		if !d.cs.DisableCapacities && valueCap != 0 { | ||||
| 			if valueLen != 0 { | ||||
| 				d.w.Write(spaceBytes) | ||||
| 			} | ||||
| 			d.w.Write(capEqualsBytes) | ||||
| 			printInt(d.w, int64(valueCap), 10) | ||||
| 		} | ||||
| 		d.w.Write(closeParenBytes) | ||||
| 		d.w.Write(spaceBytes) | ||||
| 	} | ||||
|  | ||||
| 	// Call Stringer/error interfaces if they exist and the handle methods flag | ||||
| 	// is enabled | ||||
| 	if !d.cs.DisableMethods { | ||||
| 		if (kind != reflect.Invalid) && (kind != reflect.Interface) { | ||||
| 			if handled := handleMethods(d.cs, d.w, v); handled { | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	switch kind { | ||||
| 	case reflect.Invalid: | ||||
| 		// Do nothing.  We should never get here since invalid has already | ||||
| 		// been handled above. | ||||
|  | ||||
| 	case reflect.Bool: | ||||
| 		printBool(d.w, v.Bool()) | ||||
|  | ||||
| 	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: | ||||
| 		printInt(d.w, v.Int(), 10) | ||||
|  | ||||
| 	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: | ||||
| 		printUint(d.w, v.Uint(), 10) | ||||
|  | ||||
| 	case reflect.Float32: | ||||
| 		printFloat(d.w, v.Float(), 32) | ||||
|  | ||||
| 	case reflect.Float64: | ||||
| 		printFloat(d.w, v.Float(), 64) | ||||
|  | ||||
| 	case reflect.Complex64: | ||||
| 		printComplex(d.w, v.Complex(), 32) | ||||
|  | ||||
| 	case reflect.Complex128: | ||||
| 		printComplex(d.w, v.Complex(), 64) | ||||
|  | ||||
| 	case reflect.Slice: | ||||
| 		if v.IsNil() { | ||||
| 			d.w.Write(nilAngleBytes) | ||||
| 			break | ||||
| 		} | ||||
| 		fallthrough | ||||
|  | ||||
| 	case reflect.Array: | ||||
| 		d.w.Write(openBraceNewlineBytes) | ||||
| 		d.depth++ | ||||
| 		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { | ||||
| 			d.indent() | ||||
| 			d.w.Write(maxNewlineBytes) | ||||
| 		} else { | ||||
| 			d.dumpSlice(v) | ||||
| 		} | ||||
| 		d.depth-- | ||||
| 		d.indent() | ||||
| 		d.w.Write(closeBraceBytes) | ||||
|  | ||||
| 	case reflect.String: | ||||
| 		d.w.Write([]byte(strconv.Quote(v.String()))) | ||||
|  | ||||
| 	case reflect.Interface: | ||||
| 		// The only time we should get here is for nil interfaces due to | ||||
| 		// unpackValue calls. | ||||
| 		if v.IsNil() { | ||||
| 			d.w.Write(nilAngleBytes) | ||||
| 		} | ||||
|  | ||||
| 	case reflect.Ptr: | ||||
| 		// Do nothing.  We should never get here since pointers have already | ||||
| 		// been handled above. | ||||
|  | ||||
| 	case reflect.Map: | ||||
| 		// nil maps should be indicated as different than empty maps | ||||
| 		if v.IsNil() { | ||||
| 			d.w.Write(nilAngleBytes) | ||||
| 			break | ||||
| 		} | ||||
|  | ||||
| 		d.w.Write(openBraceNewlineBytes) | ||||
| 		d.depth++ | ||||
| 		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { | ||||
| 			d.indent() | ||||
| 			d.w.Write(maxNewlineBytes) | ||||
| 		} else { | ||||
| 			numEntries := v.Len() | ||||
| 			keys := v.MapKeys() | ||||
| 			if d.cs.SortKeys { | ||||
| 				sortValues(keys, d.cs) | ||||
| 			} | ||||
| 			for i, key := range keys { | ||||
| 				d.dump(d.unpackValue(key)) | ||||
| 				d.w.Write(colonSpaceBytes) | ||||
| 				d.ignoreNextIndent = true | ||||
| 				d.dump(d.unpackValue(v.MapIndex(key))) | ||||
| 				if i < (numEntries - 1) { | ||||
| 					d.w.Write(commaNewlineBytes) | ||||
| 				} else { | ||||
| 					d.w.Write(newlineBytes) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		d.depth-- | ||||
| 		d.indent() | ||||
| 		d.w.Write(closeBraceBytes) | ||||
|  | ||||
| 	case reflect.Struct: | ||||
| 		d.w.Write(openBraceNewlineBytes) | ||||
| 		d.depth++ | ||||
| 		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { | ||||
| 			d.indent() | ||||
| 			d.w.Write(maxNewlineBytes) | ||||
| 		} else { | ||||
| 			vt := v.Type() | ||||
| 			numFields := v.NumField() | ||||
| 			for i := 0; i < numFields; i++ { | ||||
| 				d.indent() | ||||
| 				vtf := vt.Field(i) | ||||
| 				d.w.Write([]byte(vtf.Name)) | ||||
| 				d.w.Write(colonSpaceBytes) | ||||
| 				d.ignoreNextIndent = true | ||||
| 				d.dump(d.unpackValue(v.Field(i))) | ||||
| 				if i < (numFields - 1) { | ||||
| 					d.w.Write(commaNewlineBytes) | ||||
| 				} else { | ||||
| 					d.w.Write(newlineBytes) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		d.depth-- | ||||
| 		d.indent() | ||||
| 		d.w.Write(closeBraceBytes) | ||||
|  | ||||
| 	case reflect.Uintptr: | ||||
| 		printHexPtr(d.w, uintptr(v.Uint())) | ||||
|  | ||||
| 	case reflect.UnsafePointer, reflect.Chan, reflect.Func: | ||||
| 		printHexPtr(d.w, v.Pointer()) | ||||
|  | ||||
| 	// There were not any other types at the time this code was written, but | ||||
| 	// fall back to letting the default fmt package handle it in case any new | ||||
| 	// types are added. | ||||
| 	default: | ||||
| 		if v.CanInterface() { | ||||
| 			fmt.Fprintf(d.w, "%v", v.Interface()) | ||||
| 		} else { | ||||
| 			fmt.Fprintf(d.w, "%v", v.String()) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // fdump is a helper function to consolidate the logic from the various public | ||||
| // methods which take varying writers and config states. | ||||
| func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { | ||||
| 	for _, arg := range a { | ||||
| 		if arg == nil { | ||||
| 			w.Write(interfaceBytes) | ||||
| 			w.Write(spaceBytes) | ||||
| 			w.Write(nilAngleBytes) | ||||
| 			w.Write(newlineBytes) | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		d := dumpState{w: w, cs: cs} | ||||
| 		d.pointers = make(map[uintptr]int) | ||||
| 		d.dump(reflect.ValueOf(arg)) | ||||
| 		d.w.Write(newlineBytes) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Fdump formats and displays the passed arguments to io.Writer w.  It formats | ||||
| // exactly the same as Dump. | ||||
| func Fdump(w io.Writer, a ...interface{}) { | ||||
| 	fdump(&Config, w, a...) | ||||
| } | ||||
|  | ||||
| // Sdump returns a string with the passed arguments formatted exactly the same | ||||
| // as Dump. | ||||
| func Sdump(a ...interface{}) string { | ||||
| 	var buf bytes.Buffer | ||||
| 	fdump(&Config, &buf, a...) | ||||
| 	return buf.String() | ||||
| } | ||||
|  | ||||
| /* | ||||
| Dump displays the passed parameters to standard out with newlines, customizable | ||||
| indentation, and additional debug information such as complete types and all | ||||
| pointer addresses used to indirect to the final value.  It provides the | ||||
| following features over the built-in printing facilities provided by the fmt | ||||
| package: | ||||
|  | ||||
| 	* Pointers are dereferenced and followed | ||||
| 	* Circular data structures are detected and handled properly | ||||
| 	* Custom Stringer/error interfaces are optionally invoked, including | ||||
| 	  on unexported types | ||||
| 	* Custom types which only implement the Stringer/error interfaces via | ||||
| 	  a pointer receiver are optionally invoked when passing non-pointer | ||||
| 	  variables | ||||
| 	* Byte arrays and slices are dumped like the hexdump -C command which | ||||
| 	  includes offsets, byte values in hex, and ASCII output | ||||
|  | ||||
| The configuration options are controlled by an exported package global, | ||||
| spew.Config.  See ConfigState for options documentation. | ||||
|  | ||||
| See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to | ||||
| get the formatted result as a string. | ||||
| */ | ||||
| func Dump(a ...interface{}) { | ||||
| 	fdump(&Config, os.Stdout, a...) | ||||
| } | ||||
							
								
								
									
										419
									
								
								vendor/github.com/davecgh/go-spew/spew/format.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										419
									
								
								vendor/github.com/davecgh/go-spew/spew/format.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,419 @@ | ||||
| /* | ||||
|  * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> | ||||
|  * | ||||
|  * Permission to use, copy, modify, and distribute this software for any | ||||
|  * purpose with or without fee is hereby granted, provided that the above | ||||
|  * copyright notice and this permission notice appear in all copies. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||||
|  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||||
|  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||||
|  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||||
|  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||||
|  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||||
|  */ | ||||
|  | ||||
| package spew | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
// supportedFlags is a list of all the character flags supported by fmt package
// ('0' zero pad, '-' left justify, '+' always show sign, '#' alternate form,
// ' ' space for elided sign).
const supportedFlags = "0-+# "
|  | ||||
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation.  The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
	value          interface{}     // original value being formatted
	fs             fmt.State       // fmt state for the in-flight Format call
	depth          int             // current nesting depth in the traversal
	pointers       map[uintptr]int // visited pointer addr -> depth, for cycle detection
	ignoreNextType bool            // suppress type info for the next formatted value
	cs             *ConfigState    // configuration options in effect
}
|  | ||||
| // buildDefaultFormat recreates the original format string without precision | ||||
| // and width information to pass in to fmt.Sprintf in the case of an | ||||
| // unrecognized type.  Unless new types are added to the language, this | ||||
| // function won't ever be called. | ||||
| func (f *formatState) buildDefaultFormat() (format string) { | ||||
| 	buf := bytes.NewBuffer(percentBytes) | ||||
|  | ||||
| 	for _, flag := range supportedFlags { | ||||
| 		if f.fs.Flag(int(flag)) { | ||||
| 			buf.WriteRune(flag) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	buf.WriteRune('v') | ||||
|  | ||||
| 	format = buf.String() | ||||
| 	return format | ||||
| } | ||||
|  | ||||
| // constructOrigFormat recreates the original format string including precision | ||||
| // and width information to pass along to the standard fmt package.  This allows | ||||
| // automatic deferral of all format strings this package doesn't support. | ||||
| func (f *formatState) constructOrigFormat(verb rune) (format string) { | ||||
| 	buf := bytes.NewBuffer(percentBytes) | ||||
|  | ||||
| 	for _, flag := range supportedFlags { | ||||
| 		if f.fs.Flag(int(flag)) { | ||||
| 			buf.WriteRune(flag) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if width, ok := f.fs.Width(); ok { | ||||
| 		buf.WriteString(strconv.Itoa(width)) | ||||
| 	} | ||||
|  | ||||
| 	if precision, ok := f.fs.Precision(); ok { | ||||
| 		buf.Write(precisionBytes) | ||||
| 		buf.WriteString(strconv.Itoa(precision)) | ||||
| 	} | ||||
|  | ||||
| 	buf.WriteRune(verb) | ||||
|  | ||||
| 	format = buf.String() | ||||
| 	return format | ||||
| } | ||||
|  | ||||
| // unpackValue returns values inside of non-nil interfaces when possible and | ||||
| // ensures that types for values which have been unpacked from an interface | ||||
| // are displayed when the show types flag is also set. | ||||
| // This is useful for data types like structs, arrays, slices, and maps which | ||||
| // can contain varying types packed inside an interface. | ||||
| func (f *formatState) unpackValue(v reflect.Value) reflect.Value { | ||||
| 	if v.Kind() == reflect.Interface { | ||||
| 		f.ignoreNextType = false | ||||
| 		if !v.IsNil() { | ||||
| 			v = v.Elem() | ||||
| 		} | ||||
| 	} | ||||
| 	return v | ||||
| } | ||||
|  | ||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
//
// It walks the pointer chain down to the final non-pointer value, recording
// each address in f.pointers so circular references are detected, then emits
// the type or indirection level (depending on the '#' flag), the optional
// address chain (when '+' is set), and finally the dereferenced value.
func (f *formatState) formatPtr(v reflect.Value) {
	// Display nil if top level pointer is nil.
	showTypes := f.fs.Flag('#')
	if v.IsNil() && (!showTypes || f.ignoreNextType) {
		f.fs.Write(nilAngleBytes)
		return
	}

	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range f.pointers {
		if depth >= f.depth {
			delete(f.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to possibly show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		// A previously seen address at a shallower depth means a cycle;
		// back out the count for this repeated pointer.
		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
			cycleFound = true
			indirects--
			break
		}
		f.pointers[addr] = f.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type or indirection level depending on flags.
	if showTypes && !f.ignoreNextType {
		f.fs.Write(openParenBytes)
		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
		f.fs.Write([]byte(ve.Type().String()))
		f.fs.Write(closeParenBytes)
	} else {
		// When the chain ended early (nil or cycle), ve still has pointer
		// type; count its remaining '*'s so the level shown is accurate.
		if nilFound || cycleFound {
			indirects += strings.Count(ve.Type().String(), "*")
		}
		f.fs.Write(openAngleBytes)
		f.fs.Write([]byte(strings.Repeat("*", indirects)))
		f.fs.Write(closeAngleBytes)
	}

	// Display pointer information depending on flags.
	if f.fs.Flag('+') && (len(pointerChain) > 0) {
		f.fs.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				f.fs.Write(pointerChainBytes)
			}
			printHexPtr(f.fs, addr)
		}
		f.fs.Write(closeParenBytes)
	}

	// Display dereferenced value.
	switch {
	case nilFound:
		f.fs.Write(nilAngleBytes)

	case cycleFound:
		f.fs.Write(circularShortBytes)

	default:
		f.ignoreNextType = true
		f.format(ve)
	}
}
|  | ||||
// format is the main workhorse for providing the Formatter interface.  It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately.  It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		f.fs.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		f.formatPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !f.ignoreNextType && f.fs.Flag('#') {
		f.fs.Write(openParenBytes)
		f.fs.Write([]byte(v.Type().String()))
		f.fs.Write(closeParenBytes)
	}
	f.ignoreNextType = false

	// Call Stringer/error interfaces if they exist and the handle methods
	// flag is enabled.
	if !f.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(f.cs, f.fs, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing.  We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(f.fs, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(f.fs, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(f.fs, v.Uint(), 10)

	case reflect.Float32:
		printFloat(f.fs, v.Float(), 32)

	case reflect.Float64:
		printFloat(f.fs, v.Float(), 64)

	case reflect.Complex64:
		printComplex(f.fs, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(f.fs, v.Complex(), 64)

	case reflect.Slice:
		// nil slices are shown as <nil>; non-nil ones share the array path.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		fallthrough

	case reflect.Array:
		f.fs.Write(openBracketBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			numEntries := v.Len()
			for i := 0; i < numEntries; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(v.Index(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBracketBytes)

	case reflect.String:
		f.fs.Write([]byte(v.String()))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing.  We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}

		f.fs.Write(openMapBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			keys := v.MapKeys()
			// Map iteration order is random; sorting keys makes the
			// output deterministic when the SortKeys option is set.
			if f.cs.SortKeys {
				sortValues(keys, f.cs)
			}
			for i, key := range keys {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(key))
				f.fs.Write(colonBytes)
				f.ignoreNextType = true
				f.format(f.unpackValue(v.MapIndex(key)))
			}
		}
		f.depth--
		f.fs.Write(closeMapBytes)

	case reflect.Struct:
		numFields := v.NumField()
		f.fs.Write(openBraceBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			vt := v.Type()
			for i := 0; i < numFields; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				vtf := vt.Field(i)
				// Field names are only shown for %+v and %#v.
				if f.fs.Flag('+') || f.fs.Flag('#') {
					f.fs.Write([]byte(vtf.Name))
					f.fs.Write(colonBytes)
				}
				f.format(f.unpackValue(v.Field(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBraceBytes)

	case reflect.Uintptr:
		printHexPtr(f.fs, uintptr(v.Uint()))

	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(f.fs, v.Pointer())

	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it if any get added.
	default:
		format := f.buildDefaultFormat()
		if v.CanInterface() {
			fmt.Fprintf(f.fs, format, v.Interface())
		} else {
			fmt.Fprintf(f.fs, format, v.String())
		}
	}
}
|  | ||||
| // Format satisfies the fmt.Formatter interface. See NewFormatter for usage | ||||
| // details. | ||||
| func (f *formatState) Format(fs fmt.State, verb rune) { | ||||
| 	f.fs = fs | ||||
|  | ||||
| 	// Use standard formatting for verbs that are not v. | ||||
| 	if verb != 'v' { | ||||
| 		format := f.constructOrigFormat(verb) | ||||
| 		fmt.Fprintf(fs, format, f.value) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	if f.value == nil { | ||||
| 		if fs.Flag('#') { | ||||
| 			fs.Write(interfaceBytes) | ||||
| 		} | ||||
| 		fs.Write(nilAngleBytes) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	f.format(reflect.ValueOf(f.value)) | ||||
| } | ||||
|  | ||||
| // newFormatter is a helper function to consolidate the logic from the various | ||||
| // public methods which take varying config states. | ||||
| func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { | ||||
| 	fs := &formatState{value: v, cs: cs} | ||||
| 	fs.pointers = make(map[uintptr]int) | ||||
| 	return fs | ||||
| } | ||||
|  | ||||
| /* | ||||
| NewFormatter returns a custom formatter that satisfies the fmt.Formatter | ||||
| interface.  As a result, it integrates cleanly with standard fmt package | ||||
| printing functions.  The formatter is useful for inline printing of smaller data | ||||
| types similar to the standard %v format specifier. | ||||
|  | ||||
| The custom formatter only responds to the %v (most compact), %+v (adds pointer | ||||
| addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb | ||||
| combinations.  Any other verbs such as %x and %q will be sent to the the | ||||
| standard fmt package for formatting.  In addition, the custom formatter ignores | ||||
| the width and precision arguments (however they will still work on the format | ||||
| specifiers not handled by the custom formatter). | ||||
|  | ||||
| Typically this function shouldn't be called directly.  It is much easier to make | ||||
| use of the custom formatter by calling one of the convenience functions such as | ||||
| Printf, Println, or Fprintf. | ||||
| */ | ||||
| func NewFormatter(v interface{}) fmt.Formatter { | ||||
| 	return newFormatter(&Config, v) | ||||
| } | ||||
							
								
								
									
										148
									
								
								vendor/github.com/davecgh/go-spew/spew/spew.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										148
									
								
								vendor/github.com/davecgh/go-spew/spew/spew.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,148 @@ | ||||
| /* | ||||
|  * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> | ||||
|  * | ||||
|  * Permission to use, copy, modify, and distribute this software for any | ||||
|  * purpose with or without fee is hereby granted, provided that the above | ||||
|  * copyright notice and this permission notice appear in all copies. | ||||
|  * | ||||
|  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||||
|  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||||
|  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||||
|  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||||
|  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||||
|  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||||
|  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||||
|  */ | ||||
|  | ||||
| package spew | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| ) | ||||
|  | ||||
| // Errorf is a wrapper for fmt.Errorf that treats each argument as if it were | ||||
| // passed with a default Formatter interface returned by NewFormatter.  It | ||||
| // returns the formatted string as a value that satisfies error.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) | ||||
| func Errorf(format string, a ...interface{}) (err error) { | ||||
| 	return fmt.Errorf(format, convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Fprint is a wrapper for fmt.Fprint that treats each argument as if it were | ||||
| // passed with a default Formatter interface returned by NewFormatter.  It | ||||
| // returns the number of bytes written and any write error encountered.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) | ||||
| func Fprint(w io.Writer, a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Fprint(w, convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were | ||||
| // passed with a default Formatter interface returned by NewFormatter.  It | ||||
| // returns the number of bytes written and any write error encountered.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) | ||||
| func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Fprintf(w, format, convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it | ||||
| // passed with a default Formatter interface returned by NewFormatter.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) | ||||
| func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Fprintln(w, convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Print is a wrapper for fmt.Print that treats each argument as if it were | ||||
| // passed with a default Formatter interface returned by NewFormatter.  It | ||||
| // returns the number of bytes written and any write error encountered.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) | ||||
| func Print(a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Print(convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Printf is a wrapper for fmt.Printf that treats each argument as if it were | ||||
| // passed with a default Formatter interface returned by NewFormatter.  It | ||||
| // returns the number of bytes written and any write error encountered.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) | ||||
| func Printf(format string, a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Printf(format, convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Println is a wrapper for fmt.Println that treats each argument as if it were | ||||
| // passed with a default Formatter interface returned by NewFormatter.  It | ||||
| // returns the number of bytes written and any write error encountered.  See | ||||
| // NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) | ||||
| func Println(a ...interface{}) (n int, err error) { | ||||
| 	return fmt.Println(convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Sprint is a wrapper for fmt.Sprint that treats each argument as if it were | ||||
| // passed with a default Formatter interface returned by NewFormatter.  It | ||||
| // returns the resulting string.  See NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) | ||||
| func Sprint(a ...interface{}) string { | ||||
| 	return fmt.Sprint(convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were | ||||
| // passed with a default Formatter interface returned by NewFormatter.  It | ||||
| // returns the resulting string.  See NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) | ||||
| func Sprintf(format string, a ...interface{}) string { | ||||
| 	return fmt.Sprintf(format, convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it | ||||
| // were passed with a default Formatter interface returned by NewFormatter.  It | ||||
| // returns the resulting string.  See NewFormatter for formatting details. | ||||
| // | ||||
| // This function is shorthand for the following syntax: | ||||
| // | ||||
| //	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) | ||||
| func Sprintln(a ...interface{}) string { | ||||
| 	return fmt.Sprintln(convertArgs(a)...) | ||||
| } | ||||
|  | ||||
| // convertArgs accepts a slice of arguments and returns a slice of the same | ||||
| // length with each argument converted to a default spew Formatter interface. | ||||
| func convertArgs(args []interface{}) (formatters []interface{}) { | ||||
| 	formatters = make([]interface{}, len(args)) | ||||
| 	for index, arg := range args { | ||||
| 		formatters[index] = NewFormatter(arg) | ||||
| 	} | ||||
| 	return formatters | ||||
| } | ||||
							
								
								
									
										24
									
								
								vendor/github.com/kirillDanshin/dlog/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								vendor/github.com/kirillDanshin/dlog/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | ||||
| # Compiled Object files, Static and Dynamic libs (Shared Objects) | ||||
| *.o | ||||
| *.a | ||||
| *.so | ||||
|  | ||||
| # Folders | ||||
| _obj | ||||
| _test | ||||
|  | ||||
| # Architecture specific extensions/prefixes | ||||
| *.[568vq] | ||||
| [568vq].out | ||||
|  | ||||
| *.cgo1.go | ||||
| *.cgo2.c | ||||
| _cgo_defun.c | ||||
| _cgo_gotypes.go | ||||
| _cgo_export.* | ||||
|  | ||||
| _testmain.go | ||||
|  | ||||
| *.exe | ||||
| *.test | ||||
| *.prof | ||||
							
								
								
									
										201
									
								
								vendor/github.com/kirillDanshin/dlog/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										201
									
								
								vendor/github.com/kirillDanshin/dlog/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,201 @@ | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "{}" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright {yyyy} {name of copyright owner} | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
										51
									
								
								vendor/github.com/kirillDanshin/dlog/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										51
									
								
								vendor/github.com/kirillDanshin/dlog/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,51 @@ | ||||
| # dlog [](https://godoc.org/github.com/kirillDanshin/dlog) [](https://goreportcard.com/report/github.com/kirillDanshin/dlog) | ||||
| Simple build-time controlled debug log | ||||
|  | ||||
| # How to use | ||||
| ### Unbuffered | ||||
| ```go | ||||
| package main | ||||
|  | ||||
| import "github.com/kirillDanshin/dlog" | ||||
|  | ||||
| func main() { | ||||
| 	a := []int{2, 4, 8, 16, 32, 64, 128, 256, 512} | ||||
| 	b := "some string" | ||||
| 	 | ||||
| 	dlog.D(a)		// D'ump `a` | ||||
| 	dlog.P(b)		// P'rint `b` | ||||
| 	dlog.F("%s format", b)	// F'ormatted print | ||||
| 	dlog.Ln(b)		// print'Ln `b` | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### Buffered | ||||
| ```go | ||||
| package main | ||||
|  | ||||
| import "github.com/kirillDanshin/dlog" | ||||
|  | ||||
| func main() { | ||||
| 	log := dlog.NewBuffered() | ||||
| 	defer log.Release() | ||||
| 	 | ||||
| 	log.D(a)		// D'ump `a` | ||||
| 	log.P(b)		// P'rint `b` | ||||
| 	log.F("%s format", b)	// F'ormatted print | ||||
| 	log.Ln(b)		// print'Ln `b` | ||||
|  | ||||
| 	dlog.Ln(log) // or fmt.Println("log") etc. | ||||
| } | ||||
| ``` | ||||
|  | ||||
| # Release | ||||
| To disable logging in release build just run | ||||
| ```bash | ||||
| 	go build | ||||
| ``` | ||||
|  | ||||
| # Debug | ||||
| To enable logging in debug build run | ||||
| ```bash | ||||
| 	go build -tags "debug" | ||||
| ``` | ||||
							
								
								
									
										21
									
								
								vendor/github.com/kirillDanshin/dlog/buffered_disabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								vendor/github.com/kirillDanshin/dlog/buffered_disabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,21 @@ | ||||
| // +build !debug | ||||
|  | ||||
| package dlog | ||||
|  | ||||
| // Ln is a build-time enabled println | ||||
| func (b *Buffered) Ln(v ...interface{}) {} | ||||
|  | ||||
| // P is a build-time enabled print | ||||
| func (b *Buffered) P(v ...interface{}) {} | ||||
|  | ||||
| // F is a build-time enabled printf | ||||
| func (b *Buffered) F(f string, v ...interface{}) {} | ||||
|  | ||||
| // D dumps a value | ||||
| func (b *Buffered) D(v ...interface{}) {} | ||||
|  | ||||
| func (b *Buffered) prepare() {} | ||||
|  | ||||
| func (b *Buffered) String() string { | ||||
| 	return "" | ||||
| } | ||||
							
								
								
									
										49
									
								
								vendor/github.com/kirillDanshin/dlog/buffered_enabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										49
									
								
								vendor/github.com/kirillDanshin/dlog/buffered_enabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,49 @@ | ||||
| // +build debug | ||||
|  | ||||
| package dlog | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
| // Ln is a build-time enabled println | ||||
| func (b *Buffered) Ln(v ...interface{}) { | ||||
| 	b.prepare() | ||||
| 	b.Lock() | ||||
| 	fmt.Fprintln(b.bb, v...) | ||||
| 	b.Unlock() | ||||
| } | ||||
|  | ||||
| // P is a build-time enabled print | ||||
| func (b *Buffered) P(v ...interface{}) { | ||||
| 	b.prepare() | ||||
| 	b.Lock() | ||||
| 	fmt.Fprint(b.bb, v...) | ||||
| 	b.Unlock() | ||||
| } | ||||
|  | ||||
| // F is a build-time enabled printf | ||||
| func (b *Buffered) F(f string, v ...interface{}) { | ||||
| 	b.prepare() | ||||
| 	b.Lock() | ||||
| 	fmt.Fprintf(b.bb, f+"\n", v...) | ||||
| 	b.Unlock() | ||||
| } | ||||
|  | ||||
| // D dumps a value | ||||
| func (b *Buffered) D(v ...interface{}) { | ||||
| 	b.prepare() | ||||
| 	b.Lock() | ||||
| 	for _, v := range v { | ||||
| 		fmt.Fprintf(b.bb, "%+#v\n", v) | ||||
| 	} | ||||
| 	b.Unlock() | ||||
| } | ||||
|  | ||||
| func (b *Buffered) prepare() { | ||||
| 	if b.bb == nil { | ||||
| 		b = NewBuffered() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (b *Buffered) String() string { | ||||
| 	return string(b.bb.Bytes()) | ||||
| } | ||||
							
								
								
									
										30
									
								
								vendor/github.com/kirillDanshin/dlog/buffered_uni.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										30
									
								
								vendor/github.com/kirillDanshin/dlog/buffered_uni.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,30 @@ | ||||
| package dlog | ||||
|  | ||||
| import ( | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/valyala/bytebufferpool" | ||||
| ) | ||||
|  | ||||
| // Buffered thread-safe dlog | ||||
| type Buffered struct { | ||||
| 	bb *bytebufferpool.ByteBuffer | ||||
| 	sync.RWMutex | ||||
| } | ||||
|  | ||||
| // NewBuffered dlog | ||||
| func NewBuffered() *Buffered { | ||||
| 	return &Buffered{ | ||||
| 		bb: bytebufferpool.Get(), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Release the buffer for dlog | ||||
| func (b *Buffered) Release() { | ||||
| 	if b.bb == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	b.Lock() | ||||
| 	bytebufferpool.Put(b.bb) | ||||
| 	b.Unlock() | ||||
| } | ||||
							
								
								
									
										14
									
								
								vendor/github.com/kirillDanshin/dlog/caller.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										14
									
								
								vendor/github.com/kirillDanshin/dlog/caller.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,14 @@ | ||||
| package dlog | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
| // Caller info | ||||
| type Caller struct { | ||||
| 	File     string | ||||
| 	Line     int | ||||
| 	FuncName string | ||||
| } | ||||
|  | ||||
| func (c *Caller) String() string { | ||||
| 	return fmt.Sprintf("Called from %s:%d (%s)", c.File, c.Line, c.FuncName) | ||||
| } | ||||
							
								
								
									
										11
									
								
								vendor/github.com/kirillDanshin/dlog/consts.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								vendor/github.com/kirillDanshin/dlog/consts.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,11 @@ | ||||
| package dlog | ||||
|  | ||||
| const ( | ||||
| 	// CallerUnknown returned when caller isn't determined | ||||
| 	CallerUnknown = "UNKNOWN" | ||||
|  | ||||
| 	// StateEnabled is to check if dlog.State enabled | ||||
| 	StateEnabled = "enabled" | ||||
| 	// StateDisabled is to check if dlog.State disabled | ||||
| 	StateDisabled = "disabled" | ||||
| ) | ||||
							
								
								
									
										15
									
								
								vendor/github.com/kirillDanshin/dlog/dlog_disabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								vendor/github.com/kirillDanshin/dlog/dlog_disabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,15 @@ | ||||
| // +build !debug | ||||
|  | ||||
| package dlog | ||||
|  | ||||
| // D dumps a value | ||||
| func D(v ...interface{}) {} | ||||
|  | ||||
| // F is a build-time disabled printf | ||||
| func F(f string, v ...interface{}) {} | ||||
|  | ||||
| // P is a build-time disabled print | ||||
| func P(v ...interface{}) {} | ||||
|  | ||||
| // Ln is a build-time disabled println | ||||
| func Ln(v ...interface{}) {} | ||||
							
								
								
									
										38
									
								
								vendor/github.com/kirillDanshin/dlog/dlog_enabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										38
									
								
								vendor/github.com/kirillDanshin/dlog/dlog_enabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,38 @@ | ||||
| // +build debug | ||||
|  | ||||
| package dlog | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"log" | ||||
|  | ||||
| 	"github.com/davecgh/go-spew/spew" | ||||
| ) | ||||
|  | ||||
| var spewInstance = spew.ConfigState{ | ||||
| 	Indent: "\t", | ||||
| } | ||||
|  | ||||
| // D dumps a value | ||||
| func D(v ...interface{}) { | ||||
| 	for _, v := range v { | ||||
| 		spewInstance.Dump(v) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // F is a build-time enabled printf | ||||
| func F(f string, v ...interface{}) { | ||||
| 	// log.Printf(f, v...) | ||||
| 	spewInstance.Printf(f+"\n", v...) | ||||
| } | ||||
|  | ||||
| // P is a build-time enabled print | ||||
| func P(v ...interface{}) { | ||||
| 	log.Print(v...) | ||||
| 	fmt.Println() | ||||
| } | ||||
|  | ||||
| // Ln is a build-time enabled println | ||||
| func Ln(v ...interface{}) { | ||||
| 	log.Println(v...) | ||||
| } | ||||
							
								
								
									
										5
									
								
								vendor/github.com/kirillDanshin/dlog/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								vendor/github.com/kirillDanshin/dlog/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,5 @@ | ||||
| // Package dlog is a build-time | ||||
| // enabled or disabled logger. | ||||
| // Godoc shows disabled state | ||||
| // because it built in by default. | ||||
| package dlog | ||||
							
								
								
									
										6
									
								
								vendor/github.com/kirillDanshin/dlog/state_disabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										6
									
								
								vendor/github.com/kirillDanshin/dlog/state_disabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,6 @@ | ||||
| // +build !debug | ||||
|  | ||||
| package dlog | ||||
|  | ||||
| // State handles dlog state. Can be "disabled" or "enabled". | ||||
| const State = "disabled" | ||||
							
								
								
									
										6
									
								
								vendor/github.com/kirillDanshin/dlog/state_enabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										6
									
								
								vendor/github.com/kirillDanshin/dlog/state_enabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,6 @@ | ||||
| // +build debug | ||||
|  | ||||
| package dlog | ||||
|  | ||||
| // State handles dlog state. Can be "disabled" or "enabled". | ||||
| const State = "enabled" | ||||
							
								
								
									
										25
									
								
								vendor/github.com/kirillDanshin/dlog/withCaller_disabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										25
									
								
								vendor/github.com/kirillDanshin/dlog/withCaller_disabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,25 @@ | ||||
| // +build !debug | ||||
|  | ||||
| package dlog | ||||
|  | ||||
| // D dumps a value | ||||
| func (*WithCaller) D(v ...interface{}) {} | ||||
|  | ||||
| // F is a build-time enabled printf | ||||
| func (*WithCaller) F(f string, v ...interface{}) {} | ||||
|  | ||||
| // P is a build-time enabled print | ||||
| func (*WithCaller) P(v ...interface{}) {} | ||||
|  | ||||
| // Ln is a build-time enabled println | ||||
| func (*WithCaller) Ln(v ...interface{}) {} | ||||
|  | ||||
| // GetCaller is a build-time disabled caller determining. | ||||
| // Returns caller's file, line and func name | ||||
| func GetCaller(_ ...int) (*Caller, bool) { | ||||
| 	return &Caller{ | ||||
| 		File:     CallerUnknown, | ||||
| 		Line:     0, | ||||
| 		FuncName: CallerUnknown, | ||||
| 	}, false | ||||
| } | ||||
							
								
								
									
										70
									
								
								vendor/github.com/kirillDanshin/dlog/withCaller_enabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										70
									
								
								vendor/github.com/kirillDanshin/dlog/withCaller_enabled.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,70 @@ | ||||
| // +build debug | ||||
|  | ||||
| package dlog | ||||
|  | ||||
| import ( | ||||
| 	"log" | ||||
| 	"runtime" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/kirillDanshin/myutils" | ||||
| ) | ||||
|  | ||||
| // D dumps a value | ||||
| func (*WithCaller) D(v ...interface{}) { | ||||
| 	c, _ := GetCaller() | ||||
| 	log.Print(c, ": [") | ||||
| 	for _, v := range v { | ||||
| 		spewInstance.Dump(v) | ||||
| 	} | ||||
| 	log.Println("]") | ||||
| } | ||||
|  | ||||
| // F is a build-time enabled printf | ||||
| func (*WithCaller) F(f string, v ...interface{}) { | ||||
| 	c, _ := GetCaller() | ||||
| 	// log.Printf(myutils.Concat(c.String(), "[\n\t", f, "\n]"), v...) | ||||
| 	spewInstance.Printf(myutils.Concat(c.String(), "[\n\t", f, "\n]"), v...) | ||||
| } | ||||
|  | ||||
| // P is a build-time enabled print | ||||
| func (*WithCaller) P(v ...interface{}) { | ||||
| 	c, _ := GetCaller() | ||||
| 	log.Print(c, ": [\t") | ||||
| 	log.Print(v...) | ||||
| 	log.Println("]") | ||||
| } | ||||
|  | ||||
| // Ln is a build-time enabled println | ||||
| func (*WithCaller) Ln(v ...interface{}) { | ||||
| 	c, _ := GetCaller() | ||||
| 	log.Print(c, ": [\t") | ||||
| 	log.Println(v...) | ||||
| 	log.Println("]") | ||||
| } | ||||
|  | ||||
| // GetCaller returns caller's file, line and func name | ||||
| func GetCaller(stackBack ...int) (*Caller, bool) { | ||||
| 	sb := 2 | ||||
| 	if len(stackBack) > 0 { | ||||
| 		sb = stackBack[0] + 1 | ||||
| 	} | ||||
| 	pc, file, line, ok := runtime.Caller(sb) | ||||
| 	if !ok { | ||||
| 		return &Caller{ | ||||
| 			File:     CallerUnknown, | ||||
| 			Line:     0, | ||||
| 			FuncName: CallerUnknown, | ||||
| 		}, false | ||||
| 	} | ||||
|  | ||||
| 	if li := strings.LastIndex(file, "/"); li > 0 { | ||||
| 		file = file[li+1:] | ||||
| 	} | ||||
|  | ||||
| 	return &Caller{ | ||||
| 		File:     file, | ||||
| 		Line:     line, | ||||
| 		FuncName: runtime.FuncForPC(pc).Name(), | ||||
| 	}, true | ||||
| } | ||||
							
								
								
									
										4
									
								
								vendor/github.com/kirillDanshin/dlog/withCaller_uni.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										4
									
								
								vendor/github.com/kirillDanshin/dlog/withCaller_uni.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,4 @@ | ||||
| package dlog | ||||
|  | ||||
| // WithCaller is a dlog with caller info prefix | ||||
| type WithCaller struct{} | ||||
							
								
								
									
										24
									
								
								vendor/github.com/kirillDanshin/myutils/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								vendor/github.com/kirillDanshin/myutils/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | ||||
| # Compiled Object files, Static and Dynamic libs (Shared Objects) | ||||
| *.o | ||||
| *.a | ||||
| *.so | ||||
|  | ||||
| # Folders | ||||
| _obj | ||||
| _test | ||||
|  | ||||
| # Architecture specific extensions/prefixes | ||||
| *.[568vq] | ||||
| [568vq].out | ||||
|  | ||||
| *.cgo1.go | ||||
| *.cgo2.c | ||||
| _cgo_defun.c | ||||
| _cgo_gotypes.go | ||||
| _cgo_export.* | ||||
|  | ||||
| _testmain.go | ||||
|  | ||||
| *.exe | ||||
| *.test | ||||
| *.prof | ||||
							
								
								
									
										201
									
								
								vendor/github.com/kirillDanshin/myutils/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										201
									
								
								vendor/github.com/kirillDanshin/myutils/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,201 @@ | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "{}" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright 2016 Kirill Danshin | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
										2
									
								
								vendor/github.com/kirillDanshin/myutils/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										2
									
								
								vendor/github.com/kirillDanshin/myutils/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,2 @@ | ||||
| # myutils | ||||
| My Go utils | ||||
							
								
								
									
										28
									
								
								vendor/github.com/kirillDanshin/myutils/cpuprof.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										28
									
								
								vendor/github.com/kirillDanshin/myutils/cpuprof.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,28 @@ | ||||
| package myutils | ||||
|  | ||||
| import ( | ||||
| 	"flag" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"runtime/pprof" | ||||
| ) | ||||
|  | ||||
// cpupprof holds the CPU profile output file path, set via the
// -cpupprof command-line flag; empty means profiling is disabled.
var cpupprof = flag.String("cpupprof", "", "CPU profile output file path")

// CPUProf starts CPU profiling when the -cpupprof flag names an output
// file, and returns a cleanup function the caller must defer. When the
// flag is empty the returned function is a no-op.
//
// The process exits via log.Fatalf if the output file cannot be created
// or profiling cannot be started (the original silently ignored the
// StartCPUProfile error and leaked the file in that case).
func CPUProf() func() {
	flag.Parse()
	out := *cpupprof
	if out == "" {
		return func() {}
	}
	file, err := os.Create(out)
	if err != nil {
		log.Fatalf("can't open CPU profile file %q", out)
	}
	if err := pprof.StartCPUProfile(file); err != nil {
		// e.g. profiling is already active for this process
		file.Close()
		log.Fatalf("can't start CPU profile: %s", err)
	}
	return func() {
		pprof.StopCPUProfile()
		file.Close()
	}
}
							
								
								
									
										25
									
								
								vendor/github.com/kirillDanshin/myutils/error_handlers.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										25
									
								
								vendor/github.com/kirillDanshin/myutils/error_handlers.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,25 @@ | ||||
| package myutils | ||||
|  | ||||
| import ( | ||||
| 	"log" | ||||
| ) | ||||
|  | ||||
// LogFatalError terminates the program via log.Fatalf when err is
// non-nil; a nil error is a no-op. It is shorthand for:
//     if err != nil {
//         log.Fatalf("Error: %s", err)
//     }
func LogFatalError(err error) {
	if err == nil {
		return
	}
	log.Fatalf("Error: %s", err)
}
|  | ||||
// RequiredStrFatal terminates the program via log.Fatalf when str is
// empty; name identifies the missing value in the message. It is
// shorthand for:
//     if str == "" {
//         log.Fatalf("Error: %s is empty", name)
//     }
func RequiredStrFatal(name, str string) {
	if str != "" {
		return
	}
	log.Fatalf("Error: %s is empty", name)
}
							
								
								
									
										39
									
								
								vendor/github.com/kirillDanshin/myutils/multival_to_singleval.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										39
									
								
								vendor/github.com/kirillDanshin/myutils/multival_to_singleval.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,39 @@ | ||||
| package myutils | ||||
|  | ||||
// First returns the first of its arguments, or nil when called with
// none. It is meant to pick one value out of a multi-value return:
//     myutils.First(ab())
func First(args ...interface{}) interface{} {
	if len(args) > 0 {
		return args[0]
	}
	return nil
}
|  | ||||
// Last returns the last of its arguments, or nil when called with
// none. It is meant to pick one value out of a multi-value return:
//     myutils.Last(ab())
// Fixes an off-by-one in the original, which indexed args[len(args)]
// and therefore panicked on every non-empty call (the doc comment
// also wrongly said "first argument").
func Last(args ...interface{}) interface{} {
	if len(args) == 0 {
		return nil
	}
	return args[len(args)-1]
}
|  | ||||
// Pick returns the argument at position index out of a multi-value
// return, e.g.
//     myutils.Pick(1, ab())
// It returns nil when index is out of range (including negative)
// instead of panicking, matching First and Last, which return nil
// when there is no value to give back.
func Pick(index int, args ...interface{}) interface{} {
	if index < 0 || index >= len(args) {
		return nil
	}

	return args[index]
}
|  | ||||
// Slice packs a function's multi-value return into a []interface{}
// so individual results can be indexed, e.g.
//     myutils.Slice(ab())[1]
// The returned slice is the variadic argument slice itself, not a copy.
func Slice(args ...interface{}) []interface{} {
	out := args
	return out
}
							
								
								
									
										31
									
								
								vendor/github.com/kirillDanshin/myutils/strCrop.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										31
									
								
								vendor/github.com/kirillDanshin/myutils/strCrop.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,31 @@ | ||||
| package myutils | ||||
|  | ||||
| import "regexp" | ||||
|  | ||||
// cropRgx matches the characters StrCrop treats as safe break points
// (whitespace, comma, period, hyphen). It is compiled once at package
// init so concurrent StrCrop calls share it safely — the original
// lazy "if cropRgx == nil" initialization was a data race when
// StrCrop was called from multiple goroutines.
var cropRgx = regexp.MustCompile(`[\s,.-]`)

// StrCrop shortens str to at most length runes, cutting at the last
// break character seen before the limit — the kind of truncation used
// for blog previews. Strings that already fit (by byte length) are
// returned unchanged.
//
// NOTE(review): when no break character occurs before the limit,
// lastStop stays 0 and the result is the empty string — preserved
// as-is; confirm callers expect that.
func StrCrop(str string, length int) string {
	if len(str) <= length {
		return str
	}

	var (
		lastStop int
		runeStr  = []rune(str)
	)
	for i, r := range runeStr {
		if cropRgx.MatchString(string(r)) {
			lastStop = i
		}
		if i >= length {
			return string(runeStr[:lastStop])
		}
	}

	return str
}
							
								
								
									
										19
									
								
								vendor/github.com/kirillDanshin/myutils/strings.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								vendor/github.com/kirillDanshin/myutils/strings.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,19 @@ | ||||
| package myutils | ||||
|  | ||||
| import "github.com/valyala/bytebufferpool" | ||||
|  | ||||
| // Concat some strings | ||||
| func Concat(a string, b ...string) string { | ||||
| 	if len(b) == 0 { | ||||
| 		return a | ||||
| 	} | ||||
| 	buf := bytebufferpool.Get() | ||||
| 	defer bytebufferpool.Put(buf) | ||||
|  | ||||
| 	buf.SetString(a) | ||||
| 	for _, s := range b { | ||||
| 		buf.WriteString(s) | ||||
| 	} | ||||
|  | ||||
| 	return string(buf.B) | ||||
| } | ||||
							
								
								
									
										32
									
								
								vendor/github.com/kirillDanshin/myutils/syncPrint.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										32
									
								
								vendor/github.com/kirillDanshin/myutils/syncPrint.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,32 @@ | ||||
| package myutils | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
// SyncPrinter serializes console output: strings sent on Queue are
// printed one at a time by Run, and a send on Close shuts it down.
type SyncPrinter struct {
	Queue chan string
	Close chan bool
}

// NewSyncPrinter builds a SyncPrinter with a 64-slot buffered message
// queue and a one-slot shutdown channel. The returned error is always
// nil and exists only for signature symmetry.
func NewSyncPrinter() (*SyncPrinter, error) {
	p := &SyncPrinter{
		Queue: make(chan string, 64),
		Close: make(chan bool, 1),
	}

	return p, nil
}
|  | ||||
// Run drains the printer's queue, writing each received string to
// stdout, until a value arrives on Close; it then closes both
// channels and returns.
//
// NOTE(review): Run — the receiver — closes Queue on shutdown, so a
// producer still sending on Queue at that moment would panic. This
// presumably relies on the close signal arriving only after all sends
// have stopped; confirm with callers.
func (printer *SyncPrinter) Run() {
	for {
		select {
		case s := <-printer.Queue:
			fmt.Println(s)
		case <-printer.Close:
			// Shutdown: closing Close also makes any later receive on
			// it return immediately.
			close(printer.Close)
			close(printer.Queue)
			return
		}
	}
}
							
								
								
									
										27
									
								
								vendor/github.com/klauspost/compress/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/github.com/klauspost/compress/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,27 @@ | ||||
| Copyright (c) 2012 The Go Authors. All rights reserved. | ||||
|  | ||||
| Redistribution and use in source and binary forms, with or without | ||||
| modification, are permitted provided that the following conditions are | ||||
| met: | ||||
|  | ||||
|    * Redistributions of source code must retain the above copyright | ||||
| notice, this list of conditions and the following disclaimer. | ||||
|    * Redistributions in binary form must reproduce the above | ||||
| copyright notice, this list of conditions and the following disclaimer | ||||
| in the documentation and/or other materials provided with the | ||||
| distribution. | ||||
|    * Neither the name of Google Inc. nor the names of its | ||||
| contributors may be used to endorse or promote products derived from | ||||
| this software without specific prior written permission. | ||||
|  | ||||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
							
								
								
									
										32
									
								
								vendor/github.com/klauspost/compress/flate/copy.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										32
									
								
								vendor/github.com/klauspost/compress/flate/copy.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,32 @@ | ||||
| // Copyright 2012 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package flate | ||||
|  | ||||
// forwardCopy copies n bytes within mem from src to dst, always
// proceeding forward from the start so overlapping ranges behave as
// if written by the naive loop:
//   for i := 0; i < n; i++ {
//     mem[dst+i] = mem[src+i]
//   }
// (the built-in copy does not guarantee that for forward overlap).
func forwardCopy(mem []byte, dst, src, n int) {
	if dst <= src {
		// No forward overlap: one plain copy is already equivalent.
		copy(mem[dst:dst+n], mem[src:src+n])
		return
	}
	// Forward overlap. The destination ends up holding a repeated
	// pattern of mem[src:src+k]; copy one instance of the pattern per
	// iteration, with k doubling every time around.
	for dst < src+n {
		k := dst - src
		copy(mem[dst:dst+k], mem[src:src+k])
		n -= k
		dst += k
	}
	copy(mem[dst:dst+n], mem[src:src+n])
}
							
								
								
									
										42
									
								
								vendor/github.com/klauspost/compress/flate/crc32_amd64.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										42
									
								
								vendor/github.com/klauspost/compress/flate/crc32_amd64.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,42 @@ | ||||
| //+build !noasm | ||||
| //+build !appengine | ||||
| //+build !gccgo | ||||
|  | ||||
| // Copyright 2015, Klaus Post, see LICENSE for details. | ||||
|  | ||||
| package flate | ||||
|  | ||||
| import ( | ||||
| 	"github.com/klauspost/cpuid" | ||||
| ) | ||||
|  | ||||
// crc32sse returns a hash for the first 4 bytes of the slice
// (implemented in crc32_amd64.s using the SSE4.2 CRC32 instruction).
// len(a) must be >= 4.
//go:noescape
func crc32sse(a []byte) uint32

// crc32sseAll calculates hashes for each 4-byte set in a.
// dst must be at least len(a) - 4 in size.
// The size is not checked by the assembly.
//go:noescape
func crc32sseAll(a []byte, dst []uint32)

// matchLenSSE4 returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
//
// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
//
//go:noescape
func matchLenSSE4(a, b []byte, max int) int

// histogram accumulates a histogram of b in h.
// h must be at least 256 entries in length,
// and must be cleared before calling this function.
//go:noescape
func histogram(b []byte, h []int32)

// Detect SSE 4.2 feature.
// init records once at startup whether the CPU supports SSE 4.2,
// gating use of the assembly fast paths declared above.
func init() {
	useSSE42 = cpuid.CPU.SSE42()
}
							
								
								
									
										214
									
								
								vendor/github.com/klauspost/compress/flate/crc32_amd64.s
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										214
									
								
								vendor/github.com/klauspost/compress/flate/crc32_amd64.s
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,214 @@ | ||||
| //+build !noasm | ||||
| //+build !appengine | ||||
| //+build !gccgo | ||||
|  | ||||
| // Copyright 2015, Klaus Post, see LICENSE for details. | ||||
|  | ||||
// func crc32sse(a []byte) uint32
// Hashes the first 4 bytes of a with the SSE4.2 CRC32 instruction,
// emitted as raw opcode bytes below; the accumulator starts at zero.
TEXT ·crc32sse(SB), 4, $0
	MOVQ a+0(FP), R10
	XORQ BX, BX

	// CRC32   dword (R10), EBX
	BYTE $0xF2; BYTE $0x41; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0x1a

	MOVL BX, ret+24(FP)
	RET
|  | ||||
// func crc32sseAll(a []byte, dst []uint32)
// Writes a CRC32 hash of each 4-byte window of a into dst, using the
// SSE4.2 CRC32 instruction (raw opcode bytes). crc_loop handles four
// windows per 8-byte load; rem_loop finishes the remaining 1-4.
TEXT ·crc32sseAll(SB), 4, $0
	MOVQ  a+0(FP), R8      // R8: src
	MOVQ  a_len+8(FP), R10 // input length
	MOVQ  dst+24(FP), R9   // R9: dst
	SUBQ  $4, R10
	JS    end              // fewer than 4 bytes: nothing to hash
	JZ    one_crc          // exactly 4 bytes: a single hash
	MOVQ  R10, R13
	SHRQ  $2, R10          // len/4
	ANDQ  $3, R13          // len&3
	XORQ  BX, BX
	ADDQ  $1, R13
	TESTQ R10, R10
	JZ    rem_loop

crc_loop:
	// Load 8 bytes and split out four overlapping 4-byte windows.
	MOVQ (R8), R11
	XORQ BX, BX
	XORQ DX, DX
	XORQ DI, DI
	MOVQ R11, R12
	SHRQ $8, R11
	MOVQ R12, AX
	MOVQ R11, CX
	SHRQ $16, R12
	SHRQ $16, R11
	MOVQ R12, SI

	// CRC32   EAX, EBX
	BYTE $0xF2; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0xd8

	// CRC32   ECX, EDX
	BYTE $0xF2; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0xd1

	// CRC32   ESI, EDI
	BYTE $0xF2; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0xfe
	MOVL BX, (R9)
	MOVL DX, 4(R9)
	MOVL DI, 8(R9)

	XORQ BX, BX
	MOVL R11, AX

	// CRC32   EAX, EBX
	BYTE $0xF2; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0xd8
	MOVL BX, 12(R9)

	ADDQ $16, R9
	ADDQ $4, R8
	XORQ BX, BX
	SUBQ $1, R10
	JNZ  crc_loop

rem_loop:
	// One window at a time for the tail.
	MOVL (R8), AX

	// CRC32   EAX, EBX
	BYTE $0xF2; BYTE $0x0f
	BYTE $0x38; BYTE $0xf1; BYTE $0xd8

	MOVL BX, (R9)
	ADDQ $4, R9
	ADDQ $1, R8
	XORQ BX, BX
	SUBQ $1, R13
	JNZ  rem_loop

end:
	RET

one_crc:
	MOVQ $1, R13
	XORQ BX, BX
	JMP  rem_loop
|  | ||||
// func matchLenSSE4(a, b []byte, max int) int
// Returns the length of the common prefix of a and b, capped at max:
// 8 bytes at a time while possible, then byte-by-byte for the tail.
TEXT ·matchLenSSE4(SB), 4, $0
	MOVQ a_base+0(FP), SI
	MOVQ b_base+24(FP), DI
	MOVQ DI, DX
	MOVQ max+48(FP), CX

cmp8:
	// As long as we are 8 or more bytes before the end of max, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMPQ CX, $8
	JLT  cmp1
	MOVQ (SI), AX
	MOVQ (DI), BX
	CMPQ AX, BX
	JNE  bsf
	ADDQ $8, SI
	ADDQ $8, DI
	SUBQ $8, CX
	JMP  cmp8

bsf:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs. The BSF instruction finds the
	// least significant 1 bit, the amd64 architecture is little-endian, and
	// the shift by 3 converts a bit index to a byte index.
	XORQ AX, BX
	BSFQ BX, BX
	SHRQ $3, BX
	ADDQ BX, DI

	// Subtract off &b[0] to convert from &b[ret] to ret, and return.
	SUBQ DX, DI
	MOVQ DI, ret+56(FP)
	RET

cmp1:
	// In the slices' tail, compare 1 byte at a time.
	CMPQ CX, $0
	JEQ  matchLenEnd
	MOVB (SI), AX
	MOVB (DI), BX
	CMPB AX, BX
	JNE  matchLenEnd
	ADDQ $1, SI
	ADDQ $1, DI
	SUBQ $1, CX
	JMP  cmp1

matchLenEnd:
	// Subtract off &b[0] to convert from &b[ret] to ret, and return.
	SUBQ DX, DI
	MOVQ DI, ret+56(FP)
	RET
|  | ||||
// func histogram(b []byte, h []int32)
//
// histogram counts the byte values of b into h. Each slot h[v] is
// incremented once per occurrence of byte value v, so h needs 256 int32
// slots and should start zeroed (counts are added with INCL). Flag 4 is
// NOSPLIT.
TEXT ·histogram(SB), 4, $0
	MOVQ b+0(FP), SI     // SI: &b
	MOVQ b_len+8(FP), R9 // R9: len(b)
	MOVQ h+24(FP), DI    // DI: Histogram
	MOVQ R9, R8
	SHRQ $3, R8          // R8: number of full 8-byte groups
	JZ   hist1           // fewer than 8 bytes: fall through to byte loop
	XORQ R11, R11        // clear R11 so MOVB leaves its upper bits zero

loop_hist8:
	// Load 8 bytes at once, then peel one byte at a time: the low byte of
	// R10 indexes the histogram (scaled by 4 for int32 slots) and R10 is
	// shifted right by 8 to expose the next byte.
	MOVQ (SI), R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	MOVB R10, R11
	INCL (DI)(R11*4)
	SHRQ $8, R10

	// After seven shifts only the top byte is left in R10.
	INCL (DI)(R10*4)

	ADDQ $8, SI
	DECQ R8
	JNZ  loop_hist8

hist1:
	ANDQ $7, R9 // R9: 0..7 tail bytes remaining
	JZ   end_hist
	XORQ R10, R10

loop_hist1:
	// Count the tail one byte at a time.
	MOVB (SI), R10
	INCL (DI)(R10*4)
	INCQ SI
	DECQ R9
	JNZ  loop_hist1

end_hist:
	RET
							
								
								
									
										35
									
								
								vendor/github.com/klauspost/compress/flate/crc32_noasm.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										35
									
								
								vendor/github.com/klauspost/compress/flate/crc32_noasm.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,35 @@ | ||||
| //+build !amd64 noasm appengine gccgo | ||||
|  | ||||
| // Copyright 2015, Klaus Post, see LICENSE for details. | ||||
|  | ||||
| package flate | ||||
|  | ||||
// init disables the SSE 4.2 fast paths: this file is only compiled when
// the assembler implementations are unavailable (see the build tags at the
// top of the file), so the pure-Go fallbacks must be selected.
func init() {
	useSSE42 = false
}
|  | ||||
// crc32sse should never be called: init above forces useSSE42 to false on
// this build, so callers must always take the non-assembler path.
func crc32sse(a []byte) uint32 {
	panic("no assembler")
}
|  | ||||
// crc32sseAll should never be called: init above forces useSSE42 to false
// on this build, so callers must always take the non-assembler path.
func crc32sseAll(a []byte, dst []uint32) {
	panic("no assembler")
}
|  | ||||
// matchLenSSE4 should never be called: init above forces useSSE42 to false
// on this build, so callers must always take the non-assembler path.
//
// The previous body contained an unreachable "return 0" after the panic;
// go vet flags such dead statements, and a panic terminator alone
// satisfies the int result.
func matchLenSSE4(a, b []byte, max int) int {
	panic("no assembler")
}
|  | ||||
// histogram accumulates a histogram of b in h.
//
// len(h) must be >= 256, and h's elements must be all zeroes.
func histogram(b []byte, h []int32) {
	// Reslicing to exactly 256 entries lets the compiler prove h[b[i]] is
	// always in range, eliminating bounds checks in the loop.
	h = h[:256]
	for i := 0; i < len(b); i++ {
		h[b[i]]++
	}
}
							
								
								
									
										1353
									
								
								vendor/github.com/klauspost/compress/flate/deflate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1353
									
								
								vendor/github.com/klauspost/compress/flate/deflate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										184
									
								
								vendor/github.com/klauspost/compress/flate/dict_decoder.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										184
									
								
								vendor/github.com/klauspost/compress/flate/dict_decoder.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,184 @@ | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package flate | ||||
|  | ||||
// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
//	* Literal insertions: Runs of one or more symbols are inserted into the data
//	stream as is. This is accomplished through the writeByte method for a
//	single symbol, or combinations of writeSlice/writeMark for multiple symbols.
//	Any valid stream must start with a literal insertion if no preset dictionary
//	is used.
//
//	* Backward copies: Runs of one or more symbols are copied from previously
//	emitted data. Backward copies come as the tuple (dist, length) where dist
//	determines how far back in the stream to copy from and length determines how
//	many bytes to copy. Note that it is valid for the length to be greater than
//	the distance. Since LZ77 uses forward copies, that situation is used to
//	perform a form of run-length encoding on repeated runs of symbols.
//	The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
type dictDecoder struct {
	hist []byte // Sliding window history; doubles as the output buffer

	// Invariant: 0 <= rdPos <= wrPos <= len(hist)
	wrPos int  // Current output position in buffer
	rdPos int  // Have emitted hist[:rdPos] already
	full  bool // Has a full window length been written yet?
}
|  | ||||
// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
	// Reset every field but keep the allocated window so it can be reused.
	*dd = dictDecoder{hist: dd.hist}

	if cap(dd.hist) < size {
		dd.hist = make([]byte, size)
	}
	dd.hist = dd.hist[:size]

	// Only the last len(hist) bytes of a preset dictionary can ever be
	// referenced, so drop the earlier part.
	if len(dict) > len(dd.hist) {
		dict = dict[len(dict)-len(dd.hist):]
	}
	dd.wrPos = copy(dd.hist, dict)
	if dd.wrPos == len(dd.hist) {
		// Dictionary filled the window exactly: wrap the write position
		// and mark the history as full.
		dd.wrPos = 0
		dd.full = true
	}
	// Nothing is pending for readFlush yet.
	dd.rdPos = dd.wrPos
}
|  | ||||
| // histSize reports the total amount of historical data in the dictionary. | ||||
| func (dd *dictDecoder) histSize() int { | ||||
| 	if dd.full { | ||||
| 		return len(dd.hist) | ||||
| 	} | ||||
| 	return dd.wrPos | ||||
| } | ||||
|  | ||||
// availRead reports the number of bytes that can be flushed by readFlush.
// This is the span hist[rdPos:wrPos] that has been written but not yet
// handed to the caller.
func (dd *dictDecoder) availRead() int {
	return dd.wrPos - dd.rdPos
}
|  | ||||
// availWrite reports the available amount of output buffer space, i.e. how
// many bytes may still be written before the window must be flushed.
func (dd *dictDecoder) availWrite() int {
	return len(dd.hist) - dd.wrPos
}
|  | ||||
// writeSlice returns a slice of the available buffer to write data to.
// Callers fill (a prefix of) the slice and then commit with writeMark.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
	return dd.hist[dd.wrPos:]
}
|  | ||||
// writeMark advances the writer pointer by cnt, committing bytes that the
// caller already placed into the slice returned by writeSlice.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
	dd.wrPos += cnt
}
|  | ||||
// writeByte writes a single byte to the dictionary. No bounds check is
// performed; the caller must guarantee space is available.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
	dd.hist[dd.wrPos] = c
	dd.wrPos++
}
|  | ||||
// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
	dstBase := dd.wrPos
	dstPos := dstBase
	srcPos := dstPos - dist // may be negative: source wraps to the buffer end
	endPos := dstPos + length
	if endPos > len(dd.hist) {
		// Clamp to the buffer end; the caller retries after a flush.
		endPos = len(dd.hist)
	}

	// Copy non-overlapping section after destination position.
	//
	// This section is non-overlapping in that the copy length for this section
	// is always less than or equal to the backwards distance. This can occur
	// if a distance refers to data that wraps-around in the buffer.
	// Thus, a backwards copy is performed here; that is, the exact bytes in
	// the source prior to the copy is placed in the destination.
	if srcPos < 0 {
		srcPos += len(dd.hist)
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
		srcPos = 0
	}

	// Copy possibly overlapping section before destination position.
	//
	// This section can overlap if the copy length for this section is larger
	// than the backwards distance. This is allowed by LZ77 so that repeated
	// strings can be succinctly represented using (dist, length) pairs.
	// Thus, a forwards copy is performed here; that is, the bytes copied is
	// possibly dependent on the resulting bytes in the destination as the copy
	// progresses along. This is functionally equivalent to the following:
	//
	//	for i := 0; i < endPos-dstPos; i++ {
	//		dd.hist[dstPos+i] = dd.hist[srcPos+i]
	//	}
	//	dstPos = endPos
	//
	// Each copy call doubles the amount of copied data, so the loop runs
	// O(log(length/dist)) times rather than byte-by-byte.
	for dstPos < endPos {
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	}

	dd.wrPos = dstPos
	return dstPos - dstBase
}
|  | ||||
// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
// It returns 0 (copying nothing) when the copy would wrap the buffer,
// leaving those cases to the general writeCopy.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
	dstPos := dd.wrPos
	endPos := dstPos + length
	if dstPos < dist || endPos > len(dd.hist) {
		return 0
	}
	dstBase := dstPos
	srcPos := dstPos - dist

	// Copy possibly overlapping section before destination position.
loop:
	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	if dstPos < endPos {
		goto loop // Avoid for-loop so that this function can be inlined
	}

	dd.wrPos = dstPos
	return dstPos - dstBase
}
|  | ||||
// readFlush returns a slice of the historical buffer that is ready to be
// emitted to the user. The data returned by readFlush must be fully consumed
// before calling any other dictDecoder methods, because the same backing
// array is overwritten once the window wraps.
func (dd *dictDecoder) readFlush() []byte {
	toRead := dd.hist[dd.rdPos:dd.wrPos]
	dd.rdPos = dd.wrPos
	if dd.wrPos == len(dd.hist) {
		// Window is exactly full: wrap both cursors and record that a
		// full window of history now exists.
		dd.wrPos, dd.rdPos = 0, 0
		dd.full = true
	}
	return toRead
}
							
								
								
									
										265
									
								
								vendor/github.com/klauspost/compress/flate/gen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										265
									
								
								vendor/github.com/klauspost/compress/flate/gen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,265 @@ | ||||
| // Copyright 2012 The Go Authors.  All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build ignore | ||||
|  | ||||
| // This program generates fixedhuff.go | ||||
| // Invoke as | ||||
| // | ||||
| //	go run gen.go -output fixedhuff.go | ||||
|  | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"go/format" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| ) | ||||
|  | ||||
// filename is where the generated source is written (-output flag).
var filename = flag.String("output", "fixedhuff.go", "output file name")

// maxCodeLen bounds Huffman code lengths; DEFLATE codes are at most 15 bits.
const maxCodeLen = 16

// Note: the definition of the huffmanDecoder struct is copied from
// inflate.go, as it is private to the implementation.

// chunk & 15 is number of bits
// chunk >> 4 is value, including table link

const (
	huffmanChunkBits  = 9                     // bits decoded by the first-level table
	huffmanNumChunks  = 1 << huffmanChunkBits // first-level table size
	huffmanCountMask  = 15                    // low nibble of a chunk: code length
	huffmanValueShift = 4                     // high bits of a chunk: decoded value/link
)

type huffmanDecoder struct {
	min      int                      // the minimum code length
	chunks   [huffmanNumChunks]uint32 // chunks as described above
	links    [][]uint32               // overflow links
	linkMask uint32                   // mask the width of the link table
}
|  | ||||
// Initialize Huffman decoding tables from array of code lengths.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
//
// bits[i] is the code length of symbol i (0 means the symbol is unused).
// Returns false when the lengths do not describe a valid complete code.
func (h *huffmanDecoder) init(bits []int) bool {
	// Sanity enables additional runtime tests during Huffman
	// table construction.  It's intended to be used during
	// development to supplement the currently ad-hoc unit tests.
	const sanity = false

	// h.min != 0 means h was used before; reset it to the zero value.
	if h.min != 0 {
		*h = huffmanDecoder{}
	}

	// Count number of codes of each length,
	// compute min and max length.
	var count [maxCodeLen]int
	var min, max int
	for _, n := range bits {
		if n == 0 {
			continue
		}
		if min == 0 || n < min {
			min = n
		}
		if n > max {
			max = n
		}
		count[n]++
	}

	// Empty tree. The decompressor.huffSym function will fail later if the tree
	// is used. Technically, an empty tree is only valid for the HDIST tree and
	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
	// is guaranteed to fail since it will attempt to use the tree to decode the
	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
	// guaranteed to fail later since the compressed data section must be
	// composed of at least one symbol (the end-of-block marker).
	if max == 0 {
		return true
	}

	// Assign canonical codes: nextcode[n] is the first code of length n.
	code := 0
	var nextcode [maxCodeLen]int
	for i := min; i <= max; i++ {
		code <<= 1
		nextcode[i] = code
		code += count[i]
	}

	// Check that the coding is complete (i.e., that we've
	// assigned all 2-to-the-max possible bit sequences).
	// Exception: To be compatible with zlib, we also need to
	// accept degenerate single-code codings.  See also
	// TestDegenerateHuffmanCoding.
	if code != 1<<uint(max) && !(code == 1 && max == 1) {
		return false
	}

	h.min = min
	if max > huffmanChunkBits {
		// Codes longer than the first-level table width need second-level
		// (link) tables indexed by the remaining bits.
		numLinks := 1 << (uint(max) - huffmanChunkBits)
		h.linkMask = uint32(numLinks - 1)

		// create link tables
		link := nextcode[huffmanChunkBits+1] >> 1
		h.links = make([][]uint32, huffmanNumChunks-link)
		for j := uint(link); j < huffmanNumChunks; j++ {
			// Table indexes use bit-reversed codes because DEFLATE streams
			// codes LSB-first.
			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
			reverse >>= uint(16 - huffmanChunkBits)
			off := j - uint(link)
			if sanity && h.chunks[reverse] != 0 {
				panic("impossible: overwriting existing chunk")
			}
			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
			h.links[off] = make([]uint32, numLinks)
		}
	}

	// Populate the tables: every table slot whose low bits match a code
	// gets that code's chunk.
	for i, n := range bits {
		if n == 0 {
			continue
		}
		code := nextcode[n]
		nextcode[n]++
		chunk := uint32(i<<huffmanValueShift | n)
		reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
		reverse >>= uint(16 - n)
		if n <= huffmanChunkBits {
			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
				// We should never need to overwrite
				// an existing chunk.  Also, 0 is
				// never a valid chunk, because the
				// lower 4 "count" bits should be
				// between 1 and 15.
				if sanity && h.chunks[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				h.chunks[off] = chunk
			}
		} else {
			j := reverse & (huffmanNumChunks - 1)
			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
				// Longer codes should have been
				// associated with a link table above.
				panic("impossible: not an indirect chunk")
			}
			value := h.chunks[j] >> huffmanValueShift
			linktab := h.links[value]
			reverse >>= huffmanChunkBits
			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
				if sanity && linktab[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				linktab[off] = chunk
			}
		}
	}

	if sanity {
		// Above we've sanity checked that we never overwrote
		// an existing entry.  Here we additionally check that
		// we filled the tables completely.
		for i, chunk := range h.chunks {
			if chunk == 0 {
				// As an exception, in the degenerate
				// single-code case, we allow odd
				// chunks to be missing.
				if code == 1 && i%2 == 1 {
					continue
				}
				panic("impossible: missing chunk")
			}
		}
		for _, linktab := range h.links {
			for _, chunk := range linktab {
				if chunk == 0 {
					panic("impossible: missing chunk")
				}
			}
		}
	}

	return true
}
|  | ||||
// main builds the fixed Huffman literal/length decoder table (RFC 1951
// section 3.2.6 code lengths), renders it as Go source, gofmt-formats the
// result, and writes it to the -output file.
func main() {
	flag.Parse()

	var h huffmanDecoder
	var bits [288]int
	initReverseByte()
	// Fixed literal/length code lengths per RFC 1951 3.2.6:
	// symbols 0-143 use 8 bits, 144-255 use 9, 256-279 use 7, 280-287 use 8.
	for i := 0; i < 144; i++ {
		bits[i] = 8
	}
	for i := 144; i < 256; i++ {
		bits[i] = 9
	}
	for i := 256; i < 280; i++ {
		bits[i] = 7
	}
	for i := 280; i < 288; i++ {
		bits[i] = 8
	}
	h.init(bits[:])
	// The fixed code is at most 9 bits, so it must fit entirely in the
	// first-level table.
	if h.links != nil {
		log.Fatal("Unexpected links table in fixed Huffman decoder")
	}

	var buf bytes.Buffer

	fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.`+"\n\n")

	fmt.Fprintln(&buf, "package flate")
	fmt.Fprintln(&buf)
	fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
	fmt.Fprintln(&buf)
	fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
	fmt.Fprintf(&buf, "\t%d,\n", h.min)
	fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
	// Emit the chunk table eight entries per line.
	for i := 0; i < huffmanNumChunks; i++ {
		if i&7 == 0 {
			fmt.Fprintf(&buf, "\t\t")
		} else {
			fmt.Fprintf(&buf, " ")
		}
		fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
		if i&7 == 7 {
			fmt.Fprintln(&buf)
		}
	}
	fmt.Fprintln(&buf, "\t},")
	fmt.Fprintln(&buf, "\tnil, 0,")
	fmt.Fprintln(&buf, "}")

	// Gofmt the generated source before writing; a format error means the
	// template above is broken.
	data, err := format.Source(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	err = ioutil.WriteFile(*filename, data, 0644)
	if err != nil {
		log.Fatal(err)
	}
}
|  | ||||
// reverseByte maps each byte value to its bit-reversed counterpart
// (bit 0 swaps with bit 7, bit 1 with bit 6, and so on).
var reverseByte [256]byte

// initReverseByte fills the reverseByte lookup table.
func initReverseByte() {
	for x := 0; x < 256; x++ {
		v := byte(x)
		var r byte
		// Shift bits out of v from the low end and into r from the low
		// end; after 8 rounds r holds v with its bit order reversed.
		for i := 0; i < 8; i++ {
			r = r<<1 | v&1
			v >>= 1
		}
		reverseByte[x] = r
	}
}
							
								
								
									
										701
									
								
								vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										701
									
								
								vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,701 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package flate | ||||
|  | ||||
| import ( | ||||
| 	"io" | ||||
| ) | ||||
|  | ||||
const (
	// The largest offset code.
	offsetCodeCount = 30

	// The special code used to mark the end of a block.
	endBlockMarker = 256

	// The first length code.
	lengthCodesStart = 257

	// The number of codegen codes.
	codegenCodeCount = 19
	// badCode marks the end of the codegen array (never a valid code).
	badCode = 255

	// bufferFlushSize indicates the buffer size
	// after which bytes are flushed to the writer.
	// Should preferably be a multiple of 6, since
	// we accumulate 6 bytes between writes to the buffer.
	bufferFlushSize = 240

	// bufferSize is the actual output byte buffer size.
	// It must have additional headroom for a flush
	// which can contain up to 8 bytes.
	bufferSize = bufferFlushSize + 8
)
|  | ||||
// The number of extra bits needed by length code X - LENGTH_CODES_START.
var lengthExtraBits = []int8{
	/* 257 */ 0, 0, 0,
	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
	/* 280 */ 4, 5, 5, 5, 5, 0,
}

// The length indicated by length code X - LENGTH_CODES_START.
// Values are offsets from the minimum match length.
var lengthBase = []uint32{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
	64, 80, 96, 112, 128, 160, 192, 224, 255,
}

// offset code word extra bits.
var offsetExtraBits = []int8{
	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
	/* extended window */
	14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
}

// offsetBase is the smallest offset represented by each offset code.
var offsetBase = []uint32{
	/* normal deflate */
	0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
	0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
	0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
	0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
	0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
	0x001800, 0x002000, 0x003000, 0x004000, 0x006000,

	/* extended window */
	0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
	0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
	0x100000, 0x180000, 0x200000, 0x300000,
}
|  | ||||
// The odd order in which the codegen code sizes are written.
// This permutation is fixed by the DEFLATE spec (RFC 1951 section 3.2.7).
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
|  | ||||
// huffmanBitWriter writes Huffman-coded DEFLATE output, buffering partial
// bytes in a 64-bit accumulator. The first Write error is recorded in err
// and all subsequent operations become no-ops.
type huffmanBitWriter struct {
	// writer is the underlying writer.
	// Do not use it directly; use the write method, which ensures
	// that Write errors are sticky.
	writer io.Writer

	// Data waiting to be written is bytes[0:nbytes]
	// and then the low nbits of bits.
	bits            uint64
	nbits           uint
	bytes           [bufferSize]byte
	codegenFreq     [codegenCodeCount]int32
	nbytes          int
	literalFreq     []int32
	offsetFreq      []int32
	codegen         []uint8
	literalEncoding *huffmanEncoder
	offsetEncoding  *huffmanEncoder
	codegenEncoding *huffmanEncoder
	err             error
}
|  | ||||
// newHuffmanBitWriter returns a writer targeting w with all frequency
// tables, encoders, and the codegen scratch buffer pre-allocated.
func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
	return &huffmanBitWriter{
		writer:          w,
		literalFreq:     make([]int32, maxNumLit),
		offsetFreq:      make([]int32, offsetCodeCount),
		codegen:         make([]uint8, maxNumLit+offsetCodeCount+1),
		literalEncoding: newHuffmanEncoder(maxNumLit),
		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
	}
}
|  | ||||
| func (w *huffmanBitWriter) reset(writer io.Writer) { | ||||
| 	w.writer = writer | ||||
| 	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil | ||||
| 	w.bytes = [bufferSize]byte{} | ||||
| } | ||||
|  | ||||
// flush drains the bit accumulator into the byte buffer (padding the final
// partial byte with zero bits) and writes the whole buffer out.
func (w *huffmanBitWriter) flush() {
	if w.err != nil {
		// Drop pending bits so a later flush doesn't loop on them.
		w.nbits = 0
		return
	}
	n := w.nbytes
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		if w.nbits > 8 { // Avoid underflow
			w.nbits -= 8
		} else {
			w.nbits = 0
		}
		n++
	}
	w.bits = 0
	w.write(w.bytes[:n])
	w.nbytes = 0
}
|  | ||||
// write forwards b to the underlying writer, making any Write error sticky:
// once err is set, every later call becomes a no-op.
func (w *huffmanBitWriter) write(b []byte) {
	if w.err != nil {
		return
	}
	_, w.err = w.writer.Write(b)
}
|  | ||||
// writeBits appends the low nb bits of b to the bit accumulator. Once 48 or
// more bits are pending, six whole bytes are moved into the byte buffer,
// which is flushed to the writer when it nears bufferFlushSize.
func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
	if w.err != nil {
		return
	}
	w.bits |= uint64(b) << w.nbits
	w.nbits += nb
	if w.nbits >= 48 {
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		n := w.nbytes
		// 48 bits = 6 bytes, emitted least-significant byte first.
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n >= bufferFlushSize {
			w.write(w.bytes[:n])
			n = 0
		}
		w.nbytes = n
	}
}
|  | ||||
// writeBytes flushes any whole buffered bytes and then writes bytes
// directly to the underlying writer. The bit accumulator must be
// byte-aligned (nbits divisible by 8) when this is called.
func (w *huffmanBitWriter) writeBytes(bytes []byte) {
	if w.err != nil {
		return
	}
	n := w.nbytes
	if w.nbits&7 != 0 {
		w.err = InternalError("writeBytes with unfinished bits")
		return
	}
	// Move the (byte-aligned) accumulator contents into the buffer.
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		w.nbits -= 8
		n++
	}
	if n != 0 {
		w.write(w.bytes[:n])
	}
	w.nbytes = 0
	w.write(bytes)
}
|  | ||||
// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array).  This method generates that run-length encoding.
//
// The result is written into the codegen array, and the frequencies
// of each code is written into the codegenFreq array.
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker
//
//	Code 16: repeat previous length 3-6 times  (2 extra bits)
//	Code 17: repeat zero length 3-10 times     (3 extra bits)
//	Code 18: repeat zero length 11-138 times   (7 extra bits)
//
//  numLiterals      The number of literals in literalEncoding
//  numOffsets       The number of offsets in offsetEncoding
//  litenc, offenc   The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
	for i := range w.codegenFreq {
		w.codegenFreq[i] = 0
	}
	// Note that we are using codegen both as a temporary variable for holding
	// a copy of the frequencies, and as the place where we put the result.
	// This is fine because the output is always shorter than the input used
	// so far.
	codegen := w.codegen // cache
	// Copy the concatenated code sizes to codegen. Put a marker at the end.
	cgnl := codegen[:numLiterals]
	for i := range cgnl {
		cgnl[i] = uint8(litEnc.codes[i].len)
	}

	cgnl = codegen[numLiterals : numLiterals+numOffsets]
	for i := range cgnl {
		cgnl[i] = uint8(offEnc.codes[i].len)
	}
	codegen[numLiterals+numOffsets] = badCode

	size := codegen[0]
	count := 1
	outIndex := 0
	for inIndex := 1; size != badCode; inIndex++ {
		// INVARIANT: We have seen "count" copies of size that have not yet
		// had output generated for them.
		nextSize := codegen[inIndex]
		if nextSize == size {
			count++
			continue
		}
		// We need to generate codegen indicating "count" of size.
		if size != 0 {
			// Emit one literal copy first, then code 16 for runs of 3-6
			// repeats of the previous length.
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
			count--
			for count >= 3 {
				n := 6
				if n > count {
					n = count
				}
				codegen[outIndex] = 16
				outIndex++
				codegen[outIndex] = uint8(n - 3)
				outIndex++
				w.codegenFreq[16]++
				count -= n
			}
		} else {
			// Zero runs: code 18 covers 11-138 zeros, code 17 covers 3-10.
			for count >= 11 {
				n := 138
				if n > count {
					n = count
				}
				codegen[outIndex] = 18
				outIndex++
				codegen[outIndex] = uint8(n - 11)
				outIndex++
				w.codegenFreq[18]++
				count -= n
			}
			if count >= 3 {
				// count >= 3 && count <= 10
				codegen[outIndex] = 17
				outIndex++
				codegen[outIndex] = uint8(count - 3)
				outIndex++
				w.codegenFreq[17]++
				count = 0
			}
		}
		// Leftover 1-2 repeats are emitted as plain literal codes.
		count--
		for ; count >= 0; count-- {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
		}
		// Set up invariant for next time through the loop.
		size = nextSize
		count = 1
	}
	// Marker indicating the end of the codegen.
	codegen[outIndex] = badCode
}
|  | ||||
| // dynamicSize returns the size of dynamically encoded data in bits. | ||||
| func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { | ||||
| 	numCodegens = len(w.codegenFreq) | ||||
| 	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { | ||||
| 		numCodegens-- | ||||
| 	} | ||||
| 	header := 3 + 5 + 5 + 4 + (3 * numCodegens) + | ||||
| 		w.codegenEncoding.bitLength(w.codegenFreq[:]) + | ||||
| 		int(w.codegenFreq[16])*2 + | ||||
| 		int(w.codegenFreq[17])*3 + | ||||
| 		int(w.codegenFreq[18])*7 | ||||
| 	size = header + | ||||
| 		litEnc.bitLength(w.literalFreq) + | ||||
| 		offEnc.bitLength(w.offsetFreq) + | ||||
| 		extraBits | ||||
|  | ||||
| 	return size, numCodegens | ||||
| } | ||||
|  | ||||
// fixedSize returns the size of data encoded with the fixed Huffman
// tables (RFC 1951, 3.2.6) in bits, including the 3-bit block header.
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
	return 3 +
		fixedLiteralEncoding.bitLength(w.literalFreq) +
		fixedOffsetEncoding.bitLength(w.offsetFreq) +
		extraBits
}
|  | ||||
| // storedSize calculates the stored size, including header. | ||||
| // The function returns the size in bits and whether the block | ||||
| // fits inside a single block. | ||||
| func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { | ||||
| 	if in == nil { | ||||
| 		return 0, false | ||||
| 	} | ||||
| 	if len(in) <= maxStoreBlockSize { | ||||
| 		return (len(in) + 5) * 8, true | ||||
| 	} | ||||
| 	return 0, false | ||||
| } | ||||
|  | ||||
// writeCode appends Huffman code c to the bit accumulator, draining six
// whole bytes into the byte buffer whenever at least 48 bits are pending.
func (w *huffmanBitWriter) writeCode(c hcode) {
	if w.err != nil {
		return
	}
	w.bits |= uint64(c.code) << w.nbits
	w.nbits += uint(c.len)
	if w.nbits >= 48 {
		// Move the low 48 bits into the byte buffer, little-endian,
		// keeping any remainder in w.bits.
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		n := w.nbytes
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		// Flush the byte buffer to the underlying writer when full.
		if n >= bufferFlushSize {
			w.write(w.bytes[:n])
			n = 0
		}
		w.nbytes = n
	}
}
|  | ||||
| // Write the header of a dynamic Huffman block to the output stream. | ||||
| // | ||||
| //  numLiterals  The number of literals specified in codegen | ||||
| //  numOffsets   The number of offsets specified in codegen | ||||
| //  numCodegens  The number of codegens used in codegen | ||||
| func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { | ||||
| 	if w.err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	var firstBits int32 = 4 | ||||
| 	if isEof { | ||||
| 		firstBits = 5 | ||||
| 	} | ||||
| 	w.writeBits(firstBits, 3) | ||||
| 	w.writeBits(int32(numLiterals-257), 5) | ||||
| 	w.writeBits(int32(numOffsets-1), 5) | ||||
| 	w.writeBits(int32(numCodegens-4), 4) | ||||
|  | ||||
| 	for i := 0; i < numCodegens; i++ { | ||||
| 		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len) | ||||
| 		w.writeBits(int32(value), 3) | ||||
| 	} | ||||
|  | ||||
| 	i := 0 | ||||
| 	for { | ||||
| 		var codeWord int = int(w.codegen[i]) | ||||
| 		i++ | ||||
| 		if codeWord == badCode { | ||||
| 			break | ||||
| 		} | ||||
| 		w.writeCode(w.codegenEncoding.codes[uint32(codeWord)]) | ||||
|  | ||||
| 		switch codeWord { | ||||
| 		case 16: | ||||
| 			w.writeBits(int32(w.codegen[i]), 2) | ||||
| 			i++ | ||||
| 			break | ||||
| 		case 17: | ||||
| 			w.writeBits(int32(w.codegen[i]), 3) | ||||
| 			i++ | ||||
| 			break | ||||
| 		case 18: | ||||
| 			w.writeBits(int32(w.codegen[i]), 7) | ||||
| 			i++ | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { | ||||
| 	if w.err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	var flag int32 | ||||
| 	if isEof { | ||||
| 		flag = 1 | ||||
| 	} | ||||
| 	w.writeBits(flag, 3) | ||||
| 	w.flush() | ||||
| 	w.writeBits(int32(length), 16) | ||||
| 	w.writeBits(int32(^uint16(length)), 16) | ||||
| } | ||||
|  | ||||
| func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { | ||||
| 	if w.err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	// Indicate that we are a fixed Huffman block | ||||
| 	var value int32 = 2 | ||||
| 	if isEof { | ||||
| 		value = 3 | ||||
| 	} | ||||
| 	w.writeBits(value, 3) | ||||
| } | ||||
|  | ||||
| // writeBlock will write a block of tokens with the smallest encoding. | ||||
| // The original input can be supplied, and if the huffman encoded data | ||||
| // is larger than the original bytes, the data will be written as a | ||||
| // stored block. | ||||
| // If the input is nil, the tokens will always be Huffman encoded. | ||||
| func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { | ||||
| 	if w.err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	tokens = append(tokens, endBlockMarker) | ||||
| 	numLiterals, numOffsets := w.indexTokens(tokens) | ||||
|  | ||||
| 	var extraBits int | ||||
| 	storedSize, storable := w.storedSize(input) | ||||
| 	if storable { | ||||
| 		// We only bother calculating the costs of the extra bits required by | ||||
| 		// the length of offset fields (which will be the same for both fixed | ||||
| 		// and dynamic encoding), if we need to compare those two encodings | ||||
| 		// against stored encoding. | ||||
| 		for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { | ||||
| 			// First eight length codes have extra size = 0. | ||||
| 			extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart]) | ||||
| 		} | ||||
| 		for offsetCode := 4; offsetCode < numOffsets; offsetCode++ { | ||||
| 			// First four offset codes have extra size = 0. | ||||
| 			extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode]) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Figure out smallest code. | ||||
| 	// Fixed Huffman baseline. | ||||
| 	var literalEncoding = fixedLiteralEncoding | ||||
| 	var offsetEncoding = fixedOffsetEncoding | ||||
| 	var size = w.fixedSize(extraBits) | ||||
|  | ||||
| 	// Dynamic Huffman? | ||||
| 	var numCodegens int | ||||
|  | ||||
| 	// Generate codegen and codegenFrequencies, which indicates how to encode | ||||
| 	// the literalEncoding and the offsetEncoding. | ||||
| 	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) | ||||
| 	w.codegenEncoding.generate(w.codegenFreq[:], 7) | ||||
| 	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) | ||||
|  | ||||
| 	if dynamicSize < size { | ||||
| 		size = dynamicSize | ||||
| 		literalEncoding = w.literalEncoding | ||||
| 		offsetEncoding = w.offsetEncoding | ||||
| 	} | ||||
|  | ||||
| 	// Stored bytes? | ||||
| 	if storable && storedSize < size { | ||||
| 		w.writeStoredHeader(len(input), eof) | ||||
| 		w.writeBytes(input) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// Huffman. | ||||
| 	if literalEncoding == fixedLiteralEncoding { | ||||
| 		w.writeFixedHeader(eof) | ||||
| 	} else { | ||||
| 		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) | ||||
| 	} | ||||
|  | ||||
| 	// Write the tokens. | ||||
| 	w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes) | ||||
| } | ||||
|  | ||||
| // writeBlockDynamic encodes a block using a dynamic Huffman table. | ||||
| // This should be used if the symbols used have a disproportionate | ||||
| // histogram distribution. | ||||
| // If input is supplied and the compression savings are below 1/16th of the | ||||
| // input size the block is stored. | ||||
| func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) { | ||||
| 	if w.err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	tokens = append(tokens, endBlockMarker) | ||||
| 	numLiterals, numOffsets := w.indexTokens(tokens) | ||||
|  | ||||
| 	// Generate codegen and codegenFrequencies, which indicates how to encode | ||||
| 	// the literalEncoding and the offsetEncoding. | ||||
| 	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) | ||||
| 	w.codegenEncoding.generate(w.codegenFreq[:], 7) | ||||
| 	size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0) | ||||
|  | ||||
| 	// Store bytes, if we don't get a reasonable improvement. | ||||
| 	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { | ||||
| 		w.writeStoredHeader(len(input), eof) | ||||
| 		w.writeBytes(input) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// Write Huffman table. | ||||
| 	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) | ||||
|  | ||||
| 	// Write the tokens. | ||||
| 	w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes) | ||||
| } | ||||
|  | ||||
| // indexTokens indexes a slice of tokens, and updates | ||||
| // literalFreq and offsetFreq, and generates literalEncoding | ||||
| // and offsetEncoding. | ||||
| // The number of literal and offset tokens is returned. | ||||
| func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) { | ||||
| 	for i := range w.literalFreq { | ||||
| 		w.literalFreq[i] = 0 | ||||
| 	} | ||||
| 	for i := range w.offsetFreq { | ||||
| 		w.offsetFreq[i] = 0 | ||||
| 	} | ||||
|  | ||||
| 	for _, t := range tokens { | ||||
| 		if t < matchType { | ||||
| 			w.literalFreq[t.literal()]++ | ||||
| 			continue | ||||
| 		} | ||||
| 		length := t.length() | ||||
| 		offset := t.offset() | ||||
| 		w.literalFreq[lengthCodesStart+lengthCode(length)]++ | ||||
| 		w.offsetFreq[offsetCode(offset)]++ | ||||
| 	} | ||||
|  | ||||
| 	// get the number of literals | ||||
| 	numLiterals = len(w.literalFreq) | ||||
| 	for w.literalFreq[numLiterals-1] == 0 { | ||||
| 		numLiterals-- | ||||
| 	} | ||||
| 	// get the number of offsets | ||||
| 	numOffsets = len(w.offsetFreq) | ||||
| 	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { | ||||
| 		numOffsets-- | ||||
| 	} | ||||
| 	if numOffsets == 0 { | ||||
| 		// We haven't found a single match. If we want to go with the dynamic encoding, | ||||
| 		// we should count at least one offset to be sure that the offset huffman tree could be encoded. | ||||
| 		w.offsetFreq[0] = 1 | ||||
| 		numOffsets = 1 | ||||
| 	} | ||||
| 	w.literalEncoding.generate(w.literalFreq, 15) | ||||
| 	w.offsetEncoding.generate(w.offsetFreq, 15) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // writeTokens writes a slice of tokens to the output. | ||||
| // codes for literal and offset encoding must be supplied. | ||||
| func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { | ||||
| 	if w.err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	for _, t := range tokens { | ||||
| 		if t < matchType { | ||||
| 			w.writeCode(leCodes[t.literal()]) | ||||
| 			continue | ||||
| 		} | ||||
| 		// Write the length | ||||
| 		length := t.length() | ||||
| 		lengthCode := lengthCode(length) | ||||
| 		w.writeCode(leCodes[lengthCode+lengthCodesStart]) | ||||
| 		extraLengthBits := uint(lengthExtraBits[lengthCode]) | ||||
| 		if extraLengthBits > 0 { | ||||
| 			extraLength := int32(length - lengthBase[lengthCode]) | ||||
| 			w.writeBits(extraLength, extraLengthBits) | ||||
| 		} | ||||
| 		// Write the offset | ||||
| 		offset := t.offset() | ||||
| 		offsetCode := offsetCode(offset) | ||||
| 		w.writeCode(oeCodes[offsetCode]) | ||||
| 		extraOffsetBits := uint(offsetExtraBits[offsetCode]) | ||||
| 		if extraOffsetBits > 0 { | ||||
| 			extraOffset := int32(offset - offsetBase[offsetCode]) | ||||
| 			w.writeBits(extraOffset, extraOffsetBits) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// huffOffset is a static offset encoder used for huffman only encoding.
// It can be reused since we will not be encoding offset values.
var huffOffset *huffmanEncoder

func init() {
	// Build the offset encoder from a frequency table containing a single
	// entry, so the offset tree can always be encoded in headers.
	w := newHuffmanBitWriter(nil)
	w.offsetFreq[0] = 1
	huffOffset = newHuffmanEncoder(offsetCodeCount)
	huffOffset.generate(w.offsetFreq, 15)
}
|  | ||||
// writeBlockHuff encodes a block of bytes as either
// Huffman encoded literals or uncompressed bytes if the
// results only gains very little from compression.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
	if w.err != nil {
		return
	}

	// Clear histogram
	for i := range w.literalFreq {
		w.literalFreq[i] = 0
	}

	// Add everything as literals
	histogram(input, w.literalFreq)

	// The end-of-block marker occurs exactly once per block.
	w.literalFreq[endBlockMarker] = 1

	const numLiterals = endBlockMarker + 1
	const numOffsets = 1

	w.literalEncoding.generate(w.literalFreq, 15)

	// Figure out smallest code.
	// Always use dynamic Huffman or Store
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)

	// Store bytes, if we don't get a reasonable improvement.
	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	encoding := w.literalEncoding.codes[:257]
	n := w.nbytes
	for _, t := range input {
		// Bitwriting inlined, ~30% speedup
		c := encoding[t]
		w.bits |= uint64(c.code) << w.nbits
		w.nbits += uint(c.len)
		if w.nbits < 48 {
			continue
		}
		// Store 6 bytes (little-endian) once 48 bits have accumulated.
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n < bufferFlushSize {
			continue
		}
		w.write(w.bytes[:n])
		if w.err != nil {
			return // Return early in the event of write failures
		}
		n = 0
	}
	w.nbytes = n
	w.writeCode(encoding[endBlockMarker])
}
							
								
								
									
										344
									
								
								vendor/github.com/klauspost/compress/flate/huffman_code.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										344
									
								
								vendor/github.com/klauspost/compress/flate/huffman_code.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,344 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package flate | ||||
|  | ||||
| import ( | ||||
| 	"math" | ||||
| 	"sort" | ||||
| ) | ||||
|  | ||||
// hcode is a huffman code with a bit code and bit length.
type hcode struct {
	// code holds the bit pattern; len holds the number of bits used.
	code, len uint16
}
|  | ||||
// huffmanEncoder maps symbols to Huffman codes and carries scratch
// buffers reused across calls to generate.
type huffmanEncoder struct {
	codes     []hcode       // code for each symbol, indexed by symbol value
	freqcache []literalNode // scratch buffer reused by generate
	bitCount  [17]int32     // scratch output of bitCounts
	lns       byLiteral     // stored to avoid repeated allocation in generate
	lfs       byFreq        // stored to avoid repeated allocation in generate
}
|  | ||||
// literalNode pairs a symbol with its observed frequency.
type literalNode struct {
	literal uint16 // the symbol value
	freq    int32  // how often the symbol occurs
}
|  | ||||
// A levelInfo describes the state of the constructed tree for a given depth.
type levelInfo struct {
	// Our level. For better printing
	level int32

	// The frequency of the last node at this level
	lastFreq int32

	// The frequency of the next character to add to this level
	nextCharFreq int32

	// The frequency of the next pair (from level below) to add to this level.
	// Only valid if the "needed" value of the next lower level is 0.
	nextPairFreq int32

	// The number of chains remaining to generate for this level before moving
	// up to the next level
	needed int32
}
|  | ||||
| // set sets the code and length of an hcode. | ||||
| func (h *hcode) set(code uint16, length uint16) { | ||||
| 	h.len = length | ||||
| 	h.code = code | ||||
| } | ||||
|  | ||||
| func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} } | ||||
|  | ||||
| func newHuffmanEncoder(size int) *huffmanEncoder { | ||||
| 	return &huffmanEncoder{codes: make([]hcode, size)} | ||||
| } | ||||
|  | ||||
| // Generates a HuffmanCode corresponding to the fixed literal table | ||||
| func generateFixedLiteralEncoding() *huffmanEncoder { | ||||
| 	h := newHuffmanEncoder(maxNumLit) | ||||
| 	codes := h.codes | ||||
| 	var ch uint16 | ||||
| 	for ch = 0; ch < maxNumLit; ch++ { | ||||
| 		var bits uint16 | ||||
| 		var size uint16 | ||||
| 		switch { | ||||
| 		case ch < 144: | ||||
| 			// size 8, 000110000  .. 10111111 | ||||
| 			bits = ch + 48 | ||||
| 			size = 8 | ||||
| 			break | ||||
| 		case ch < 256: | ||||
| 			// size 9, 110010000 .. 111111111 | ||||
| 			bits = ch + 400 - 144 | ||||
| 			size = 9 | ||||
| 			break | ||||
| 		case ch < 280: | ||||
| 			// size 7, 0000000 .. 0010111 | ||||
| 			bits = ch - 256 | ||||
| 			size = 7 | ||||
| 			break | ||||
| 		default: | ||||
| 			// size 8, 11000000 .. 11000111 | ||||
| 			bits = ch + 192 - 280 | ||||
| 			size = 8 | ||||
| 		} | ||||
| 		codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} | ||||
| 	} | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| func generateFixedOffsetEncoding() *huffmanEncoder { | ||||
| 	h := newHuffmanEncoder(30) | ||||
| 	codes := h.codes | ||||
| 	for ch := range codes { | ||||
| 		codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5} | ||||
| 	} | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() | ||||
| var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() | ||||
|  | ||||
| func (h *huffmanEncoder) bitLength(freq []int32) int { | ||||
| 	var total int | ||||
| 	for i, f := range freq { | ||||
| 		if f != 0 { | ||||
| 			total += int(f) * int(h.codes[i].len) | ||||
| 		} | ||||
| 	} | ||||
| 	return total | ||||
| } | ||||
|  | ||||
// maxBitsLimit is the exclusive upper bound for the maxBits argument of bitCounts.
const maxBitsLimit = 16
|  | ||||
// Return the number of literals assigned to each bit size in the Huffman encoding.
//
// This method is only called when list.length >= 3.
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list        An array of the literals with non-zero frequencies
//             and their associated frequencies. The array is in order of increasing
//             frequency, and has as its last element a special element with frequency
//             MaxInt32.
// maxBits     The maximum number of bits that should be used to encode any literal.
//             Must be less than 16.
// return      An integer array in which array[i] indicates the number of literals
//             that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
	if maxBits >= maxBitsLimit {
		panic("flate: maxBits too large")
	}
	n := int32(len(list))
	list = list[0 : n+1]
	list[n] = maxNode()

	// The tree can't have greater depth than n - 1, no matter what. This
	// saves a little bit of work in some small cases
	if maxBits > n-1 {
		maxBits = n - 1
	}

	// Create information about each of the levels.
	// A bogus "Level 0" whose sole purpose is so that
	// level1.prev.needed==0.  This makes level1.nextPairFreq
	// be a legitimate value that never gets chosen.
	var levels [maxBitsLimit]levelInfo
	// leafCounts[i] counts the number of literals at the left
	// of ancestors of the rightmost node at level i.
	// leafCounts[i][j] is the number of literals at the left
	// of the level j ancestor.
	var leafCounts [maxBitsLimit][maxBitsLimit]int32

	for level := int32(1); level <= maxBits; level++ {
		// For every level, the first two items are the first two characters.
		// We initialize the levels as if we had already figured this out.
		levels[level] = levelInfo{
			level:        level,
			lastFreq:     list[1].freq,
			nextCharFreq: list[2].freq,
			nextPairFreq: list[0].freq + list[1].freq,
		}
		leafCounts[level][level] = 2
		if level == 1 {
			levels[level].nextPairFreq = math.MaxInt32
		}
	}

	// We need a total of 2*n - 2 items at top level and have already generated 2.
	levels[maxBits].needed = 2*n - 4

	level := maxBits
	for {
		l := &levels[level]
		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
			// We've run out of both leaves and pairs.
			// End all calculations for this level.
			// To make sure we never come back to this level or any lower level,
			// set nextPairFreq impossibly large.
			l.needed = 0
			levels[level+1].nextPairFreq = math.MaxInt32
			level++
			continue
		}

		prevFreq := l.lastFreq
		if l.nextCharFreq < l.nextPairFreq {
			// The next item on this row is a leaf node.
			n := leafCounts[level][level] + 1
			l.lastFreq = l.nextCharFreq
			// Lower leafCounts are the same as the previous node.
			leafCounts[level][level] = n
			l.nextCharFreq = list[n].freq
		} else {
			// The next item on this row is a pair from the previous row.
			// nextPairFreq isn't valid until we generate two
			// more values in the level below
			l.lastFreq = l.nextPairFreq
			// Take leaf counts from the lower level, except counts[level] remains the same.
			copy(leafCounts[level][:level], leafCounts[level-1][:level])
			levels[l.level-1].needed = 2
		}

		if l.needed--; l.needed == 0 {
			// We've done everything we need to do for this level.
			// Continue calculating one level up. Fill in nextPairFreq
			// of that level with the sum of the two nodes we've just calculated on
			// this level.
			if l.level == maxBits {
				// All done!
				break
			}
			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
			level++
		} else {
			// If we stole from below, move down temporarily to replenish it.
			for levels[level-1].needed > 0 {
				level--
			}
		}
	}

	// Something is wrong if, at the end, the top level hasn't used
	// all of the leaves.
	if leafCounts[maxBits][maxBits] != n {
		panic("leafCounts[maxBits][maxBits] != n")
	}

	// Convert cumulative leaf counts into per-bit-length counts.
	bitCount := h.bitCount[:maxBits+1]
	bits := 1
	counts := &leafCounts[maxBits]
	for level := maxBits; level > 0; level-- {
		// chain.leafCount gives the number of literals requiring at least "bits"
		// bits to encode.
		bitCount[bits] = counts[level] - counts[level-1]
		bits++
	}
	return bitCount
}
|  | ||||
// Look at the leaves and assign them a bit count and an encoding as specified
// in RFC 1951 3.2.2
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
	code := uint16(0)
	for n, bits := range bitCount {
		code <<= 1
		if n == 0 || bits == 0 {
			continue
		}
		// The literals list[len(list)-bits] .. list[len(list)-1]
		// are encoded using "bits" bits, and get the values
		// code, code + 1, ....  The code values are
		// assigned in literal order (not frequency order).
		chunk := list[len(list)-int(bits):]

		h.lns.sort(chunk)
		for _, node := range chunk {
			// Codes are transmitted most-significant-bit first; store reversed.
			h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
			code++
		}
		list = list[0 : len(list)-int(bits)]
	}
}
|  | ||||
// Update this Huffman Code object to be the minimum code for the specified frequency count.
//
// freq     An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits  The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
	if h.freqcache == nil {
		// Allocate a reusable buffer with the longest possible frequency table.
		// Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
		// The largest of these is maxNumLit, so we allocate for that case.
		h.freqcache = make([]literalNode, maxNumLit+1)
	}
	list := h.freqcache[:len(freq)+1]
	// Number of non-zero literals
	count := 0
	// Set list to be the set of all non-zero literals and their frequencies
	for i, f := range freq {
		if f != 0 {
			list[count] = literalNode{uint16(i), f}
			count++
		} else {
			// Overwrite the cached slot with the zero value and mark
			// the symbol's code as unused.
			list[count] = literalNode{}
			h.codes[i].len = 0
		}
	}
	// Sentinel zero entry past the used portion of the cache.
	list[len(freq)] = literalNode{}

	list = list[:count]
	if count <= 2 {
		// Handle the small cases here, because they are awkward for the general case code. With
		// two or fewer literals, everything has bit length 1.
		for i, node := range list {
			// "list" is in order of increasing literal value.
			h.codes[node.literal].set(uint16(i), 1)
		}
		return
	}
	h.lfs.sort(list)

	// Get the number of literals for each bit count
	bitCount := h.bitCounts(list, maxBits)
	// And do the assignment
	h.assignEncodingAndSize(bitCount, list)
}
|  | ||||
| type byLiteral []literalNode | ||||
|  | ||||
| func (s *byLiteral) sort(a []literalNode) { | ||||
| 	*s = byLiteral(a) | ||||
| 	sort.Sort(s) | ||||
| } | ||||
|  | ||||
| func (s byLiteral) Len() int { return len(s) } | ||||
|  | ||||
| func (s byLiteral) Less(i, j int) bool { | ||||
| 	return s[i].literal < s[j].literal | ||||
| } | ||||
|  | ||||
| func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | ||||
|  | ||||
| type byFreq []literalNode | ||||
|  | ||||
| func (s *byFreq) sort(a []literalNode) { | ||||
| 	*s = byFreq(a) | ||||
| 	sort.Sort(s) | ||||
| } | ||||
|  | ||||
| func (s byFreq) Len() int { return len(s) } | ||||
|  | ||||
| func (s byFreq) Less(i, j int) bool { | ||||
| 	if s[i].freq == s[j].freq { | ||||
| 		return s[i].literal < s[j].literal | ||||
| 	} | ||||
| 	return s[i].freq < s[j].freq | ||||
| } | ||||
|  | ||||
| func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | ||||
							
								
								
									
										880
									
								
								vendor/github.com/klauspost/compress/flate/inflate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										880
									
								
								vendor/github.com/klauspost/compress/flate/inflate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,880 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package flate implements the DEFLATE compressed data format, described in | ||||
| // RFC 1951.  The gzip and zlib packages implement access to DEFLATE-based file | ||||
| // formats. | ||||
| package flate | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"io" | ||||
| 	"math/bits" | ||||
| 	"strconv" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
const (
	maxCodeLen     = 16 // max length of Huffman code
	maxCodeLenMask = 15 // mask for max length of Huffman code
	// The next three numbers come from the RFC section 3.2.7, with the
	// additional proviso in section 3.2.5 which implies that distance codes
	// 30 and 31 should never occur in compressed data.
	maxNumLit  = 286 // maximum number of literal/length codes
	maxNumDist = 30  // maximum number of distance codes
	numCodes   = 19  // number of codes in Huffman meta-code
)
|  | ||||
// Initialize the fixedHuffmanDecoder only once upon first use.
var fixedOnce sync.Once
var fixedHuffmanDecoder huffmanDecoder // decoder for the RFC 1951 section 3.2.6 fixed code
|  | ||||
// A CorruptInputError reports the presence of corrupt input at a given offset.
type CorruptInputError int64

// Error describes the byte offset at which the corruption was detected.
func (e CorruptInputError) Error() string {
	offset := strconv.FormatInt(int64(e), 10)
	return "flate: corrupt input before offset " + offset
}
|  | ||||
// An InternalError reports an error in the flate code itself.
type InternalError string

// Error prefixes the message with the package name for context.
func (e InternalError) Error() string {
	return "flate: internal error: " + string(e)
}
|  | ||||
// A ReadError reports an error encountered while reading input.
//
// Deprecated: No longer returned.
type ReadError struct {
	Offset int64 // byte offset where error occurred
	Err    error // error returned by underlying Read
}

// Error combines the byte offset with the underlying read error.
func (e *ReadError) Error() string {
	msg := "flate: read error at offset " + strconv.FormatInt(e.Offset, 10)
	return msg + ": " + e.Err.Error()
}
|  | ||||
// A WriteError reports an error encountered while writing output.
//
// Deprecated: No longer returned.
type WriteError struct {
	Offset int64 // byte offset where error occurred
	Err    error // error returned by underlying Write
}

// Error combines the byte offset with the underlying write error.
func (e *WriteError) Error() string {
	msg := "flate: write error at offset " + strconv.FormatInt(e.Offset, 10)
	return msg + ": " + e.Err.Error()
}
|  | ||||
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
	// Reset discards any buffered data and resets the Resetter as if it was
	// newly initialized with the given reader.
	Reset(r io.Reader, dict []byte) error
}
|  | ||||
// The data structure for decoding Huffman tables is based on that of
// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
// For codes smaller than the table width, there are multiple entries
// (each combination of trailing bits has the same value). For codes
// larger than the table width, the table contains a link to an overflow
// table. The width of each entry in the link table is the maximum code
// size minus the chunk width.
//
// Note that you can do a lookup in the table even without all bits
// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
// have the property that shorter codes come before longer ones, the
// bit length estimate in the result is a lower bound on the actual
// number of bits.
//
// See the following:
//	http://www.gzip.org/algorithm.txt

// Each chunk is a packed uint32:
//   chunk & 15 is number of bits
//   chunk >> 4 is value, including table link

const (
	huffmanChunkBits  = 9                     // width in bits of the primary lookup table
	huffmanNumChunks  = 1 << huffmanChunkBits // number of entries in the primary table
	huffmanCountMask  = 15                    // low 4 bits of a chunk hold the code length
	huffmanValueShift = 4                     // remaining bits hold the decoded value or link index
)
|  | ||||
// huffmanDecoder implements the chunked Huffman lookup scheme described
// above. The zero value is usable; init must be called before decoding.
type huffmanDecoder struct {
	min      int                       // the minimum code length
	chunks   *[huffmanNumChunks]uint32 // chunks as described above
	links    [][]uint32                // overflow links
	linkMask uint32                    // mask the width of the link table
}
|  | ||||
// init initializes the Huffman decoding tables from an array of code
// lengths (one entry per symbol; zero means the symbol is unused).
// It reports whether the lengths describe a valid coding.
// Following this function, h is guaranteed to be initialized into a complete
// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
// degenerate case where the tree has only a single symbol with length 1. Empty
// trees are permitted.
// Previously allocated chunk and link storage is reused where possible.
func (h *huffmanDecoder) init(lengths []int) bool {
	// Sanity enables additional runtime tests during Huffman
	// table construction. It's intended to be used during
	// development to supplement the currently ad-hoc unit tests.
	const sanity = false

	if h.chunks == nil {
		h.chunks = &[huffmanNumChunks]uint32{}
	}
	if h.min != 0 {
		// Reinitialization: keep the allocated tables, clear the rest.
		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
	}

	// Count number of codes of each length,
	// compute min and max length.
	var count [maxCodeLen]int
	var min, max int
	for _, n := range lengths {
		if n == 0 {
			continue
		}
		if min == 0 || n < min {
			min = n
		}
		if n > max {
			max = n
		}
		count[n&maxCodeLenMask]++
	}

	// Empty tree. The decompressor.huffSym function will fail later if the tree
	// is used. Technically, an empty tree is only valid for the HDIST tree and
	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
	// is guaranteed to fail since it will attempt to use the tree to decode the
	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
	// guaranteed to fail later since the compressed data section must be
	// composed of at least one symbol (the end-of-block marker).
	if max == 0 {
		return true
	}

	// Compute the first canonical code of each length (RFC 1951 section 3.2.2).
	code := 0
	var nextcode [maxCodeLen]int
	for i := min; i <= max; i++ {
		code <<= 1
		nextcode[i&maxCodeLenMask] = code
		code += count[i&maxCodeLenMask]
	}

	// Check that the coding is complete (i.e., that we've
	// assigned all 2-to-the-max possible bit sequences).
	// Exception: To be compatible with zlib, we also need to
	// accept degenerate single-code codings. See also
	// TestDegenerateHuffmanCoding.
	if code != 1<<uint(max) && !(code == 1 && max == 1) {
		return false
	}

	h.min = min
	chunks := h.chunks[:]
	for i := range chunks {
		chunks[i] = 0
	}

	if max > huffmanChunkBits {
		// Codes longer than the primary table width need overflow
		// (link) tables, one per primary-table slot that overflows.
		numLinks := 1 << (uint(max) - huffmanChunkBits)
		h.linkMask = uint32(numLinks - 1)

		// create link tables
		link := nextcode[huffmanChunkBits+1] >> 1
		if cap(h.links) < huffmanNumChunks-link {
			h.links = make([][]uint32, huffmanNumChunks-link)
		} else {
			h.links = h.links[:huffmanNumChunks-link]
		}
		for j := uint(link); j < huffmanNumChunks; j++ {
			// Table slots are indexed by bit-reversed code prefixes.
			reverse := int(bits.Reverse16(uint16(j)))
			reverse >>= uint(16 - huffmanChunkBits)
			off := j - uint(link)
			if sanity && h.chunks[reverse] != 0 {
				panic("impossible: overwriting existing chunk")
			}
			// An indirect chunk records the link-table index and a
			// length of huffmanChunkBits+1 to mark it as indirect.
			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
			if cap(h.links[off]) < numLinks {
				h.links[off] = make([]uint32, numLinks)
			} else {
				links := h.links[off][:0]
				h.links[off] = links[:numLinks]
			}
		}
	} else {
		h.links = h.links[:0]
	}

	// Fill in an entry (or a set of entries) for every symbol.
	for i, n := range lengths {
		if n == 0 {
			continue
		}
		code := nextcode[n]
		nextcode[n]++
		chunk := uint32(i<<huffmanValueShift | n)
		reverse := int(bits.Reverse16(uint16(code)))
		reverse >>= uint(16 - n)
		if n <= huffmanChunkBits {
			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
				// We should never need to overwrite
				// an existing chunk. Also, 0 is
				// never a valid chunk, because the
				// lower 4 "count" bits should be
				// between 1 and 15.
				if sanity && h.chunks[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				h.chunks[off] = chunk
			}
		} else {
			j := reverse & (huffmanNumChunks - 1)
			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
				// Longer codes should have been
				// associated with a link table above.
				panic("impossible: not an indirect chunk")
			}
			value := h.chunks[j] >> huffmanValueShift
			linktab := h.links[value]
			reverse >>= huffmanChunkBits
			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
				if sanity && linktab[off] != 0 {
					panic("impossible: overwriting existing chunk")
				}
				linktab[off] = chunk
			}
		}
	}

	if sanity {
		// Above we've sanity checked that we never overwrote
		// an existing entry. Here we additionally check that
		// we filled the tables completely.
		for i, chunk := range h.chunks {
			if chunk == 0 {
				// As an exception, in the degenerate
				// single-code case, we allow odd
				// chunks to be missing.
				if code == 1 && i%2 == 1 {
					continue
				}
				panic("impossible: missing chunk")
			}
		}
		for _, linktab := range h.links {
			for _, chunk := range linktab {
				if chunk == 0 {
					panic("impossible: missing chunk")
				}
			}
		}
	}

	return true
}
|  | ||||
// Reader is the actual read interface needed by NewReader.
// If the passed in io.Reader does not also have ReadByte,
// the NewReader will introduce its own buffering.
type Reader interface {
	io.Reader
	io.ByteReader
}
|  | ||||
// decompressor holds all state for inflating a single DEFLATE stream.
// Decompression is driven by repeatedly invoking the step function,
// which pauses by queueing output in toRead (and recording where to
// resume) whenever the output window fills or an error occurs.
type decompressor struct {
	// Input source.
	r       Reader
	roffset int64 // number of input bytes consumed so far

	// Input bits, in top of b.
	b  uint32
	nb uint

	// Huffman decoders for literal/length, distance.
	h1, h2 huffmanDecoder

	// Length arrays used to define Huffman codes.
	bits     *[maxNumLit + maxNumDist]int
	codebits *[numCodes]int

	// Output history, buffer.
	dict dictDecoder

	// Temporary buffer (avoids repeated allocation).
	buf [4]byte

	// Next step in the decompression,
	// and decompression state.
	step      func(*decompressor) // next decompression step to run
	stepState int                 // resume point within huffmanBlock
	final     bool                // current block is the last in the stream
	err       error               // sticky error; io.EOF on normal end of stream
	toRead    []byte              // decompressed bytes waiting to be returned
	hl, hd    *huffmanDecoder     // literal/length and distance decoders for current block
	copyLen   int                 // remaining length of a pending backward copy
	copyDist  int                 // distance of a pending backward copy
}
|  | ||||
// nextBlock reads the 3-bit block header (BFINAL + BTYPE, RFC 1951
// section 3.2.3) and dispatches to the decoder for the block type.
// Errors are recorded in f.err rather than returned.
func (f *decompressor) nextBlock() {
	for f.nb < 1+2 {
		if f.err = f.moreBits(); f.err != nil {
			return
		}
	}
	f.final = f.b&1 == 1
	f.b >>= 1
	typ := f.b & 3
	f.b >>= 2
	f.nb -= 1 + 2
	switch typ {
	case 0:
		// stored (uncompressed) block
		f.dataBlock()
	case 1:
		// compressed, fixed Huffman tables
		f.hl = &fixedHuffmanDecoder
		f.hd = nil
		f.huffmanBlock()
	case 2:
		// compressed, dynamic Huffman tables
		if f.err = f.readHuffman(); f.err != nil {
			break
		}
		f.hl = &f.h1
		f.hd = &f.h2
		f.huffmanBlock()
	default:
		// 3 is reserved.
		f.err = CorruptInputError(f.roffset)
	}
}
|  | ||||
// Read implements io.Reader. It drains any output already queued in
// toRead, and otherwise runs decompression steps until output or an
// error is available. When an error occurs, data remaining in the
// window is flushed so it is returned before the error surfaces.
func (f *decompressor) Read(b []byte) (int, error) {
	for {
		if len(f.toRead) > 0 {
			n := copy(b, f.toRead)
			f.toRead = f.toRead[n:]
			if len(f.toRead) == 0 {
				// Queued output fully consumed: surface any
				// pending (sticky) error alongside these bytes.
				return n, f.err
			}
			return n, nil
		}
		if f.err != nil {
			return 0, f.err
		}
		f.step(f)
		if f.err != nil && len(f.toRead) == 0 {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
		}
	}
}
|  | ||||
// WriteTo implements io.WriterTo (used by io.Copy and friends),
// streaming decompressed data directly into w. It runs steps until a
// terminal error; the window is flushed exactly once before returning,
// and io.EOF (normal end of stream) is reported as success.
func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
	total := int64(0)
	flushed := false
	for {
		if len(f.toRead) > 0 {
			n, err := w.Write(f.toRead)
			total += int64(n)
			if err != nil {
				f.err = err
				return total, err
			}
			if n != len(f.toRead) {
				return total, io.ErrShortWrite
			}
			f.toRead = f.toRead[:0]
		}
		if f.err != nil && flushed {
			if f.err == io.EOF {
				return total, nil
			}
			return total, f.err
		}
		if f.err == nil {
			f.step(f)
		}
		if len(f.toRead) == 0 && f.err != nil && !flushed {
			f.toRead = f.dict.readFlush() // Flush what's left in case of error
			flushed = true
		}
	}
}
|  | ||||
| func (f *decompressor) Close() error { | ||||
| 	if f.err == io.EOF { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return f.err | ||||
| } | ||||
|  | ||||
// RFC 1951 section 3.2.7.
// Compression with dynamic Huffman codes

// codeOrder is the order in which code lengths for the code-length
// alphabet appear in the stream (RFC 1951 section 3.2.7).
var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
|  | ||||
// readHuffman reads the dynamic Huffman table definition that precedes
// a BTYPE=2 block (RFC 1951 section 3.2.7) and initializes f.h1
// (literal/length) and f.h2 (distance) from it. It returns a
// CorruptInputError if any count or code is out of range, or an error
// from reading the underlying stream.
func (f *decompressor) readHuffman() error {
	// HLIT[5], HDIST[5], HCLEN[4].
	for f.nb < 5+5+4 {
		if err := f.moreBits(); err != nil {
			return err
		}
	}
	nlit := int(f.b&0x1F) + 257
	if nlit > maxNumLit {
		return CorruptInputError(f.roffset)
	}
	f.b >>= 5
	ndist := int(f.b&0x1F) + 1
	if ndist > maxNumDist {
		return CorruptInputError(f.roffset)
	}
	f.b >>= 5
	nclen := int(f.b&0xF) + 4
	// numCodes is 19, so nclen is always valid.
	f.b >>= 4
	f.nb -= 5 + 5 + 4

	// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
	for i := 0; i < nclen; i++ {
		for f.nb < 3 {
			if err := f.moreBits(); err != nil {
				return err
			}
		}
		f.codebits[codeOrder[i]] = int(f.b & 0x7)
		f.b >>= 3
		f.nb -= 3
	}
	// Remaining code lengths are implicitly zero.
	for i := nclen; i < len(codeOrder); i++ {
		f.codebits[codeOrder[i]] = 0
	}
	// h1 temporarily holds the code-length (meta) Huffman code.
	if !f.h1.init(f.codebits[0:]) {
		return CorruptInputError(f.roffset)
	}

	// HLIT + 257 code lengths, HDIST + 1 code lengths,
	// using the code length Huffman code.
	for i, n := 0, nlit+ndist; i < n; {
		x, err := f.huffSym(&f.h1)
		if err != nil {
			return err
		}
		if x < 16 {
			// Actual length.
			f.bits[i] = x
			i++
			continue
		}
		// Repeat previous length or zero.
		var rep int
		var nb uint
		var b int
		switch x {
		default:
			return InternalError("unexpected length code")
		case 16:
			// Repeat previous length 3-6 times (2 extra bits).
			rep = 3
			nb = 2
			if i == 0 {
				return CorruptInputError(f.roffset)
			}
			b = f.bits[i-1]
		case 17:
			// Repeat zero length 3-10 times (3 extra bits).
			rep = 3
			nb = 3
			b = 0
		case 18:
			// Repeat zero length 11-138 times (7 extra bits).
			rep = 11
			nb = 7
			b = 0
		}
		for f.nb < nb {
			if err := f.moreBits(); err != nil {
				return err
			}
		}
		rep += int(f.b & uint32(1<<nb-1))
		f.b >>= nb
		f.nb -= nb
		if i+rep > n {
			return CorruptInputError(f.roffset)
		}
		for j := 0; j < rep; j++ {
			f.bits[i] = b
			i++
		}
	}

	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
		return CorruptInputError(f.roffset)
	}

	// As an optimization, we can initialize the min bits to read at a time
	// for the HLIT tree to the length of the EOB marker since we know that
	// every block must terminate with one. This preserves the property that
	// we never read any extra bytes after the end of the DEFLATE stream.
	if f.h1.min < f.bits[endBlockMarker] {
		f.h1.min = f.bits[endBlockMarker]
	}

	return nil
}
|  | ||||
// huffmanBlock decodes a single Huffman block from f.
// f.hl and f.hd are the Huffman states for the lit/length values
// and the distance values, respectively. If f.hd == nil, the
// fixed distance encoding associated with fixed Huffman blocks is used.
//
// The function is resumable: when the output window fills it queues
// output in f.toRead, records its position in f.stepState, and
// returns; the next invocation jumps back to where it left off.
func (f *decompressor) huffmanBlock() {
	const (
		stateInit = iota // Zero value must be stateInit
		stateDict        // resuming inside the backward-copy phase
	)

	switch f.stepState {
	case stateInit:
		goto readLiteral
	case stateDict:
		goto copyHistory
	}

readLiteral:
	// Read literal and/or (length, distance) according to RFC section 3.2.3.
	{
		v, err := f.huffSym(f.hl)
		if err != nil {
			f.err = err
			return
		}
		var n uint // number of bits extra
		var length int
		switch {
		case v < 256:
			// Plain literal byte.
			f.dict.writeByte(byte(v))
			if f.dict.availWrite() == 0 {
				// Window full: pause and flush.
				f.toRead = f.dict.readFlush()
				f.step = (*decompressor).huffmanBlock
				f.stepState = stateInit
				return
			}
			goto readLiteral
		case v == 256:
			// End-of-block marker.
			f.finishBlock()
			return
		// otherwise, reference to older data
		case v < 265:
			length = v - (257 - 3)
			n = 0
		case v < 269:
			length = v*2 - (265*2 - 11)
			n = 1
		case v < 273:
			length = v*4 - (269*4 - 19)
			n = 2
		case v < 277:
			length = v*8 - (273*8 - 35)
			n = 3
		case v < 281:
			length = v*16 - (277*16 - 67)
			n = 4
		case v < 285:
			length = v*32 - (281*32 - 131)
			n = 5
		case v < maxNumLit:
			length = 258
			n = 0
		default:
			f.err = CorruptInputError(f.roffset)
			return
		}
		if n > 0 {
			for f.nb < n {
				if err = f.moreBits(); err != nil {
					f.err = err
					return
				}
			}
			length += int(f.b & uint32(1<<n-1))
			f.b >>= n
			f.nb -= n
		}

		var dist int
		if f.hd == nil {
			// Fixed blocks: distance codes are plain 5-bit values,
			// stored most-significant bit first.
			for f.nb < 5 {
				if err = f.moreBits(); err != nil {
					f.err = err
					return
				}
			}
			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
			f.b >>= 5
			f.nb -= 5
		} else {
			if dist, err = f.huffSym(f.hd); err != nil {
				f.err = err
				return
			}
		}

		switch {
		case dist < 4:
			dist++
		case dist < maxNumDist:
			nb := uint(dist-2) >> 1
			// have 1 bit in bottom of dist, need nb more.
			extra := (dist & 1) << nb
			for f.nb < nb {
				if err = f.moreBits(); err != nil {
					f.err = err
					return
				}
			}
			extra |= int(f.b & uint32(1<<nb-1))
			f.b >>= nb
			f.nb -= nb
			dist = 1<<(nb+1) + 1 + extra
		default:
			f.err = CorruptInputError(f.roffset)
			return
		}

		// No check on length; encoding can be prescient.
		if dist > f.dict.histSize() {
			f.err = CorruptInputError(f.roffset)
			return
		}

		f.copyLen, f.copyDist = length, dist
		goto copyHistory
	}

copyHistory:
	// Perform a backwards copy according to RFC section 3.2.3.
	{
		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
		if cnt == 0 {
			cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
		}
		f.copyLen -= cnt

		if f.dict.availWrite() == 0 || f.copyLen > 0 {
			f.toRead = f.dict.readFlush()
			f.step = (*decompressor).huffmanBlock // We need to continue this work
			f.stepState = stateDict
			return
		}
		goto readLiteral
	}
}
|  | ||||
// dataBlock copies a single stored (uncompressed) block from input to
// output (RFC 1951 section 3.2.4). Stored blocks are byte-aligned, so
// any buffered bits are discarded first.
func (f *decompressor) dataBlock() {
	// Uncompressed.
	// Discard current half-byte.
	f.nb = 0
	f.b = 0

	// Length then ones-complement of length.
	nr, err := io.ReadFull(f.r, f.buf[0:4])
	f.roffset += int64(nr)
	if err != nil {
		f.err = noEOF(err)
		return
	}
	n := int(f.buf[0]) | int(f.buf[1])<<8
	nn := int(f.buf[2]) | int(f.buf[3])<<8
	// LEN must match the complement of NLEN.
	if uint16(nn) != uint16(^n) {
		f.err = CorruptInputError(f.roffset)
		return
	}

	if n == 0 {
		// Empty stored block: flush pending output and finish.
		f.toRead = f.dict.readFlush()
		f.finishBlock()
		return
	}

	f.copyLen = n
	f.copyData()
}
|  | ||||
// copyData copies f.copyLen bytes from the underlying reader into the
// output window. It pauses for reads when the window is full, arranging
// to be re-invoked (via f.step) to copy the remainder.
func (f *decompressor) copyData() {
	buf := f.dict.writeSlice()
	if len(buf) > f.copyLen {
		buf = buf[:f.copyLen]
	}

	cnt, err := io.ReadFull(f.r, buf)
	f.roffset += int64(cnt)
	f.copyLen -= cnt
	f.dict.writeMark(cnt)
	if err != nil {
		f.err = noEOF(err)
		return
	}

	if f.dict.availWrite() == 0 || f.copyLen > 0 {
		// Window full or bytes remaining: flush and resume here later.
		f.toRead = f.dict.readFlush()
		f.step = (*decompressor).copyData
		return
	}
	f.finishBlock()
}
|  | ||||
// finishBlock completes the current block. If it was the final block of
// the stream, remaining window data is queued for reading and the
// sticky error becomes io.EOF; the next step is always set to read the
// next block header (it will not run once f.err is non-nil).
func (f *decompressor) finishBlock() {
	if f.final {
		if f.dict.availRead() > 0 {
			f.toRead = f.dict.readFlush()
		}
		f.err = io.EOF
	}
	f.step = (*decompressor).nextBlock
}
|  | ||||
// noEOF maps io.EOF to io.ErrUnexpectedEOF and returns every other
// error (including nil) unchanged. A bare EOF inside a DEFLATE stream
// means the input was truncated, which callers must see as unexpected.
func noEOF(e error) error {
	if e != io.EOF {
		return e
	}
	return io.ErrUnexpectedEOF
}
|  | ||||
// moreBits reads one byte from the input and appends its 8 bits above
// the bits already buffered in f.b, advancing the input offset.
// io.EOF from the reader is reported as io.ErrUnexpectedEOF.
func (f *decompressor) moreBits() error {
	c, err := f.r.ReadByte()
	if err != nil {
		return noEOF(err)
	}
	f.roffset++
	f.b |= uint32(c) << f.nb
	f.nb += 8
	return nil
}
|  | ||||
// huffSym reads the next Huffman-encoded symbol from f according to h.
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
	// Since a huffmanDecoder can be empty or be composed of a degenerate tree
	// with single element, huffSym must error on these two edge cases. In both
	// cases, the chunks slice will be 0 for the invalid sequence, leading it
	// satisfy the n == 0 check below.
	n := uint(h.min)
	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
	// but is smart enough to keep local variables in registers, so use nb and b,
	// inline call to moreBits and reassign b,nb back to f on return.
	nb, b := f.nb, f.b
	for {
		for nb < n {
			// Inlined body of moreBits, operating on the locals.
			c, err := f.r.ReadByte()
			if err != nil {
				f.b = b
				f.nb = nb
				return 0, noEOF(err)
			}
			f.roffset++
			b |= uint32(c) << (nb & 31)
			nb += 8
		}
		chunk := h.chunks[b&(huffmanNumChunks-1)]
		n = uint(chunk & huffmanCountMask)
		if n > huffmanChunkBits {
			// Indirect chunk: continue the lookup in the linked
			// overflow table using the next bits of input.
			chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
			n = uint(chunk & huffmanCountMask)
		}
		if n <= nb {
			if n == 0 {
				// Invalid code (empty or degenerate tree).
				f.b = b
				f.nb = nb
				f.err = CorruptInputError(f.roffset)
				return 0, f.err
			}
			// Consume the code's bits and return the symbol.
			f.b = b >> (n & 31)
			f.nb = nb - n
			return int(chunk >> huffmanValueShift), nil
		}
	}
}
|  | ||||
| func makeReader(r io.Reader) Reader { | ||||
| 	if rr, ok := r.(Reader); ok { | ||||
| 		return rr | ||||
| 	} | ||||
| 	return bufio.NewReader(r) | ||||
| } | ||||
|  | ||||
| func fixedHuffmanDecoderInit() { | ||||
| 	fixedOnce.Do(func() { | ||||
| 		// These come from the RFC section 3.2.6. | ||||
| 		var bits [288]int | ||||
| 		for i := 0; i < 144; i++ { | ||||
| 			bits[i] = 8 | ||||
| 		} | ||||
| 		for i := 144; i < 256; i++ { | ||||
| 			bits[i] = 9 | ||||
| 		} | ||||
| 		for i := 256; i < 280; i++ { | ||||
| 			bits[i] = 7 | ||||
| 		} | ||||
| 		for i := 280; i < 288; i++ { | ||||
| 			bits[i] = 8 | ||||
| 		} | ||||
| 		fixedHuffmanDecoder.init(bits[:]) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// Reset implements Resetter: it discards all decompression state,
// switches the input to r, and primes the history window with dict.
// Previously allocated storage (bits, codebits, Huffman decoders, and
// the dictionary buffer) is carried over to avoid reallocation.
func (f *decompressor) Reset(r io.Reader, dict []byte) error {
	*f = decompressor{
		r:        makeReader(r),
		bits:     f.bits,
		codebits: f.codebits,
		h1:       f.h1,
		h2:       f.h2,
		dict:     f.dict,
		step:     (*decompressor).nextBlock,
	}
	f.dict.init(maxMatchOffset, dict)
	return nil
}
|  | ||||
// NewReader returns a new ReadCloser that can be used
// to read the uncompressed version of r.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser
// when finished reading.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) io.ReadCloser {
	fixedHuffmanDecoderInit()

	var f decompressor
	f.r = makeReader(r)
	f.bits = new([maxNumLit + maxNumDist]int)
	f.codebits = new([numCodes]int)
	f.step = (*decompressor).nextBlock
	f.dict.init(maxMatchOffset, nil)
	return &f
}
|  | ||||
// NewReaderDict is like NewReader but initializes the reader
// with a preset dictionary. The returned Reader behaves as if
// the uncompressed data stream started with the given dictionary,
// which has already been read. NewReaderDict is typically used
// to read data compressed by NewWriterDict.
//
// The ReadCloser returned by NewReaderDict also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
	fixedHuffmanDecoderInit()

	var f decompressor
	f.r = makeReader(r)
	f.bits = new([maxNumLit + maxNumDist]int)
	f.codebits = new([numCodes]int)
	f.step = (*decompressor).nextBlock
	f.dict.init(maxMatchOffset, dict)
	return &f
}
							
								
								
									
										48
									
								
								vendor/github.com/klauspost/compress/flate/reverse_bits.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										48
									
								
								vendor/github.com/klauspost/compress/flate/reverse_bits.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,48 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package flate | ||||
|  | ||||
// reverseByte maps each byte value to the value with its 8 bits in
// reverse order (reverseByte[b] has bit i of b in position 7-i).
var reverseByte = [256]byte{
	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
	0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
	0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
	0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
	0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
	0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
	0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
	0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
	0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
	0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
	0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
	0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
	0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
	0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
	0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
	0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
	0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
	0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
	0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
	0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
	0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
	0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
}
|  | ||||
| func reverseUint16(v uint16) uint16 { | ||||
| 	return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8 | ||||
| } | ||||
|  | ||||
| func reverseBits(number uint16, bitLength byte) uint16 { | ||||
| 	return reverseUint16(number << uint8(16-bitLength)) | ||||
| } | ||||
							
								
								
									
										900
									
								
								vendor/github.com/klauspost/compress/flate/snappy.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										900
									
								
								vendor/github.com/klauspost/compress/flate/snappy.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,900 @@ | ||||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | ||||
| // Modified for deflate by Klaus Post (c) 2015. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package flate | ||||
|  | ||||
| // emitLiteral writes a literal chunk and returns the number of bytes written. | ||||
| func emitLiteral(dst *tokens, lit []byte) { | ||||
| 	ol := int(dst.n) | ||||
| 	for i, v := range lit { | ||||
| 		dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) | ||||
| 	} | ||||
| 	dst.n += uint16(len(lit)) | ||||
| } | ||||
|  | ||||
| // emitCopy writes a copy chunk and returns the number of bytes written. | ||||
| func emitCopy(dst *tokens, offset, length int) { | ||||
| 	dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize)) | ||||
| 	dst.n++ | ||||
| } | ||||
|  | ||||
// snappyEnc is implemented by the snappy-style encoders (levels 1-4).
// Encode appends tokens for src to dst; Reset discards any history
// kept from previously encoded blocks.
type snappyEnc interface {
	Encode(dst *tokens, src []byte)
	Reset()
}
|  | ||||
| func newSnappy(level int) snappyEnc { | ||||
| 	switch level { | ||||
| 	case 1: | ||||
| 		return &snappyL1{} | ||||
| 	case 2: | ||||
| 		return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} | ||||
| 	case 3: | ||||
| 		return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} | ||||
| 	case 4: | ||||
| 		return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}} | ||||
| 	default: | ||||
| 		panic("invalid level specified") | ||||
| 	} | ||||
| } | ||||
|  | ||||
// Constants shared by the snappy-style encoders. The table* values
// size the match-finder hash table; the *Match* values are deflate
// limits (RFC 1951).
const (
	tableBits       = 14             // Bits used in the table
	tableSize       = 1 << tableBits // Size of the table
	tableMask       = tableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
	tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
	baseMatchOffset = 1              // The smallest match offset
	baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
	maxMatchOffset  = 1 << 15        // The largest match offset
)
|  | ||||
// load32 returns the 4 bytes of b starting at index i, interpreted as
// a little-endian uint32.
func load32(b []byte, i int) uint32 {
	v := b[i : i+4 : len(b)] // One up-front bounds check covers all four reads.
	u := uint32(v[3]) << 24
	u |= uint32(v[2]) << 16
	u |= uint32(v[1]) << 8
	u |= uint32(v[0])
	return u
}
|  | ||||
// load64 returns the 8 bytes of b starting at index i, interpreted as
// a little-endian uint64.
func load64(b []byte, i int) uint64 {
	v := b[i : i+8 : len(b)] // One up-front bounds check covers all eight reads.
	lo := uint64(v[0]) | uint64(v[1])<<8 | uint64(v[2])<<16 | uint64(v[3])<<24
	hi := uint64(v[4]) | uint64(v[5])<<8 | uint64(v[6])<<16 | uint64(v[7])<<24
	return lo | hi<<32
}
|  | ||||
| func hash(u uint32) uint32 { | ||||
| 	return (u * 0x1e35a7bd) >> tableShift | ||||
| } | ||||
|  | ||||
// snappyL1 encapsulates level 1 compression. It is stateless: matches
// are found only within the block being encoded.
type snappyL1 struct{}
|  | ||||
// Reset is a no-op: level 1 keeps no state between blocks.
func (e *snappyL1) Reset() {}
|  | ||||
// Encode tokenizes src into dst using the Snappy block-compression
// match finder, emitting flate tokens instead of the Snappy wire
// format. Matches are found only within src itself.
//
// If src is shorter than minNonLiteralBlockSize, the token table is
// deliberately left unfilled and dst.n is set to len(src); the caller
// detects this and stores the block uncompressed instead.
func (e *snappyL1) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 16 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Initialize the hash table.
	//
	// The table element type is uint16, as s < sLimit and sLimit < len(src)
	// and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
	var table [tableSize]uint16

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	nextHash := hash(load32(src, s))

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := 32

		nextS := s
		candidate := 0
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = int(table[nextHash&tableMask])
			table[nextHash&tableMask] = uint16(s)
			nextHash = hash(load32(src, nextS))
			if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) {
				break
			}
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s

			// Extend the 4-byte match as long as possible.
			//
			// This is an inlined version of Snappy's:
			//	s = extendMatch(src, candidate+4, s+4)
			s += 4
			s1 := base + maxMatchLength
			if s1 > len(src) {
				s1 = len(src)
			}
			a := src[s:s1]
			b := src[candidate+4:]
			b = b[:len(a)]
			l := len(a)
			for i := range a {
				if a[i] != b[i] {
					l = i
					break
				}
			}
			s += l

			// matchToken is flate's equivalent of Snappy's emitCopy.
			dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset))
			dst.n++
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x >> 0))
			table[prevHash&tableMask] = uint16(s - 1)
			currHash := hash(uint32(x >> 8))
			candidate = int(table[currHash&tableMask])
			table[currHash&tableMask] = uint16(s)
			if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
				nextHash = hash(uint32(x >> 16))
				s++
				break
			}
		}
	}

	// Flush any trailing unmatched bytes as one final literal run.
emitRemainder:
	if nextEmit < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
}
|  | ||||
// tableEntry is one hash-table slot for levels 2+: val is the 4 bytes
// that were hashed, and offset is their position biased by e.cur so
// entries left over from earlier blocks can be detected as stale.
type tableEntry struct {
	val    uint32
	offset int32
}
|  | ||||
// load3232 is load32 with an int32 index: it returns the 4 bytes of b
// starting at i as a little-endian uint32.
func load3232(b []byte, i int32) uint32 {
	v := b[i : i+4 : len(b)] // One up-front bounds check covers all four reads.
	u := uint32(v[3]) << 24
	u |= uint32(v[2]) << 16
	u |= uint32(v[1]) << 8
	u |= uint32(v[0])
	return u
}
|  | ||||
// load6432 is load64 with an int32 index: it returns the 8 bytes of b
// starting at i as a little-endian uint64.
func load6432(b []byte, i int32) uint64 {
	v := b[i : i+8 : len(b)] // One up-front bounds check covers all eight reads.
	lo := uint64(v[0]) | uint64(v[1])<<8 | uint64(v[2])<<16 | uint64(v[3])<<24
	hi := uint64(v[4]) | uint64(v[5])<<8 | uint64(v[6])<<16 | uint64(v[7])<<24
	return lo | hi<<32
}
|  | ||||
// snappyGen holds the state shared by the stateful encoders (levels
// 2-4): prev is a copy of the previously encoded block so matches can
// reach back into it, and cur is the running absolute offset of the
// current block, used to bias hash-table offsets and to detect stale
// entries.
type snappyGen struct {
	prev []byte
	cur  int32
}
|  | ||||
// snappyL2 is the level 2 encoder. It extends snappyGen with a
// one-entry-per-slot hash table, which lets matches reach back across
// block boundaries.
type snappyL2 struct {
	snappyGen
	table [tableSize]tableEntry
}
|  | ||||
// Encode uses a similar algorithm to level 1, but is capable
// of matching across blocks giving better compression at a small slowdown.
func (e *snappyL2) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 8 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		for i := range e.table[:] {
			e.table[i] = tableEntry{}
		}
		e.cur = maxStoreBlockSize
	}

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return
	}

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load3232(src, s)
	nextHash := hash(cv)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := int32(32)

		nextS := s
		var candidate tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = e.table[nextHash&tableMask]
			now := load3232(src, nextS)
			e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
			nextHash = hash(now)

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || cv != candidate.val {
				// Out of range or not matched.
				cv = now
				continue
			}
			break
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchlen(s, t, src)

			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
			dst.n++
			s += l
			nextEmit = s
			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+4) < len(src) && t > 0 {
					cv := load3232(src, t)
					e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-1)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
			x >>= 8
			currHash := hash(uint32(x))
			candidate = e.table[currHash&tableMask]
			e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x) != candidate.val {
				cv = uint32(x >> 8)
				nextHash = hash(cv)
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
	// Remember this block so the next call can match into it.
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
}
|  | ||||
// tableEntryPrev holds the two most recent entries hashed into one
// slot: Cur is the newest, Prev the entry it displaced.
type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}
|  | ||||
// snappyL3 is the level 3 encoder. Its hash table keeps two
// candidates per slot (see tableEntryPrev), doubling the chances of
// finding a match at each probe.
type snappyL3 struct {
	snappyGen
	table [tableSize]tableEntryPrev
}
|  | ||||
// Encode uses a similar algorithm to level 2, but will check up to
// two candidates per hash slot when looking for matches.
func (e *snappyL3) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 8 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		for i := range e.table[:] {
			e.table[i] = tableEntryPrev{}
		}
		e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
	}

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return
	}

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load3232(src, s)
	nextHash := hash(cv)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := int32(32)

		nextS := s
		var candidate tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidates := e.table[nextHash&tableMask]
			now := load3232(src, nextS)
			e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
			nextHash = hash(now)

			// Check both candidates
			candidate = candidates.Cur
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset <= maxMatchOffset {
					break
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset <= maxMatchOffset {
						break
					}
				}
			}
			cv = now
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchlen(s, t, src)

			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
			dst.n++
			s += l
			nextEmit = s
			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+4) < len(src) && t > 0 {
					cv := load3232(src, t)
					nextHash = hash(cv)
					e.table[nextHash&tableMask] = tableEntryPrev{
						Prev: e.table[nextHash&tableMask].Cur,
						Cur:  tableEntry{offset: e.cur + t, val: cv},
					}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-3 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-3)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 3, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))

			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 2, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))

			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 1, val: uint32(x)},
			}
			x >>= 8
			currHash := hash(uint32(x))
			candidates := e.table[currHash&tableMask]
			cv = uint32(x)
			e.table[currHash&tableMask] = tableEntryPrev{
				Prev: candidates.Cur,
				Cur:  tableEntry{offset: s + e.cur, val: cv},
			}

			// Check both candidates
			candidate = candidates.Cur
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset <= maxMatchOffset {
					continue
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset <= maxMatchOffset {
						continue
					}
				}
			}
			cv = uint32(x >> 8)
			nextHash = hash(cv)
			s++
			break
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
	// Remember this block so the next call can match into it.
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
}
|  | ||||
// snappyL4 is the level 4 encoder. It reuses the level 3 table but
// its Encode additionally tries the alternative candidate when the
// first match is short.
type snappyL4 struct {
	snappyL3
}
|  | ||||
| // Encode uses a similar algorithm to level 3, | ||||
| // but will check up to two candidates if first isn't long enough. | ||||
| func (e *snappyL4) Encode(dst *tokens, src []byte) { | ||||
| 	const ( | ||||
| 		inputMargin            = 8 - 3 | ||||
| 		minNonLiteralBlockSize = 1 + 1 + inputMargin | ||||
| 		matchLenGood           = 12 | ||||
| 	) | ||||
|  | ||||
| 	// Protect against e.cur wraparound. | ||||
| 	if e.cur > 1<<30 { | ||||
| 		for i := range e.table[:] { | ||||
| 			e.table[i] = tableEntryPrev{} | ||||
| 		} | ||||
| 		e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} | ||||
| 	} | ||||
|  | ||||
| 	// This check isn't in the Snappy implementation, but there, the caller | ||||
| 	// instead of the callee handles this case. | ||||
| 	if len(src) < minNonLiteralBlockSize { | ||||
| 		// We do not fill the token table. | ||||
| 		// This will be picked up by caller. | ||||
| 		dst.n = uint16(len(src)) | ||||
| 		e.cur += maxStoreBlockSize | ||||
| 		e.prev = e.prev[:0] | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// sLimit is when to stop looking for offset/length copies. The inputMargin | ||||
| 	// lets us use a fast path for emitLiteral in the main loop, while we are | ||||
| 	// looking for copies. | ||||
| 	sLimit := int32(len(src) - inputMargin) | ||||
|  | ||||
| 	// nextEmit is where in src the next emitLiteral should start from. | ||||
| 	nextEmit := int32(0) | ||||
| 	s := int32(0) | ||||
| 	cv := load3232(src, s) | ||||
| 	nextHash := hash(cv) | ||||
|  | ||||
| 	for { | ||||
| 		// Copied from the C++ snappy implementation: | ||||
| 		// | ||||
| 		// Heuristic match skipping: If 32 bytes are scanned with no matches | ||||
| 		// found, start looking only at every other byte. If 32 more bytes are | ||||
| 		// scanned (or skipped), look at every third byte, etc.. When a match | ||||
| 		// is found, immediately go back to looking at every byte. This is a | ||||
| 		// small loss (~5% performance, ~0.1% density) for compressible data | ||||
| 		// due to more bookkeeping, but for non-compressible data (such as | ||||
| 		// JPEG) it's a huge win since the compressor quickly "realizes" the | ||||
| 		// data is incompressible and doesn't bother looking for matches | ||||
| 		// everywhere. | ||||
| 		// | ||||
| 		// The "skip" variable keeps track of how many bytes there are since | ||||
| 		// the last match; dividing it by 32 (ie. right-shifting by five) gives | ||||
| 		// the number of bytes to move ahead for each iteration. | ||||
| 		skip := int32(32) | ||||
|  | ||||
| 		nextS := s | ||||
| 		var candidate tableEntry | ||||
| 		var candidateAlt tableEntry | ||||
| 		for { | ||||
| 			s = nextS | ||||
| 			bytesBetweenHashLookups := skip >> 5 | ||||
| 			nextS = s + bytesBetweenHashLookups | ||||
| 			skip += bytesBetweenHashLookups | ||||
| 			if nextS > sLimit { | ||||
| 				goto emitRemainder | ||||
| 			} | ||||
| 			candidates := e.table[nextHash&tableMask] | ||||
| 			now := load3232(src, nextS) | ||||
| 			e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} | ||||
| 			nextHash = hash(now) | ||||
|  | ||||
| 			// Check both candidates | ||||
| 			candidate = candidates.Cur | ||||
| 			if cv == candidate.val { | ||||
| 				offset := s - (candidate.offset - e.cur) | ||||
| 				if offset < maxMatchOffset { | ||||
| 					offset = s - (candidates.Prev.offset - e.cur) | ||||
| 					if cv == candidates.Prev.val && offset < maxMatchOffset { | ||||
| 						candidateAlt = candidates.Prev | ||||
| 					} | ||||
| 					break | ||||
| 				} | ||||
| 			} else { | ||||
| 				// We only check if value mismatches. | ||||
| 				// Offset will always be invalid in other cases. | ||||
| 				candidate = candidates.Prev | ||||
| 				if cv == candidate.val { | ||||
| 					offset := s - (candidate.offset - e.cur) | ||||
| 					if offset < maxMatchOffset { | ||||
| 						break | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 			cv = now | ||||
| 		} | ||||
|  | ||||
| 		// A 4-byte match has been found. We'll later see if more than 4 bytes | ||||
| 		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit | ||||
| 		// them as literal bytes. | ||||
| 		emitLiteral(dst, src[nextEmit:s]) | ||||
|  | ||||
| 		// Call emitCopy, and then see if another emitCopy could be our next | ||||
| 		// move. Repeat until we find no match for the input immediately after | ||||
| 		// what was consumed by the last emitCopy call. | ||||
| 		// | ||||
| 		// If we exit this loop normally then we need to call emitLiteral next, | ||||
| 		// though we don't yet know how big the literal will be. We handle that | ||||
| 		// by proceeding to the next iteration of the main loop. We also can | ||||
| 		// exit this loop via goto if we get close to exhausting the input. | ||||
| 		for { | ||||
| 			// Invariant: we have a 4-byte match at s, and no need to emit any | ||||
| 			// literal bytes prior to s. | ||||
|  | ||||
| 			// Extend the 4-byte match as long as possible. | ||||
| 			// | ||||
| 			s += 4 | ||||
| 			t := candidate.offset - e.cur + 4 | ||||
| 			l := e.matchlen(s, t, src) | ||||
| 			// Try alternative candidate if match length < matchLenGood. | ||||
| 			if l < matchLenGood-4 && candidateAlt.offset != 0 { | ||||
| 				t2 := candidateAlt.offset - e.cur + 4 | ||||
| 				l2 := e.matchlen(s, t2, src) | ||||
| 				if l2 > l { | ||||
| 					l = l2 | ||||
| 					t = t2 | ||||
| 				} | ||||
| 			} | ||||
| 			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) | ||||
| 			dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) | ||||
| 			dst.n++ | ||||
| 			s += l | ||||
| 			nextEmit = s | ||||
| 			if s >= sLimit { | ||||
| 				t += l | ||||
| 				// Index first pair after match end. | ||||
| 				if int(t+4) < len(src) && t > 0 { | ||||
| 					cv := load3232(src, t) | ||||
| 					nextHash = hash(cv) | ||||
| 					e.table[nextHash&tableMask] = tableEntryPrev{ | ||||
| 						Prev: e.table[nextHash&tableMask].Cur, | ||||
| 						Cur:  tableEntry{offset: e.cur + t, val: cv}, | ||||
| 					} | ||||
| 				} | ||||
| 				goto emitRemainder | ||||
| 			} | ||||
|  | ||||
| 			// We could immediately start working at s now, but to improve | ||||
| 			// compression we first update the hash table at s-3 to s. If | ||||
| 			// another emitCopy is not our next move, also calculate nextHash | ||||
| 			// at s+1. At least on GOARCH=amd64, these three hash calculations | ||||
| 			// are faster as one load64 call (with some shifts) instead of | ||||
| 			// three load32 calls. | ||||
| 			x := load6432(src, s-3) | ||||
| 			prevHash := hash(uint32(x)) | ||||
| 			e.table[prevHash&tableMask] = tableEntryPrev{ | ||||
| 				Prev: e.table[prevHash&tableMask].Cur, | ||||
| 				Cur:  tableEntry{offset: e.cur + s - 3, val: uint32(x)}, | ||||
| 			} | ||||
| 			x >>= 8 | ||||
| 			prevHash = hash(uint32(x)) | ||||
|  | ||||
| 			e.table[prevHash&tableMask] = tableEntryPrev{ | ||||
| 				Prev: e.table[prevHash&tableMask].Cur, | ||||
| 				Cur:  tableEntry{offset: e.cur + s - 2, val: uint32(x)}, | ||||
| 			} | ||||
| 			x >>= 8 | ||||
| 			prevHash = hash(uint32(x)) | ||||
|  | ||||
| 			e.table[prevHash&tableMask] = tableEntryPrev{ | ||||
| 				Prev: e.table[prevHash&tableMask].Cur, | ||||
| 				Cur:  tableEntry{offset: e.cur + s - 1, val: uint32(x)}, | ||||
| 			} | ||||
| 			x >>= 8 | ||||
| 			currHash := hash(uint32(x)) | ||||
| 			candidates := e.table[currHash&tableMask] | ||||
| 			cv = uint32(x) | ||||
| 			e.table[currHash&tableMask] = tableEntryPrev{ | ||||
| 				Prev: candidates.Cur, | ||||
| 				Cur:  tableEntry{offset: s + e.cur, val: cv}, | ||||
| 			} | ||||
|  | ||||
| 			// Check both candidates | ||||
| 			candidate = candidates.Cur | ||||
| 			candidateAlt = tableEntry{} | ||||
| 			if cv == candidate.val { | ||||
| 				offset := s - (candidate.offset - e.cur) | ||||
| 				if offset <= maxMatchOffset { | ||||
| 					offset = s - (candidates.Prev.offset - e.cur) | ||||
| 					if cv == candidates.Prev.val && offset <= maxMatchOffset { | ||||
| 						candidateAlt = candidates.Prev | ||||
| 					} | ||||
| 					continue | ||||
| 				} | ||||
| 			} else { | ||||
| 				// We only check if value mismatches. | ||||
| 				// Offset will always be invalid in other cases. | ||||
| 				candidate = candidates.Prev | ||||
| 				if cv == candidate.val { | ||||
| 					offset := s - (candidate.offset - e.cur) | ||||
| 					if offset <= maxMatchOffset { | ||||
| 						continue | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 			cv = uint32(x >> 8) | ||||
| 			nextHash = hash(cv) | ||||
| 			s++ | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| emitRemainder: | ||||
| 	if int(nextEmit) < len(src) { | ||||
| 		emitLiteral(dst, src[nextEmit:]) | ||||
| 	} | ||||
| 	e.cur += int32(len(src)) | ||||
| 	e.prev = e.prev[:len(src)] | ||||
| 	copy(e.prev, src) | ||||
| } | ||||
|  | ||||
// matchlen returns how far a known 4-byte match starting at s (in src) and
// t can be extended. t may be negative, in which case the match target lies
// in the previous block e.prev at offset len(e.prev)+t. The returned length
// excludes the 4 bytes already matched and is capped so the total match
// never exceeds maxMatchLength.
func (e *snappyGen) matchlen(s, t int32, src []byte) int32 {
	// s1 is the exclusive upper bound in src: at most maxMatchLength-4
	// additional bytes (4 are already matched), clamped to the input end.
	s1 := int(s) + maxMatchLength - 4
	if s1 > len(src) {
		s1 = len(src)
	}

	// If we are inside the current block
	if t >= 0 {
		b := src[t:]
		a := src[s:s1]
		// t < s, so b is always at least as long as a.
		b = b[:len(a)]
		// Extend the match to be as long as possible.
		for i := range a {
			if a[i] != b[i] {
				return int32(i)
			}
		}
		return int32(len(a))
	}

	// We found a match in the previous block.
	// tp is the corresponding index into e.prev; if it is negative the
	// candidate predates the history we kept, so no match is possible.
	tp := int32(len(e.prev)) + t
	if tp < 0 {
		return 0
	}

	// Extend the match to be as long as possible.
	a := src[s:s1]
	b := e.prev[tp:]
	if len(b) > len(a) {
		b = b[:len(a)]
	}
	a = a[:len(b)]
	for i := range b {
		if a[i] != b[i] {
			return int32(i)
		}
	}

	// If we reached our limit, we matched everything we are
	// allowed to in the previous block and we return.
	n := int32(len(b))
	if int(s+n) == s1 {
		return n
	}

	// Continue looking for more matches in the current block.
	// The target ran off the end of e.prev, so it wraps around to the
	// start of the current block (src[0]).
	a = src[s+n : s1]
	b = src[:len(a)]
	for i := range a {
		if a[i] != b[i] {
			return int32(i) + n
		}
	}
	return int32(len(a)) + n
}
|  | ||||
| // Reset the encoding table. | ||||
| func (e *snappyGen) Reset() { | ||||
| 	e.prev = e.prev[:0] | ||||
| 	e.cur += maxMatchOffset | ||||
| } | ||||
							
								
								
									
										115
									
								
								vendor/github.com/klauspost/compress/flate/token.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										115
									
								
								vendor/github.com/klauspost/compress/flate/token.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,115 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package flate | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
const (
	// A token packs a literal or a match into a single uint32:
	// 2 bits:   type   0 = literal  1=EOF  2=Match   3=Unused
	// 8 bits:   xlength = length - MIN_MATCH_LENGTH
	// 22 bits   xoffset = offset - MIN_OFFSET_SIZE, or literal
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1 // low 22 bits: xoffset or literal value
	typeMask    = 3 << 30            // top 2 bits: token type
	literalType = 0 << 30
	matchType   = 1 << 30
)
|  | ||||
// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
// is lengthCodes[length - MIN_MATCH_LENGTH].
// The table has one entry per possible xlength (0-255); codes run 0-28.
var lengthCodes = [...]uint32{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
	27, 27, 27, 27, 27, 28,
}
|  | ||||
// offsetCodes maps small offsets (0-255) directly to their offset code;
// offsetCode extends this table to larger offsets by shifting the offset
// right by 7 or 14 bits and adding 14 or 28 to the code.
var offsetCodes = [...]uint32{
	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
}
|  | ||||
// token holds one literal or match, packed into 32 bits as described in
// the const block above.
type token uint32

// tokens is a fixed-capacity buffer of tokens plus its fill count.
type tokens struct {
	tokens [maxStoreBlockSize + 1]token
	n      uint16 // Must be able to contain maxStoreBlockSize
}
|  | ||||
// Convert a literal into a literal token.
func literalToken(literal uint32) token { return token(literalType + literal) }

// Convert a < xlength, xoffset > pair into a match token.
// No range checking is performed; matchTokend is the checked variant.
func matchToken(xlength uint32, xoffset uint32) token {
	return token(matchType + xlength<<lengthShift + xoffset)
}
|  | ||||
| func matchTokend(xlength uint32, xoffset uint32) token { | ||||
| 	if xlength > maxMatchLength || xoffset > maxMatchOffset { | ||||
| 		panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset)) | ||||
| 		return token(matchType) | ||||
| 	} | ||||
| 	return token(matchType + xlength<<lengthShift + xoffset) | ||||
| } | ||||
|  | ||||
// Returns the type of a token
func (t token) typ() uint32 { return uint32(t) & typeMask }

// Returns the literal of a literal token
func (t token) literal() uint32 { return uint32(t - literalType) }

// Returns the extra offset of a match token
func (t token) offset() uint32 { return uint32(t) & offsetMask }

// Returns the xlength field of a match token.
func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }

// Returns the length code for an xlength (length - MIN_MATCH_LENGTH).
func lengthCode(len uint32) uint32 { return lengthCodes[len] }
|  | ||||
| // Returns the offset code corresponding to a specific offset | ||||
| func offsetCode(off uint32) uint32 { | ||||
| 	if off < uint32(len(offsetCodes)) { | ||||
| 		return offsetCodes[off] | ||||
| 	} else if off>>7 < uint32(len(offsetCodes)) { | ||||
| 		return offsetCodes[off>>7] + 14 | ||||
| 	} else { | ||||
| 		return offsetCodes[off>>14] + 28 | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										344
									
								
								vendor/github.com/klauspost/compress/gzip/gunzip.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										344
									
								
								vendor/github.com/klauspost/compress/gzip/gunzip.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,344 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package gzip implements reading and writing of gzip format compressed files, | ||||
| // as specified in RFC 1952. | ||||
| package gzip | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"encoding/binary" | ||||
| 	"errors" | ||||
| 	"hash/crc32" | ||||
| 	"io" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/klauspost/compress/flate" | ||||
| ) | ||||
|  | ||||
const (
	gzipID1     = 0x1f // first byte of the GZIP magic number
	gzipID2     = 0x8b // second byte of the GZIP magic number
	gzipDeflate = 8    // CM field: deflate, the only supported method
	// FLG bits, RFC 1952 section 2.3.1.
	flagText    = 1 << 0
	flagHdrCrc  = 1 << 1
	flagExtra   = 1 << 2
	flagName    = 1 << 3
	flagComment = 1 << 4
)

var (
	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
	ErrChecksum = errors.New("gzip: invalid checksum")
	// ErrHeader is returned when reading GZIP data that has an invalid header.
	ErrHeader = errors.New("gzip: invalid header")
)

// le is shorthand for the little-endian byte order used by all integer
// fields in the GZIP header and trailer.
var le = binary.LittleEndian
|  | ||||
| // noEOF converts io.EOF to io.ErrUnexpectedEOF. | ||||
| func noEOF(err error) error { | ||||
| 	if err == io.EOF { | ||||
| 		return io.ErrUnexpectedEOF | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
|  | ||||
// The gzip file stores a header giving metadata about the compressed file.
// That header is exposed as the fields of the Writer and Reader structs.
//
// Strings must be UTF-8 encoded and may only contain Unicode code points
// U+0001 through U+00FF, due to limitations of the GZIP file format.
type Header struct {
	Comment string    // comment
	Extra   []byte    // "extra data"
	ModTime time.Time // modification time
	Name    string    // file name
	OS      byte      // operating system type (255 = unknown)
}
|  | ||||
// A Reader is an io.Reader that can be read to retrieve
// uncompressed data from a gzip-format compressed file.
//
// In general, a gzip file can be a concatenation of gzip files,
// each with its own header. Reads from the Reader
// return the concatenation of the uncompressed data of each.
// Only the first header is recorded in the Reader fields.
//
// Gzip files store a length and checksum of the uncompressed data.
// The Reader will return a ErrChecksum when Read
// reaches the end of the uncompressed data if it does not
// have the expected length or checksum. Clients should treat data
// returned by Read as tentative until they receive the io.EOF
// marking the end of the data.
type Reader struct {
	Header       // valid after NewReader or Reader.Reset
	r            flate.Reader  // source of compressed bytes
	decompressor io.ReadCloser // flate reader, reused across Reset
	digest       uint32        // CRC-32, IEEE polynomial (section 8)
	size         uint32        // Uncompressed size (section 2.3.1)
	buf          [512]byte     // scratch for header strings and trailer
	err          error         // sticky error; returned by subsequent Reads
	multistream  bool          // see Multistream
}
|  | ||||
| // NewReader creates a new Reader reading the given reader. | ||||
| // If r does not also implement io.ByteReader, | ||||
| // the decompressor may read more data than necessary from r. | ||||
| // | ||||
| // It is the caller's responsibility to call Close on the Reader when done. | ||||
| // | ||||
| // The Reader.Header fields will be valid in the Reader returned. | ||||
| func NewReader(r io.Reader) (*Reader, error) { | ||||
| 	z := new(Reader) | ||||
| 	if err := z.Reset(r); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return z, nil | ||||
| } | ||||
|  | ||||
| // Reset discards the Reader z's state and makes it equivalent to the | ||||
| // result of its original state from NewReader, but reading from r instead. | ||||
| // This permits reusing a Reader rather than allocating a new one. | ||||
| func (z *Reader) Reset(r io.Reader) error { | ||||
| 	*z = Reader{ | ||||
| 		decompressor: z.decompressor, | ||||
| 		multistream:  true, | ||||
| 	} | ||||
| 	if rr, ok := r.(flate.Reader); ok { | ||||
| 		z.r = rr | ||||
| 	} else { | ||||
| 		z.r = bufio.NewReader(r) | ||||
| 	} | ||||
| 	z.Header, z.err = z.readHeader() | ||||
| 	return z.err | ||||
| } | ||||
|  | ||||
// Multistream controls whether the reader supports multistream files.
//
// If enabled (the default), the Reader expects the input to be a sequence
// of individually gzipped data streams, each with its own header and
// trailer, ending at EOF. The effect is that the concatenation of a sequence
// of gzipped files is treated as equivalent to the gzip of the concatenation
// of the sequence. This is standard behavior for gzip readers.
//
// Calling Multistream(false) disables this behavior; disabling the behavior
// can be useful when reading file formats that distinguish individual gzip
// data streams or mix gzip data streams with other data streams.
// In this mode, when the Reader reaches the end of the data stream,
// Read returns io.EOF. If the underlying reader implements io.ByteReader,
// it will be left positioned just after the gzip stream.
// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
// If there is no next stream, z.Reset(r) will return io.EOF.
func (z *Reader) Multistream(ok bool) {
	// Plain flag write; consulted at each stream boundary in Read/WriteTo.
	z.multistream = ok
}
|  | ||||
| // readString reads a NUL-terminated string from z.r. | ||||
| // It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and | ||||
| // will output a string encoded using UTF-8. | ||||
| // This method always updates z.digest with the data read. | ||||
| func (z *Reader) readString() (string, error) { | ||||
| 	var err error | ||||
| 	needConv := false | ||||
| 	for i := 0; ; i++ { | ||||
| 		if i >= len(z.buf) { | ||||
| 			return "", ErrHeader | ||||
| 		} | ||||
| 		z.buf[i], err = z.r.ReadByte() | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
| 		if z.buf[i] > 0x7f { | ||||
| 			needConv = true | ||||
| 		} | ||||
| 		if z.buf[i] == 0 { | ||||
| 			// Digest covers the NUL terminator. | ||||
| 			z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1]) | ||||
|  | ||||
| 			// Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1). | ||||
| 			if needConv { | ||||
| 				s := make([]rune, 0, i) | ||||
| 				for _, v := range z.buf[:i] { | ||||
| 					s = append(s, rune(v)) | ||||
| 				} | ||||
| 				return string(s), nil | ||||
| 			} | ||||
| 			return string(z.buf[:i]), nil | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// readHeader reads the GZIP header according to section 2.3.1.
// This method does not set z.err.
func (z *Reader) readHeader() (hdr Header, err error) {
	// Fixed 10-byte header: magic, method, flags, mtime, XFL, OS.
	if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
		// RFC 1952, section 2.2, says the following:
		//	A gzip file consists of a series of "members" (compressed data sets).
		//
		// Other than this, the specification does not clarify whether a
		// "series" is defined as "one or more" or "zero or more". To err on the
		// side of caution, Go interprets this to mean "zero or more".
		// Thus, it is okay to return io.EOF here.
		return hdr, err
	}
	if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
		return hdr, ErrHeader
	}
	flg := z.buf[3]
	hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
	// z.buf[8] is XFL and is currently ignored.
	hdr.OS = z.buf[9]
	// The optional header CRC (FHCRC) covers everything read so far.
	z.digest = crc32.ChecksumIEEE(z.buf[:10])

	if flg&flagExtra != 0 {
		// FEXTRA: 2-byte little-endian length, then that many bytes.
		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
			return hdr, noEOF(err)
		}
		z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
		data := make([]byte, le.Uint16(z.buf[:2]))
		if _, err = io.ReadFull(z.r, data); err != nil {
			return hdr, noEOF(err)
		}
		z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
		hdr.Extra = data
	}

	var s string
	if flg&flagName != 0 {
		// FNAME: NUL-terminated Latin-1 file name.
		if s, err = z.readString(); err != nil {
			return hdr, err
		}
		hdr.Name = s
	}

	if flg&flagComment != 0 {
		// FCOMMENT: NUL-terminated Latin-1 comment.
		if s, err = z.readString(); err != nil {
			return hdr, err
		}
		hdr.Comment = s
	}

	if flg&flagHdrCrc != 0 {
		// FHCRC: low 16 bits of the CRC-32 of the header bytes.
		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
			return hdr, noEOF(err)
		}
		digest := le.Uint16(z.buf[:2])
		if digest != uint16(z.digest) {
			return hdr, ErrHeader
		}
	}

	// Reset the digest for the member's data and (re)arm the flate reader.
	z.digest = 0
	if z.decompressor == nil {
		z.decompressor = flate.NewReader(z.r)
	} else {
		z.decompressor.(flate.Resetter).Reset(z.r, nil)
	}
	return hdr, nil
}
|  | ||||
// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
func (z *Reader) Read(p []byte) (n int, err error) {
	// Errors are sticky: once set, every subsequent Read fails the same way.
	if z.err != nil {
		return 0, z.err
	}

	n, z.err = z.decompressor.Read(p)
	// Track CRC and length of uncompressed data for trailer verification.
	z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
	z.size += uint32(n)
	if z.err != io.EOF {
		// In the normal case we return here.
		return n, z.err
	}

	// Finished file; check checksum and size.
	// Trailer is CRC-32 then ISIZE, both little-endian (section 2.3.1).
	if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
		z.err = noEOF(err)
		return n, z.err
	}
	digest := le.Uint32(z.buf[:4])
	size := le.Uint32(z.buf[4:8])
	if digest != z.digest || size != z.size {
		z.err = ErrChecksum
		return n, z.err
	}
	z.digest, z.size = 0, 0

	// File is ok; check if there is another.
	if !z.multistream {
		return n, io.EOF
	}
	z.err = nil // Remove io.EOF

	if _, z.err = z.readHeader(); z.err != nil {
		return n, z.err
	}

	// Read from next file, if necessary.
	if n > 0 {
		return n, nil
	}
	return z.Read(p)
}
|  | ||||
// Support the io.WriteTo interface for io.Copy and friends.
// WriteTo streams all members to w, verifying each member's trailer,
// and returns the total number of uncompressed bytes written.
func (z *Reader) WriteTo(w io.Writer) (int64, error) {
	total := int64(0)
	crcWriter := crc32.NewIEEE()
	for {
		if z.err != nil {
			// io.EOF from a previous iteration means clean termination.
			if z.err == io.EOF {
				return total, nil
			}
			return total, z.err
		}

		// We write both to output and digest.
		mw := io.MultiWriter(w, crcWriter)
		n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
		total += n
		z.size += uint32(n)
		if err != nil {
			z.err = err
			return total, z.err
		}

		// Finished file; check checksum + size.
		if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			z.err = err
			return total, err
		}
		z.digest = crcWriter.Sum32()
		digest := le.Uint32(z.buf[:4])
		size := le.Uint32(z.buf[4:8])
		if digest != z.digest || size != z.size {
			z.err = ErrChecksum
			return total, z.err
		}
		z.digest, z.size = 0, 0

		// File is ok; check if there is another.
		if !z.multistream {
			return total, nil
		}
		crcWriter.Reset()
		z.err = nil // Remove io.EOF

		if _, z.err = z.readHeader(); z.err != nil {
			// Clean EOF at a member boundary is normal termination.
			if z.err == io.EOF {
				return total, nil
			}
			return total, z.err
		}
	}
}
|  | ||||
// Close closes the Reader. It does not close the underlying io.Reader.
// In order for the GZIP checksum to be verified, the reader must be
// fully consumed until the io.EOF.
// The returned error is whatever the flate decompressor's Close reports.
func (z *Reader) Close() error { return z.decompressor.Close() }
							
								
								
									
										251
									
								
								vendor/github.com/klauspost/compress/gzip/gzip.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										251
									
								
								vendor/github.com/klauspost/compress/gzip/gzip.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,251 @@ | ||||
| // Copyright 2010 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package gzip | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"hash/crc32" | ||||
| 	"io" | ||||
|  | ||||
| 	"github.com/klauspost/compress/flate" | ||||
| ) | ||||
|  | ||||
// These constants are copied from the flate package, so that code that imports
// "compress/gzip" does not also have to import "compress/flate".
// NewWriterLevel rejects levels outside [HuffmanOnly, BestCompression].
const (
	NoCompression       = flate.NoCompression
	BestSpeed           = flate.BestSpeed
	BestCompression     = flate.BestCompression
	DefaultCompression  = flate.DefaultCompression
	ConstantCompression = flate.ConstantCompression
	HuffmanOnly         = flate.HuffmanOnly
)
|  | ||||
// A Writer is an io.WriteCloser.
// Writes to a Writer are compressed and written to w.
type Writer struct {
	Header      // written at first call to Write, Flush, or Close
	w           io.Writer
	level       int  // flate compression level passed to flate.NewWriter
	wroteHeader bool // header is emitted lazily by Write
	compressor  *flate.Writer
	digest      uint32 // CRC-32, IEEE polynomial (section 8)
	size        uint32 // Uncompressed size (section 2.3.1)
	closed      bool
	buf         [10]byte // scratch for header/trailer encoding
	err         error    // sticky error
}
|  | ||||
// NewWriter returns a new Writer.
// Writes to the returned writer are compressed and written to w.
//
// It is the caller's responsibility to call Close on the WriteCloser when done.
// Writes may be buffered and not flushed until Close.
//
// Callers that wish to set the fields in Writer.Header must do so before
// the first call to Write, Flush, or Close.
func NewWriter(w io.Writer) *Writer {
	// Error ignored: DefaultCompression is a valid level for NewWriterLevel.
	z, _ := NewWriterLevel(w, DefaultCompression)
	return z
}
|  | ||||
| // NewWriterLevel is like NewWriter but specifies the compression level instead | ||||
| // of assuming DefaultCompression. | ||||
| // | ||||
| // The compression level can be DefaultCompression, NoCompression, or any | ||||
| // integer value between BestSpeed and BestCompression inclusive. The error | ||||
| // returned will be nil if the level is valid. | ||||
| func NewWriterLevel(w io.Writer, level int) (*Writer, error) { | ||||
| 	if level < HuffmanOnly || level > BestCompression { | ||||
| 		return nil, fmt.Errorf("gzip: invalid compression level: %d", level) | ||||
| 	} | ||||
| 	z := new(Writer) | ||||
| 	z.init(w, level) | ||||
| 	return z, nil | ||||
| } | ||||
|  | ||||
| func (z *Writer) init(w io.Writer, level int) { | ||||
| 	compressor := z.compressor | ||||
| 	if compressor != nil { | ||||
| 		compressor.Reset(w) | ||||
| 	} | ||||
| 	*z = Writer{ | ||||
| 		Header: Header{ | ||||
| 			OS: 255, // unknown | ||||
| 		}, | ||||
| 		w:          w, | ||||
| 		level:      level, | ||||
| 		compressor: compressor, | ||||
| 	} | ||||
| } | ||||
|  | ||||
// Reset discards the Writer z's state and makes it equivalent to the
// result of its original state from NewWriter or NewWriterLevel, but
// writing to w instead. This permits reusing a Writer rather than
// allocating a new one.
func (z *Writer) Reset(w io.Writer) {
	// Re-initialize at the same level; the flate compressor is reused.
	z.init(w, z.level)
}
|  | ||||
| // writeBytes writes a length-prefixed byte slice to z.w. | ||||
| func (z *Writer) writeBytes(b []byte) error { | ||||
| 	if len(b) > 0xffff { | ||||
| 		return errors.New("gzip.Write: Extra data is too large") | ||||
| 	} | ||||
| 	le.PutUint16(z.buf[:2], uint16(len(b))) | ||||
| 	_, err := z.w.Write(z.buf[:2]) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	_, err = z.w.Write(b) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // writeString writes a UTF-8 string s in GZIP's format to z.w. | ||||
| // GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). | ||||
| func (z *Writer) writeString(s string) (err error) { | ||||
| 	// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. | ||||
| 	needconv := false | ||||
| 	for _, v := range s { | ||||
| 		if v == 0 || v > 0xff { | ||||
| 			return errors.New("gzip.Write: non-Latin-1 header string") | ||||
| 		} | ||||
| 		if v > 0x7f { | ||||
| 			needconv = true | ||||
| 		} | ||||
| 	} | ||||
| 	if needconv { | ||||
| 		b := make([]byte, 0, len(s)) | ||||
| 		for _, v := range s { | ||||
| 			b = append(b, byte(v)) | ||||
| 		} | ||||
| 		_, err = z.w.Write(b) | ||||
| 	} else { | ||||
| 		_, err = io.WriteString(z.w, s) | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	// GZIP strings are NUL-terminated. | ||||
| 	z.buf[0] = 0 | ||||
| 	_, err = z.w.Write(z.buf[:1]) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
// Write writes a compressed form of p to the underlying io.Writer. The
// compressed bytes are not necessarily flushed until the Writer is closed.
func (z *Writer) Write(p []byte) (int, error) {
	// Errors are sticky: once set, every subsequent Write fails the same way.
	if z.err != nil {
		return 0, z.err
	}
	var n int
	// Write the GZIP header lazily.
	if !z.wroteHeader {
		z.wroteHeader = true
		z.buf[0] = gzipID1
		z.buf[1] = gzipID2
		z.buf[2] = gzipDeflate
		z.buf[3] = 0
		// Flag bits: FEXTRA (0x04), FNAME (0x08), FCOMMENT (0x10).
		if z.Extra != nil {
			z.buf[3] |= 0x04
		}
		if z.Name != "" {
			z.buf[3] |= 0x08
		}
		if z.Comment != "" {
			z.buf[3] |= 0x10
		}
		le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
		// XFL hint byte: 2 for slowest/best compression, 4 for fastest.
		if z.level == BestCompression {
			z.buf[8] = 2
		} else if z.level == BestSpeed {
			z.buf[8] = 4
		} else {
			z.buf[8] = 0
		}
		z.buf[9] = z.OS
		n, z.err = z.w.Write(z.buf[:10])
		if z.err != nil {
			return n, z.err
		}
		// Optional header fields, in the order the format requires.
		if z.Extra != nil {
			z.err = z.writeBytes(z.Extra)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.Name != "" {
			z.err = z.writeString(z.Name)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.Comment != "" {
			z.err = z.writeString(z.Comment)
			if z.err != nil {
				return n, z.err
			}
		}
		// Create the compressor on first use; the error is ignored because
		// the level was validated by NewWriterLevel.
		if z.compressor == nil {
			z.compressor, _ = flate.NewWriter(z.w, z.level)
		}
	}
	// Account CRC and size of the uncompressed data for the trailer.
	z.size += uint32(len(p))
	z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
	n, z.err = z.compressor.Write(p)
	return n, z.err
}
|  | ||||
| // Flush flushes any pending compressed data to the underlying writer. | ||||
| // | ||||
| // It is useful mainly in compressed network protocols, to ensure that | ||||
| // a remote reader has enough data to reconstruct a packet. Flush does | ||||
| // not return until the data has been written. If the underlying | ||||
| // writer returns an error, Flush returns that error. | ||||
| // | ||||
| // In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. | ||||
| func (z *Writer) Flush() error { | ||||
| 	if z.err != nil { | ||||
| 		return z.err | ||||
| 	} | ||||
| 	if z.closed { | ||||
| 		return nil | ||||
| 	} | ||||
| 	if !z.wroteHeader { | ||||
| 		z.Write(nil) | ||||
| 		if z.err != nil { | ||||
| 			return z.err | ||||
| 		} | ||||
| 	} | ||||
| 	z.err = z.compressor.Flush() | ||||
| 	return z.err | ||||
| } | ||||
|  | ||||
// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
	// Sticky error short-circuits; Close is idempotent once closed.
	if z.err != nil {
		return z.err
	}
	if z.closed {
		return nil
	}
	z.closed = true
	if !z.wroteHeader {
		// Even an empty stream must emit the header before the trailer.
		z.Write(nil)
		if z.err != nil {
			return z.err
		}
	}
	z.err = z.compressor.Close()
	if z.err != nil {
		return z.err
	}
	// Trailer: the running CRC-32 digest, then the total uncompressed
	// size (z.size is uint32, so it is inherently size mod 2^32).
	// NOTE(review): `le` is defined elsewhere in this package — presumably
	// binary.LittleEndian, matching the GZIP (RFC 1952) trailer encoding.
	le.PutUint32(z.buf[:4], z.digest)
	le.PutUint32(z.buf[4:8], z.size)
	_, z.err = z.w.Write(z.buf[:8])
	return z.err
}
							
								
								
									
										15
									
								
								vendor/github.com/klauspost/compress/snappy/AUTHORS
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								vendor/github.com/klauspost/compress/snappy/AUTHORS
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,15 @@ | ||||
| # This is the official list of Snappy-Go authors for copyright purposes. | ||||
| # This file is distinct from the CONTRIBUTORS files. | ||||
| # See the latter for an explanation. | ||||
|  | ||||
| # Names should be added to this file as | ||||
| #	Name or Organization <email address> | ||||
| # The email address is not required for organizations. | ||||
|  | ||||
| # Please keep the list sorted. | ||||
|  | ||||
| Damian Gryski <dgryski@gmail.com> | ||||
| Google Inc. | ||||
| Jan Mercl <0xjnml@gmail.com> | ||||
| Rodolfo Carvalho <rhcarvalho@gmail.com> | ||||
| Sebastien Binet <seb.binet@gmail.com> | ||||
							
								
								
									
										37
									
								
								vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										37
									
								
								vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,37 @@ | ||||
| # This is the official list of people who can contribute | ||||
| # (and typically have contributed) code to the Snappy-Go repository. | ||||
| # The AUTHORS file lists the copyright holders; this file | ||||
| # lists people.  For example, Google employees are listed here | ||||
| # but not in AUTHORS, because Google holds the copyright. | ||||
| # | ||||
| # The submission process automatically checks to make sure | ||||
| # that people submitting code are listed in this file (by email address). | ||||
| # | ||||
| # Names should be added to this file only after verifying that | ||||
| # the individual or the individual's organization has agreed to | ||||
| # the appropriate Contributor License Agreement, found here: | ||||
| # | ||||
| #     http://code.google.com/legal/individual-cla-v1.0.html | ||||
| #     http://code.google.com/legal/corporate-cla-v1.0.html | ||||
| # | ||||
| # The agreement for individuals can be filled out on the web. | ||||
| # | ||||
| # When adding J Random Contributor's name to this file, | ||||
| # either J's name or J's organization's name should be | ||||
| # added to the AUTHORS file, depending on whether the | ||||
| # individual or corporate CLA was used. | ||||
|  | ||||
| # Names should be added to this file like so: | ||||
| #     Name <email address> | ||||
|  | ||||
| # Please keep the list sorted. | ||||
|  | ||||
| Damian Gryski <dgryski@gmail.com> | ||||
| Jan Mercl <0xjnml@gmail.com> | ||||
| Kai Backman <kaib@golang.org> | ||||
| Marc-Antoine Ruel <maruel@chromium.org> | ||||
| Nigel Tao <nigeltao@golang.org> | ||||
| Rob Pike <r@golang.org> | ||||
| Rodolfo Carvalho <rhcarvalho@gmail.com> | ||||
| Russ Cox <rsc@golang.org> | ||||
| Sebastien Binet <seb.binet@gmail.com> | ||||
							
								
								
									
										27
									
								
								vendor/github.com/klauspost/compress/snappy/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/github.com/klauspost/compress/snappy/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,27 @@ | ||||
| Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. | ||||
|  | ||||
| Redistribution and use in source and binary forms, with or without | ||||
| modification, are permitted provided that the following conditions are | ||||
| met: | ||||
|  | ||||
|    * Redistributions of source code must retain the above copyright | ||||
| notice, this list of conditions and the following disclaimer. | ||||
|    * Redistributions in binary form must reproduce the above | ||||
| copyright notice, this list of conditions and the following disclaimer | ||||
| in the documentation and/or other materials provided with the | ||||
| distribution. | ||||
|    * Neither the name of Google Inc. nor the names of its | ||||
| contributors may be used to endorse or promote products derived from | ||||
| this software without specific prior written permission. | ||||
|  | ||||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
							
								
								
									
										183
									
								
								vendor/github.com/klauspost/compress/zlib/reader.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										183
									
								
								vendor/github.com/klauspost/compress/zlib/reader.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,183 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| /* | ||||
| Package zlib implements reading and writing of zlib format compressed data, | ||||
| as specified in RFC 1950. | ||||
|  | ||||
| The implementation provides filters that uncompress during reading | ||||
| and compress during writing.  For example, to write compressed data | ||||
| to a buffer: | ||||
|  | ||||
| 	var b bytes.Buffer | ||||
| 	w := zlib.NewWriter(&b) | ||||
| 	w.Write([]byte("hello, world\n")) | ||||
| 	w.Close() | ||||
|  | ||||
| and to read that data back: | ||||
|  | ||||
| 	r, err := zlib.NewReader(&b) | ||||
| 	io.Copy(os.Stdout, r) | ||||
| 	r.Close() | ||||
| */ | ||||
| package zlib | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"errors" | ||||
| 	"hash" | ||||
| 	"hash/adler32" | ||||
| 	"io" | ||||
|  | ||||
| 	"github.com/klauspost/compress/flate" | ||||
| ) | ||||
|  | ||||
// zlibDeflate is the CM (compression method) value for deflate in a
// zlib stream header (RFC 1950 section 2.2).
const zlibDeflate = 8

var (
	// ErrChecksum is returned when reading ZLIB data that has an invalid checksum.
	ErrChecksum = errors.New("zlib: invalid checksum")
	// ErrDictionary is returned when reading ZLIB data that has an invalid dictionary.
	ErrDictionary = errors.New("zlib: invalid dictionary")
	// ErrHeader is returned when reading ZLIB data that has an invalid header.
	ErrHeader = errors.New("zlib: invalid header")
)
|  | ||||
// reader wraps a flate decompressor and validates the zlib framing
// (header and Adler-32 trailer) around the raw deflate stream.
type reader struct {
	r            flate.Reader  // buffered byte source; set by Reset
	decompressor io.ReadCloser // inflater reading from r; reused across Resets
	digest       hash.Hash32   // running Adler-32 of the decompressed output
	err          error         // sticky error (may be io.EOF after a clean finish)
	scratch      [4]byte       // scratch space for header and checksum bytes
}
|  | ||||
// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
// to switch to a new underlying Reader. This permits reusing a ReadCloser
// instead of allocating a new one.
type Resetter interface {
	// Reset discards any buffered data and resets the Resetter as if it was
	// newly initialized with the given reader.
	Reset(r io.Reader, dict []byte) error
}
|  | ||||
| // NewReader creates a new ReadCloser. | ||||
| // Reads from the returned ReadCloser read and decompress data from r. | ||||
| // If r does not implement io.ByteReader, the decompressor may read more | ||||
| // data than necessary from r. | ||||
| // It is the caller's responsibility to call Close on the ReadCloser when done. | ||||
| // | ||||
| // The ReadCloser returned by NewReader also implements Resetter. | ||||
| func NewReader(r io.Reader) (io.ReadCloser, error) { | ||||
| 	return NewReaderDict(r, nil) | ||||
| } | ||||
|  | ||||
| // NewReaderDict is like NewReader but uses a preset dictionary. | ||||
| // NewReaderDict ignores the dictionary if the compressed data does not refer to it. | ||||
| // If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary. | ||||
| // | ||||
| // The ReadCloser returned by NewReaderDict also implements Resetter. | ||||
| func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) { | ||||
| 	z := new(reader) | ||||
| 	err := z.Reset(r, dict) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return z, nil | ||||
| } | ||||
|  | ||||
// Read decompresses data from the underlying stream into p.
// When the deflate stream ends, Read consumes the 4-byte big-endian
// Adler-32 trailer and verifies it against the running digest; a clean
// end of stream returns io.EOF, a mismatch returns ErrChecksum.
func (z *reader) Read(p []byte) (int, error) {
	if z.err != nil {
		return 0, z.err
	}

	var n int
	n, z.err = z.decompressor.Read(p)
	// Fold whatever was decompressed into the Adler-32 digest, even on error.
	z.digest.Write(p[0:n])
	if z.err != io.EOF {
		// In the normal case we return here.
		return n, z.err
	}

	// Finished file; check checksum.
	if _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil {
		if err == io.EOF {
			// Trailer truncated: the stream ended before the checksum.
			err = io.ErrUnexpectedEOF
		}
		z.err = err
		return n, z.err
	}
	// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
	checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
	if checksum != z.digest.Sum32() {
		z.err = ErrChecksum
		return n, z.err
	}
	return n, io.EOF
}
|  | ||||
| // Calling Close does not close the wrapped io.Reader originally passed to NewReader. | ||||
| // In order for the ZLIB checksum to be verified, the reader must be | ||||
| // fully consumed until the io.EOF. | ||||
| func (z *reader) Close() error { | ||||
| 	if z.err != nil && z.err != io.EOF { | ||||
| 		return z.err | ||||
| 	} | ||||
| 	z.err = z.decompressor.Close() | ||||
| 	return z.err | ||||
| } | ||||
|  | ||||
// Reset discards the reader's state and makes it equivalent to the result
// of NewReaderDict reading from r with dictionary dict, reusing the
// existing decompressor and digest allocations where possible.
func (z *reader) Reset(r io.Reader, dict []byte) error {
	// Keep the reusable components; zero everything else.
	*z = reader{decompressor: z.decompressor, digest: z.digest}
	if fr, ok := r.(flate.Reader); ok {
		z.r = fr
	} else {
		// Wrap r so the decompressor gets the byte-at-a-time interface
		// it needs without over-reading the caller's stream.
		z.r = bufio.NewReader(r)
	}

	// Read the header (RFC 1950 section 2.2.).
	_, z.err = io.ReadFull(z.r, z.scratch[0:2])
	if z.err != nil {
		if z.err == io.EOF {
			z.err = io.ErrUnexpectedEOF
		}
		return z.err
	}
	// CMF|FLG as a 16-bit value must be a multiple of 31, and the low
	// nibble of CMF must be the deflate method.
	h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
	if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
		z.err = ErrHeader
		return z.err
	}
	// FDICT bit: a preset-dictionary Adler-32 follows the header.
	haveDict := z.scratch[1]&0x20 != 0
	if haveDict {
		_, z.err = io.ReadFull(z.r, z.scratch[0:4])
		if z.err != nil {
			if z.err == io.EOF {
				z.err = io.ErrUnexpectedEOF
			}
			return z.err
		}
		// Big-endian Adler-32 of the dictionary the stream was compressed with.
		checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
		if checksum != adler32.Checksum(dict) {
			z.err = ErrDictionary
			return z.err
		}
	}

	// Create the inflater on first use; reset the existing one on reuse.
	if z.decompressor == nil {
		if haveDict {
			z.decompressor = flate.NewReaderDict(z.r, dict)
		} else {
			z.decompressor = flate.NewReader(z.r)
		}
	} else {
		z.decompressor.(flate.Resetter).Reset(z.r, dict)
	}

	// Reuse the digest if present; otherwise allocate a fresh Adler-32.
	if z.digest != nil {
		z.digest.Reset()
	} else {
		z.digest = adler32.New()
	}
	return nil
}
							
								
								
									
										201
									
								
								vendor/github.com/klauspost/compress/zlib/writer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										201
									
								
								vendor/github.com/klauspost/compress/zlib/writer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,201 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package zlib | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"hash" | ||||
| 	"hash/adler32" | ||||
| 	"io" | ||||
|  | ||||
| 	"github.com/klauspost/compress/flate" | ||||
| ) | ||||
|  | ||||
// These constants are copied from the flate package, so that code that imports
// "compress/zlib" does not also have to import "compress/flate".
// NOTE(review): ConstantCompression is presumably an extension of the
// github.com/klauspost/compress/flate fork and not part of the standard
// library's compress/flate — confirm against that package's docs.
const (
	NoCompression       = flate.NoCompression
	BestSpeed           = flate.BestSpeed
	BestCompression     = flate.BestCompression
	DefaultCompression  = flate.DefaultCompression
	ConstantCompression = flate.ConstantCompression
	HuffmanOnly         = flate.HuffmanOnly
)
|  | ||||
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	w           io.Writer     // destination for the compressed stream
	level       int           // compression level handed to flate
	dict        []byte        // optional preset dictionary (nil if unused)
	compressor  *flate.Writer // created lazily by writeHeader on first use
	digest      hash.Hash32   // running Adler-32 of the uncompressed input
	err         error         // sticky error; once set, subsequent calls fail
	scratch     [4]byte       // scratch space for header/trailer encoding
	wroteHeader bool          // whether the 2-byte zlib header has been emitted
}
|  | ||||
| // NewWriter creates a new Writer. | ||||
| // Writes to the returned Writer are compressed and written to w. | ||||
| // | ||||
| // It is the caller's responsibility to call Close on the WriteCloser when done. | ||||
| // Writes may be buffered and not flushed until Close. | ||||
| func NewWriter(w io.Writer) *Writer { | ||||
| 	z, _ := NewWriterLevelDict(w, DefaultCompression, nil) | ||||
| 	return z | ||||
| } | ||||
|  | ||||
| // NewWriterLevel is like NewWriter but specifies the compression level instead | ||||
| // of assuming DefaultCompression. | ||||
| // | ||||
| // The compression level can be DefaultCompression, NoCompression, HuffmanOnly | ||||
| // or any integer value between BestSpeed and BestCompression inclusive. | ||||
| // The error returned will be nil if the level is valid. | ||||
| func NewWriterLevel(w io.Writer, level int) (*Writer, error) { | ||||
| 	return NewWriterLevelDict(w, level, nil) | ||||
| } | ||||
|  | ||||
| // NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to | ||||
| // compress with. | ||||
| // | ||||
| // The dictionary may be nil. If not, its contents should not be modified until | ||||
| // the Writer is closed. | ||||
| func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) { | ||||
| 	if level < HuffmanOnly || level > BestCompression { | ||||
| 		return nil, fmt.Errorf("zlib: invalid compression level: %d", level) | ||||
| 	} | ||||
| 	return &Writer{ | ||||
| 		w:     w, | ||||
| 		level: level, | ||||
| 		dict:  dict, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| // Reset clears the state of the Writer z such that it is equivalent to its | ||||
| // initial state from NewWriterLevel or NewWriterLevelDict, but instead writing | ||||
| // to w. | ||||
| func (z *Writer) Reset(w io.Writer) { | ||||
| 	z.w = w | ||||
| 	// z.level and z.dict left unchanged. | ||||
| 	if z.compressor != nil { | ||||
| 		z.compressor.Reset(w) | ||||
| 	} | ||||
| 	if z.digest != nil { | ||||
| 		z.digest.Reset() | ||||
| 	} | ||||
| 	z.err = nil | ||||
| 	z.scratch = [4]byte{} | ||||
| 	z.wroteHeader = false | ||||
| } | ||||
|  | ||||
// writeHeader writes the ZLIB header (and, if a dictionary is set, its
// Adler-32), then lazily creates the deflate compressor and digest.
func (z *Writer) writeHeader() (err error) {
	z.wroteHeader = true
	// ZLIB has a two-byte header (as documented in RFC 1950).
	// The first four bits is the CINFO (compression info), which is 7 for the default deflate window size.
	// The next four bits is the CM (compression method), which is 8 for deflate.
	z.scratch[0] = 0x78
	// The next two bits is the FLEVEL (compression level). The four values are:
	// 0=fastest, 1=fast, 2=default, 3=best.
	// The next bit, FDICT, is set if a dictionary is given.
	// The final five FCHECK bits form a mod-31 checksum.
	switch z.level {
	case -2, 0, 1:
		z.scratch[1] = 0 << 6
	case 2, 3, 4, 5:
		z.scratch[1] = 1 << 6
	case 6, -1:
		z.scratch[1] = 2 << 6
	case 7, 8, 9:
		z.scratch[1] = 3 << 6
	default:
		// NewWriterLevelDict validates z.level, so every reachable level
		// is covered by the cases above.
		panic("unreachable")
	}
	if z.dict != nil {
		z.scratch[1] |= 1 << 5 // FDICT bit
	}
	// FCHECK: pad FLG so that CMF*256 + FLG is a multiple of 31.
	z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31)
	if _, err = z.w.Write(z.scratch[0:2]); err != nil {
		return err
	}
	if z.dict != nil {
		// The next four bytes are the Adler-32 checksum of the dictionary.
		checksum := adler32.Checksum(z.dict)
		z.scratch[0] = uint8(checksum >> 24)
		z.scratch[1] = uint8(checksum >> 16)
		z.scratch[2] = uint8(checksum >> 8)
		z.scratch[3] = uint8(checksum >> 0)
		if _, err = z.w.Write(z.scratch[0:4]); err != nil {
			return err
		}
	}
	if z.compressor == nil {
		// Initialize deflater unless the Writer is being reused
		// after a Reset call.
		z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict)
		if err != nil {
			return err
		}
		z.digest = adler32.New()
	}
	return nil
}
|  | ||||
| // Write writes a compressed form of p to the underlying io.Writer. The | ||||
| // compressed bytes are not necessarily flushed until the Writer is closed or | ||||
| // explicitly flushed. | ||||
| func (z *Writer) Write(p []byte) (n int, err error) { | ||||
| 	if !z.wroteHeader { | ||||
| 		z.err = z.writeHeader() | ||||
| 	} | ||||
| 	if z.err != nil { | ||||
| 		return 0, z.err | ||||
| 	} | ||||
| 	if len(p) == 0 { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	n, err = z.compressor.Write(p) | ||||
| 	if err != nil { | ||||
| 		z.err = err | ||||
| 		return | ||||
| 	} | ||||
| 	z.digest.Write(p) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Flush flushes the Writer to its underlying io.Writer. | ||||
| func (z *Writer) Flush() error { | ||||
| 	if !z.wroteHeader { | ||||
| 		z.err = z.writeHeader() | ||||
| 	} | ||||
| 	if z.err != nil { | ||||
| 		return z.err | ||||
| 	} | ||||
| 	z.err = z.compressor.Flush() | ||||
| 	return z.err | ||||
| } | ||||
|  | ||||
// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
	if !z.wroteHeader {
		// Even an empty stream must carry a valid zlib header.
		z.err = z.writeHeader()
	}
	if z.err != nil {
		return z.err
	}
	z.err = z.compressor.Close()
	if z.err != nil {
		return z.err
	}
	// Trailer: Adler-32 of all uncompressed input, most significant byte first.
	checksum := z.digest.Sum32()
	// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
	z.scratch[0] = uint8(checksum >> 24)
	z.scratch[1] = uint8(checksum >> 16)
	z.scratch[2] = uint8(checksum >> 8)
	z.scratch[3] = uint8(checksum >> 0)
	_, z.err = z.w.Write(z.scratch[0:4])
	return z.err
}
							
								
								
									
										24
									
								
								vendor/github.com/klauspost/cpuid/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								vendor/github.com/klauspost/cpuid/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | ||||
| # Compiled Object files, Static and Dynamic libs (Shared Objects) | ||||
| *.o | ||||
| *.a | ||||
| *.so | ||||
|  | ||||
| # Folders | ||||
| _obj | ||||
| _test | ||||
|  | ||||
| # Architecture specific extensions/prefixes | ||||
| *.[568vq] | ||||
| [568vq].out | ||||
|  | ||||
| *.cgo1.go | ||||
| *.cgo2.c | ||||
| _cgo_defun.c | ||||
| _cgo_gotypes.go | ||||
| _cgo_export.* | ||||
|  | ||||
| _testmain.go | ||||
|  | ||||
| *.exe | ||||
| *.test | ||||
| *.prof | ||||
							
								
								
									
										23
									
								
								vendor/github.com/klauspost/cpuid/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								vendor/github.com/klauspost/cpuid/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,23 @@ | ||||
| language: go | ||||
|  | ||||
| sudo: false | ||||
|  | ||||
| os: | ||||
|   - linux | ||||
|   - osx   | ||||
| go: | ||||
|   - 1.8.x | ||||
|   - 1.9.x | ||||
|   - 1.10.x | ||||
|   - master | ||||
|  | ||||
| script:  | ||||
|  - go vet ./... | ||||
|  - go test -v ./... | ||||
|  - go test -race ./... | ||||
|  - diff <(gofmt -d .) <("")  | ||||
|  | ||||
| matrix: | ||||
|   allow_failures: | ||||
|     - go: 'master' | ||||
|   fast_finish: true  | ||||
							
								
								
									
										35
									
								
								vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										35
									
								
								vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,35 @@ | ||||
| Developer Certificate of Origin | ||||
| Version 1.1 | ||||
|  | ||||
| Copyright (C) 2015- Klaus Post & Contributors. | ||||
| Email: klauspost@gmail.com | ||||
|  | ||||
| Everyone is permitted to copy and distribute verbatim copies of this | ||||
| license document, but changing it is not allowed. | ||||
|  | ||||
|  | ||||
| Developer's Certificate of Origin 1.1 | ||||
|  | ||||
| By making a contribution to this project, I certify that: | ||||
|  | ||||
| (a) The contribution was created in whole or in part by me and I | ||||
|     have the right to submit it under the open source license | ||||
|     indicated in the file; or | ||||
|  | ||||
| (b) The contribution is based upon previous work that, to the best | ||||
|     of my knowledge, is covered under an appropriate open source | ||||
|     license and I have the right under that license to submit that | ||||
|     work with modifications, whether created in whole or in part | ||||
|     by me, under the same open source license (unless I am | ||||
|     permitted to submit under a different license), as indicated | ||||
|     in the file; or | ||||
|  | ||||
| (c) The contribution was provided directly to me by some other | ||||
|     person who certified (a), (b) or (c) and I have not modified | ||||
|     it. | ||||
|  | ||||
| (d) I understand and agree that this project and the contribution | ||||
|     are public and that a record of the contribution (including all | ||||
|     personal information I submit with it, including my sign-off) is | ||||
|     maintained indefinitely and may be redistributed consistent with | ||||
|     this project or the open source license(s) involved. | ||||
							
								
								
									
										22
									
								
								vendor/github.com/klauspost/cpuid/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								vendor/github.com/klauspost/cpuid/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,22 @@ | ||||
| The MIT License (MIT) | ||||
|  | ||||
| Copyright (c) 2015 Klaus Post | ||||
|  | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
| of this software and associated documentation files (the "Software"), to deal | ||||
| in the Software without restriction, including without limitation the rights | ||||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||
| copies of the Software, and to permit persons to whom the Software is | ||||
| furnished to do so, subject to the following conditions: | ||||
|  | ||||
| The above copyright notice and this permission notice shall be included in all | ||||
| copies or substantial portions of the Software. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||
| SOFTWARE. | ||||
|  | ||||
							
								
								
									
										145
									
								
								vendor/github.com/klauspost/cpuid/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										145
									
								
								vendor/github.com/klauspost/cpuid/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,145 @@ | ||||
| # cpuid | ||||
| Package cpuid provides information about the CPU running the current program. | ||||
|  | ||||
| CPU features are detected on startup, and kept for fast access through the life of the application. | ||||
| Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. | ||||
|  | ||||
| You can access the CPU information by accessing the shared CPU variable of the cpuid library. | ||||
|  | ||||
| Package home: https://github.com/klauspost/cpuid | ||||
|  | ||||
| [![GoDoc][1]][2] [![Build Status][3]][4] | ||||
|  | ||||
| [1]: https://godoc.org/github.com/klauspost/cpuid?status.svg | ||||
| [2]: https://godoc.org/github.com/klauspost/cpuid | ||||
| [3]: https://travis-ci.org/klauspost/cpuid.svg | ||||
| [4]: https://travis-ci.org/klauspost/cpuid | ||||
|  | ||||
| # features | ||||
| ## CPU Instructions | ||||
| *  **CMOV** (i686 CMOV) | ||||
| *  **NX** (NX (No-Execute) bit) | ||||
| *  **AMD3DNOW** (AMD 3DNOW) | ||||
| *  **AMD3DNOWEXT** (AMD 3DNowExt) | ||||
| *  **MMX** (standard MMX) | ||||
| *  **MMXEXT** (SSE integer functions or AMD MMX ext) | ||||
| *  **SSE** (SSE functions) | ||||
| *  **SSE2** (P4 SSE functions) | ||||
| *  **SSE3** (Prescott SSE3 functions) | ||||
| *  **SSSE3** (Conroe SSSE3 functions) | ||||
| *  **SSE4** (Penryn SSE4.1 functions) | ||||
| *  **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) | ||||
| *  **SSE42** (Nehalem SSE4.2 functions) | ||||
| *  **AVX** (AVX functions) | ||||
| *  **AVX2** (AVX2 functions) | ||||
| *  **FMA3** (Intel FMA 3) | ||||
| *  **FMA4** (Bulldozer FMA4 functions) | ||||
| *  **XOP** (Bulldozer XOP functions) | ||||
| *  **F16C** (Half-precision floating-point conversion) | ||||
| *  **BMI1** (Bit Manipulation Instruction Set 1) | ||||
| *  **BMI2** (Bit Manipulation Instruction Set 2) | ||||
| *  **TBM** (AMD Trailing Bit Manipulation) | ||||
| *  **LZCNT** (LZCNT instruction) | ||||
| *  **POPCNT** (POPCNT instruction) | ||||
| *  **AESNI** (Advanced Encryption Standard New Instructions) | ||||
| *  **CLMUL** (Carry-less Multiplication) | ||||
| *  **HTT** (Hyperthreading (enabled)) | ||||
| *  **HLE** (Hardware Lock Elision) | ||||
| *  **RTM** (Restricted Transactional Memory) | ||||
| *  **RDRAND** (RDRAND instruction is available) | ||||
| *  **RDSEED** (RDSEED instruction is available) | ||||
| *  **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) | ||||
| *  **SHA** (Intel SHA Extensions) | ||||
| *  **AVX512F** (AVX-512 Foundation) | ||||
| *  **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) | ||||
| *  **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) | ||||
| *  **AVX512PF** (AVX-512 Prefetch Instructions) | ||||
| *  **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) | ||||
| *  **AVX512CD** (AVX-512 Conflict Detection Instructions) | ||||
| *  **AVX512BW** (AVX-512 Byte and Word Instructions) | ||||
| *  **AVX512VL** (AVX-512 Vector Length Extensions) | ||||
| *  **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) | ||||
| *  **MPX** (Intel MPX (Memory Protection Extensions)) | ||||
| *  **ERMS** (Enhanced REP MOVSB/STOSB) | ||||
| *  **RDTSCP** (RDTSCP Instruction) | ||||
| *  **CX16** (CMPXCHG16B Instruction) | ||||
| *  **SGX** (Software Guard Extensions, with activation details) | ||||
|  | ||||
| ## Performance | ||||
| *  **RDTSCP()** Returns current cycle count. Can be used for benchmarking. | ||||
| *  **SSE2SLOW** (SSE2 is supported, but usually not faster) | ||||
| *  **SSE3SLOW** (SSE3 is supported, but usually not faster) | ||||
| *  **ATOM** (Atom processor, some SSSE3 instructions are slower) | ||||
| *  **Cache line** (Probable size of a cache line). | ||||
| *  **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. | ||||
|  | ||||
| ## CPU Vendor/VM | ||||
| * **Intel** | ||||
| * **AMD** | ||||
| * **VIA** | ||||
| * **Transmeta** | ||||
| * **NSC** | ||||
| * **KVM**  (Kernel-based Virtual Machine) | ||||
| * **MSVM** (Microsoft Hyper-V or Windows Virtual PC) | ||||
| * **VMware** | ||||
| * **XenHVM** | ||||
|  | ||||
| # installing | ||||
|  | ||||
| ```go get github.com/klauspost/cpuid``` | ||||
|  | ||||
| # example | ||||
|  | ||||
| ```Go | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"github.com/klauspost/cpuid" | ||||
| ) | ||||
|  | ||||
| func main() { | ||||
| 	// Print basic CPU information: | ||||
| 	fmt.Println("Name:", cpuid.CPU.BrandName) | ||||
| 	fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores) | ||||
| 	fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore) | ||||
| 	fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores) | ||||
| 	fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model) | ||||
| 	fmt.Println("Features:", cpuid.CPU.Features) | ||||
| 	fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine) | ||||
| 	fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes") | ||||
| 	fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1I, "bytes") | ||||
| 	fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes") | ||||
| 	fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes") | ||||
|  | ||||
| 	// Test if we have a specific feature: | ||||
| 	if cpuid.CPU.SSE() { | ||||
| 		fmt.Println("We have Streaming SIMD Extensions") | ||||
| 	} | ||||
| } | ||||
| ``` | ||||
|  | ||||
| Sample output: | ||||
| ``` | ||||
| >go run main.go | ||||
| Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz | ||||
| PhysicalCores: 2 | ||||
| ThreadsPerCore: 2 | ||||
| LogicalCores: 4 | ||||
| Family 6 Model: 42 | ||||
| Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL | ||||
| Cacheline bytes: 64 | ||||
| We have Streaming SIMD Extensions | ||||
| ``` | ||||
|  | ||||
| # private package | ||||
|  | ||||
| In the "private" folder you can find an autogenerated version of the library you can include in your own packages. | ||||
|  | ||||
| For this purpose all exports are removed, and functions and constants are lowercased. | ||||
|  | ||||
| This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages. | ||||
|  | ||||
| # license | ||||
|  | ||||
| This code is published under an MIT license. See LICENSE file for more information. | ||||
							
								
								
									
										1040
									
								
								vendor/github.com/klauspost/cpuid/cpuid.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1040
									
								
								vendor/github.com/klauspost/cpuid/cpuid.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										42
									
								
								vendor/github.com/klauspost/cpuid/cpuid_386.s
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										42
									
								
								vendor/github.com/klauspost/cpuid/cpuid_386.s
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,42 @@ | ||||
| // Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. | ||||
|  | ||||
| // +build 386,!gccgo | ||||
|  | ||||
| // func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) | ||||
| TEXT ·asmCpuid(SB), 7, $0 | ||||
| 	XORL CX, CX | ||||
| 	MOVL op+0(FP), AX | ||||
| 	CPUID | ||||
| 	MOVL AX, eax+4(FP) | ||||
| 	MOVL BX, ebx+8(FP) | ||||
| 	MOVL CX, ecx+12(FP) | ||||
| 	MOVL DX, edx+16(FP) | ||||
| 	RET | ||||
|  | ||||
| // func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) | ||||
| TEXT ·asmCpuidex(SB), 7, $0 | ||||
| 	MOVL op+0(FP), AX | ||||
| 	MOVL op2+4(FP), CX | ||||
| 	CPUID | ||||
| 	MOVL AX, eax+8(FP) | ||||
| 	MOVL BX, ebx+12(FP) | ||||
| 	MOVL CX, ecx+16(FP) | ||||
| 	MOVL DX, edx+20(FP) | ||||
| 	RET | ||||
|  | ||||
| // func xgetbv(index uint32) (eax, edx uint32) | ||||
| TEXT ·asmXgetbv(SB), 7, $0 | ||||
| 	MOVL index+0(FP), CX | ||||
| 	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV | ||||
| 	MOVL AX, eax+4(FP) | ||||
| 	MOVL DX, edx+8(FP) | ||||
| 	RET | ||||
|  | ||||
| // func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) | ||||
| TEXT ·asmRdtscpAsm(SB), 7, $0 | ||||
| 	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP | ||||
| 	MOVL AX, eax+0(FP) | ||||
| 	MOVL BX, ebx+4(FP) | ||||
| 	MOVL CX, ecx+8(FP) | ||||
| 	MOVL DX, edx+12(FP) | ||||
| 	RET | ||||
							
								
								
									
										42
									
								
								vendor/github.com/klauspost/cpuid/cpuid_amd64.s
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										42
									
								
								vendor/github.com/klauspost/cpuid/cpuid_amd64.s
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,42 @@ | ||||
| // Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. | ||||
|  | ||||
| //+build amd64,!gccgo | ||||
|  | ||||
| // func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) | ||||
| TEXT ·asmCpuid(SB), 7, $0 | ||||
| 	XORQ CX, CX | ||||
| 	MOVL op+0(FP), AX | ||||
| 	CPUID | ||||
| 	MOVL AX, eax+8(FP) | ||||
| 	MOVL BX, ebx+12(FP) | ||||
| 	MOVL CX, ecx+16(FP) | ||||
| 	MOVL DX, edx+20(FP) | ||||
| 	RET | ||||
|  | ||||
| // func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) | ||||
| TEXT ·asmCpuidex(SB), 7, $0 | ||||
| 	MOVL op+0(FP), AX | ||||
| 	MOVL op2+4(FP), CX | ||||
| 	CPUID | ||||
| 	MOVL AX, eax+8(FP) | ||||
| 	MOVL BX, ebx+12(FP) | ||||
| 	MOVL CX, ecx+16(FP) | ||||
| 	MOVL DX, edx+20(FP) | ||||
| 	RET | ||||
|  | ||||
| // func asmXgetbv(index uint32) (eax, edx uint32) | ||||
| TEXT ·asmXgetbv(SB), 7, $0 | ||||
| 	MOVL index+0(FP), CX | ||||
| 	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV | ||||
| 	MOVL AX, eax+8(FP) | ||||
| 	MOVL DX, edx+12(FP) | ||||
| 	RET | ||||
|  | ||||
| // func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) | ||||
| TEXT ·asmRdtscpAsm(SB), 7, $0 | ||||
| 	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP | ||||
| 	MOVL AX, eax+0(FP) | ||||
| 	MOVL BX, ebx+4(FP) | ||||
| 	MOVL CX, ecx+8(FP) | ||||
| 	MOVL DX, edx+12(FP) | ||||
| 	RET | ||||
							
								
								
									
										17
									
								
								vendor/github.com/klauspost/cpuid/detect_intel.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										17
									
								
								vendor/github.com/klauspost/cpuid/detect_intel.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,17 @@ | ||||
| // Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. | ||||
|  | ||||
| // +build 386,!gccgo amd64,!gccgo | ||||
|  | ||||
| package cpuid | ||||
|  | ||||
| func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) | ||||
| func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) | ||||
| func asmXgetbv(index uint32) (eax, edx uint32) | ||||
| func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) | ||||
|  | ||||
| func initCPU() { | ||||
| 	cpuid = asmCpuid | ||||
| 	cpuidex = asmCpuidex | ||||
| 	xgetbv = asmXgetbv | ||||
| 	rdtscpAsm = asmRdtscpAsm | ||||
| } | ||||
							
								
								
									
										23
									
								
								vendor/github.com/klauspost/cpuid/detect_ref.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								vendor/github.com/klauspost/cpuid/detect_ref.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,23 @@ | ||||
| // Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. | ||||
|  | ||||
| // +build !amd64,!386 gccgo | ||||
|  | ||||
| package cpuid | ||||
|  | ||||
| func initCPU() { | ||||
| 	cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { | ||||
| 		return 0, 0, 0, 0 | ||||
| 	} | ||||
|  | ||||
| 	cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { | ||||
| 		return 0, 0, 0, 0 | ||||
| 	} | ||||
|  | ||||
| 	xgetbv = func(index uint32) (eax, edx uint32) { | ||||
| 		return 0, 0 | ||||
| 	} | ||||
|  | ||||
| 	rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { | ||||
| 		return 0, 0, 0, 0 | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										4
									
								
								vendor/github.com/klauspost/cpuid/generate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										4
									
								
								vendor/github.com/klauspost/cpuid/generate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,4 @@ | ||||
| package cpuid | ||||
|  | ||||
| //go:generate go run private-gen.go | ||||
| //go:generate gofmt -w ./private | ||||
							
								
								
									
										476
									
								
								vendor/github.com/klauspost/cpuid/private-gen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										476
									
								
								vendor/github.com/klauspost/cpuid/private-gen.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,476 @@ | ||||
| // +build ignore | ||||
|  | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"go/ast" | ||||
| 	"go/parser" | ||||
| 	"go/printer" | ||||
| 	"go/token" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
| var inFiles = []string{"cpuid.go", "cpuid_test.go"} | ||||
| var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"} | ||||
| var fileSet = token.NewFileSet() | ||||
| var reWrites = []rewrite{ | ||||
| 	initRewrite("CPUInfo -> cpuInfo"), | ||||
| 	initRewrite("Vendor -> vendor"), | ||||
| 	initRewrite("Flags -> flags"), | ||||
| 	initRewrite("Detect -> detect"), | ||||
| 	initRewrite("CPU -> cpu"), | ||||
| } | ||||
| var excludeNames = map[string]bool{"string": true, "join": true, "trim": true, | ||||
| 	// cpuid_test.go | ||||
| 	"t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true, | ||||
| } | ||||
|  | ||||
| var excludePrefixes = []string{"test", "benchmark"} | ||||
|  | ||||
| func main() { | ||||
| 	Package := "private" | ||||
| 	parserMode := parser.ParseComments | ||||
| 	exported := make(map[string]rewrite) | ||||
| 	for _, file := range inFiles { | ||||
| 		in, err := os.Open(file) | ||||
| 		if err != nil { | ||||
| 			log.Fatalf("opening input", err) | ||||
| 		} | ||||
|  | ||||
| 		src, err := ioutil.ReadAll(in) | ||||
| 		if err != nil { | ||||
| 			log.Fatalf("reading input", err) | ||||
| 		} | ||||
|  | ||||
| 		astfile, err := parser.ParseFile(fileSet, file, src, parserMode) | ||||
| 		if err != nil { | ||||
| 			log.Fatalf("parsing input", err) | ||||
| 		} | ||||
|  | ||||
| 		for _, rw := range reWrites { | ||||
| 			astfile = rw(astfile) | ||||
| 		} | ||||
|  | ||||
| 		// Inspect the AST and print all identifiers and literals. | ||||
| 		var startDecl token.Pos | ||||
| 		var endDecl token.Pos | ||||
| 		ast.Inspect(astfile, func(n ast.Node) bool { | ||||
| 			var s string | ||||
| 			switch x := n.(type) { | ||||
| 			case *ast.Ident: | ||||
| 				if x.IsExported() { | ||||
| 					t := strings.ToLower(x.Name) | ||||
| 					for _, pre := range excludePrefixes { | ||||
| 						if strings.HasPrefix(t, pre) { | ||||
| 							return true | ||||
| 						} | ||||
| 					} | ||||
| 					if excludeNames[t] != true { | ||||
| 						//if x.Pos() > startDecl && x.Pos() < endDecl { | ||||
| 						exported[x.Name] = initRewrite(x.Name + " -> " + t) | ||||
| 					} | ||||
| 				} | ||||
|  | ||||
| 			case *ast.GenDecl: | ||||
| 				if x.Tok == token.CONST && x.Lparen > 0 { | ||||
| 					startDecl = x.Lparen | ||||
| 					endDecl = x.Rparen | ||||
| 					// fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl)) | ||||
| 				} | ||||
| 			} | ||||
| 			if s != "" { | ||||
| 				fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s) | ||||
| 			} | ||||
| 			return true | ||||
| 		}) | ||||
|  | ||||
| 		for _, rw := range exported { | ||||
| 			astfile = rw(astfile) | ||||
| 		} | ||||
|  | ||||
| 		var buf bytes.Buffer | ||||
|  | ||||
| 		printer.Fprint(&buf, fileSet, astfile) | ||||
|  | ||||
| 		// Remove package documentation and insert information | ||||
| 		s := buf.String() | ||||
| 		ind := strings.Index(buf.String(), "\npackage cpuid") | ||||
| 		s = s[ind:] | ||||
| 		s = "// Generated, DO NOT EDIT,\n" + | ||||
| 			"// but copy it to your own project and rename the package.\n" + | ||||
| 			"// See more at http://github.com/klauspost/cpuid\n" + | ||||
| 			s | ||||
|  | ||||
| 		outputName := Package + string(os.PathSeparator) + file | ||||
|  | ||||
| 		err = ioutil.WriteFile(outputName, []byte(s), 0644) | ||||
| 		if err != nil { | ||||
| 			log.Fatalf("writing output: %s", err) | ||||
| 		} | ||||
| 		log.Println("Generated", outputName) | ||||
| 	} | ||||
|  | ||||
| 	for _, file := range copyFiles { | ||||
| 		dst := "" | ||||
| 		if strings.HasPrefix(file, "cpuid") { | ||||
| 			dst = Package + string(os.PathSeparator) + file | ||||
| 		} else { | ||||
| 			dst = Package + string(os.PathSeparator) + "cpuid_" + file | ||||
| 		} | ||||
| 		err := copyFile(file, dst) | ||||
| 		if err != nil { | ||||
| 			log.Fatalf("copying file: %s", err) | ||||
| 		} | ||||
| 		log.Println("Copied", dst) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // CopyFile copies a file from src to dst. If src and dst files exist, and are | ||||
| // the same, then return success. Copy the file contents from src to dst. | ||||
| func copyFile(src, dst string) (err error) { | ||||
| 	sfi, err := os.Stat(src) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	if !sfi.Mode().IsRegular() { | ||||
| 		// cannot copy non-regular files (e.g., directories, | ||||
| 		// symlinks, devices, etc.) | ||||
| 		return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String()) | ||||
| 	} | ||||
| 	dfi, err := os.Stat(dst) | ||||
| 	if err != nil { | ||||
| 		if !os.IsNotExist(err) { | ||||
| 			return | ||||
| 		} | ||||
| 	} else { | ||||
| 		if !(dfi.Mode().IsRegular()) { | ||||
| 			return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String()) | ||||
| 		} | ||||
| 		if os.SameFile(sfi, dfi) { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	err = copyFileContents(src, dst) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // copyFileContents copies the contents of the file named src to the file named | ||||
| // by dst. The file will be created if it does not already exist. If the | ||||
| // destination file exists, all its contents will be replaced by the contents | ||||
| // of the source file. | ||||
| func copyFileContents(src, dst string) (err error) { | ||||
| 	in, err := os.Open(src) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	defer in.Close() | ||||
| 	out, err := os.Create(dst) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	defer func() { | ||||
| 		cerr := out.Close() | ||||
| 		if err == nil { | ||||
| 			err = cerr | ||||
| 		} | ||||
| 	}() | ||||
| 	if _, err = io.Copy(out, in); err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	err = out.Sync() | ||||
| 	return | ||||
| } | ||||
|  | ||||
| type rewrite func(*ast.File) *ast.File | ||||
|  | ||||
| // Mostly copied from gofmt | ||||
| func initRewrite(rewriteRule string) rewrite { | ||||
| 	f := strings.Split(rewriteRule, "->") | ||||
| 	if len(f) != 2 { | ||||
| 		fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") | ||||
| 		os.Exit(2) | ||||
| 	} | ||||
| 	pattern := parseExpr(f[0], "pattern") | ||||
| 	replace := parseExpr(f[1], "replacement") | ||||
| 	return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } | ||||
| } | ||||
|  | ||||
| // parseExpr parses s as an expression. | ||||
| // It might make sense to expand this to allow statement patterns, | ||||
| // but there are problems with preserving formatting and also | ||||
| // with what a wildcard for a statement looks like. | ||||
| func parseExpr(s, what string) ast.Expr { | ||||
| 	x, err := parser.ParseExpr(s) | ||||
| 	if err != nil { | ||||
| 		fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) | ||||
| 		os.Exit(2) | ||||
| 	} | ||||
| 	return x | ||||
| } | ||||
|  | ||||
| // Keep this function for debugging. | ||||
| /* | ||||
| func dump(msg string, val reflect.Value) { | ||||
| 	fmt.Printf("%s:\n", msg) | ||||
| 	ast.Print(fileSet, val.Interface()) | ||||
| 	fmt.Println() | ||||
| } | ||||
| */ | ||||
|  | ||||
| // rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. | ||||
| func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { | ||||
| 	cmap := ast.NewCommentMap(fileSet, p, p.Comments) | ||||
| 	m := make(map[string]reflect.Value) | ||||
| 	pat := reflect.ValueOf(pattern) | ||||
| 	repl := reflect.ValueOf(replace) | ||||
|  | ||||
| 	var rewriteVal func(val reflect.Value) reflect.Value | ||||
| 	rewriteVal = func(val reflect.Value) reflect.Value { | ||||
| 		// don't bother if val is invalid to start with | ||||
| 		if !val.IsValid() { | ||||
| 			return reflect.Value{} | ||||
| 		} | ||||
| 		for k := range m { | ||||
| 			delete(m, k) | ||||
| 		} | ||||
| 		val = apply(rewriteVal, val) | ||||
| 		if match(m, pat, val) { | ||||
| 			val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) | ||||
| 		} | ||||
| 		return val | ||||
| 	} | ||||
|  | ||||
| 	r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) | ||||
| 	r.Comments = cmap.Filter(r).Comments() // recreate comments list | ||||
| 	return r | ||||
| } | ||||
|  | ||||
| // set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. | ||||
| func set(x, y reflect.Value) { | ||||
| 	// don't bother if x cannot be set or y is invalid | ||||
| 	if !x.CanSet() || !y.IsValid() { | ||||
| 		return | ||||
| 	} | ||||
| 	defer func() { | ||||
| 		if x := recover(); x != nil { | ||||
| 			if s, ok := x.(string); ok && | ||||
| 				(strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { | ||||
| 				// x cannot be set to y - ignore this rewrite | ||||
| 				return | ||||
| 			} | ||||
| 			panic(x) | ||||
| 		} | ||||
| 	}() | ||||
| 	x.Set(y) | ||||
| } | ||||
|  | ||||
| // Values/types for special cases. | ||||
| var ( | ||||
| 	objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) | ||||
| 	scopePtrNil  = reflect.ValueOf((*ast.Scope)(nil)) | ||||
|  | ||||
| 	identType     = reflect.TypeOf((*ast.Ident)(nil)) | ||||
| 	objectPtrType = reflect.TypeOf((*ast.Object)(nil)) | ||||
| 	positionType  = reflect.TypeOf(token.NoPos) | ||||
| 	callExprType  = reflect.TypeOf((*ast.CallExpr)(nil)) | ||||
| 	scopePtrType  = reflect.TypeOf((*ast.Scope)(nil)) | ||||
| ) | ||||
|  | ||||
| // apply replaces each AST field x in val with f(x), returning val. | ||||
| // To avoid extra conversions, f operates on the reflect.Value form. | ||||
| func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { | ||||
| 	if !val.IsValid() { | ||||
| 		return reflect.Value{} | ||||
| 	} | ||||
|  | ||||
| 	// *ast.Objects introduce cycles and are likely incorrect after | ||||
| 	// rewrite; don't follow them but replace with nil instead | ||||
| 	if val.Type() == objectPtrType { | ||||
| 		return objectPtrNil | ||||
| 	} | ||||
|  | ||||
| 	// similarly for scopes: they are likely incorrect after a rewrite; | ||||
| 	// replace them with nil | ||||
| 	if val.Type() == scopePtrType { | ||||
| 		return scopePtrNil | ||||
| 	} | ||||
|  | ||||
| 	switch v := reflect.Indirect(val); v.Kind() { | ||||
| 	case reflect.Slice: | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			e := v.Index(i) | ||||
| 			set(e, f(e)) | ||||
| 		} | ||||
| 	case reflect.Struct: | ||||
| 		for i := 0; i < v.NumField(); i++ { | ||||
| 			e := v.Field(i) | ||||
| 			set(e, f(e)) | ||||
| 		} | ||||
| 	case reflect.Interface: | ||||
| 		e := v.Elem() | ||||
| 		set(v, f(e)) | ||||
| 	} | ||||
| 	return val | ||||
| } | ||||
|  | ||||
| func isWildcard(s string) bool { | ||||
| 	rune, size := utf8.DecodeRuneInString(s) | ||||
| 	return size == len(s) && unicode.IsLower(rune) | ||||
| } | ||||
|  | ||||
| // match returns true if pattern matches val, | ||||
| // recording wildcard submatches in m. | ||||
| // If m == nil, match checks whether pattern == val. | ||||
| func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { | ||||
| 	// Wildcard matches any expression.  If it appears multiple | ||||
| 	// times in the pattern, it must match the same expression | ||||
| 	// each time. | ||||
| 	if m != nil && pattern.IsValid() && pattern.Type() == identType { | ||||
| 		name := pattern.Interface().(*ast.Ident).Name | ||||
| 		if isWildcard(name) && val.IsValid() { | ||||
| 			// wildcards only match valid (non-nil) expressions. | ||||
| 			if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { | ||||
| 				if old, ok := m[name]; ok { | ||||
| 					return match(nil, old, val) | ||||
| 				} | ||||
| 				m[name] = val | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Otherwise, pattern and val must match recursively. | ||||
| 	if !pattern.IsValid() || !val.IsValid() { | ||||
| 		return !pattern.IsValid() && !val.IsValid() | ||||
| 	} | ||||
| 	if pattern.Type() != val.Type() { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	// Special cases. | ||||
| 	switch pattern.Type() { | ||||
| 	case identType: | ||||
| 		// For identifiers, only the names need to match | ||||
| 		// (and none of the other *ast.Object information). | ||||
| 		// This is a common case, handle it all here instead | ||||
| 		// of recursing down any further via reflection. | ||||
| 		p := pattern.Interface().(*ast.Ident) | ||||
| 		v := val.Interface().(*ast.Ident) | ||||
| 		return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name | ||||
| 	case objectPtrType, positionType: | ||||
| 		// object pointers and token positions always match | ||||
| 		return true | ||||
| 	case callExprType: | ||||
| 		// For calls, the Ellipsis fields (token.Position) must | ||||
| 		// match since that is how f(x) and f(x...) are different. | ||||
| 		// Check them here but fall through for the remaining fields. | ||||
| 		p := pattern.Interface().(*ast.CallExpr) | ||||
| 		v := val.Interface().(*ast.CallExpr) | ||||
| 		if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	p := reflect.Indirect(pattern) | ||||
| 	v := reflect.Indirect(val) | ||||
| 	if !p.IsValid() || !v.IsValid() { | ||||
| 		return !p.IsValid() && !v.IsValid() | ||||
| 	} | ||||
|  | ||||
| 	switch p.Kind() { | ||||
| 	case reflect.Slice: | ||||
| 		if p.Len() != v.Len() { | ||||
| 			return false | ||||
| 		} | ||||
| 		for i := 0; i < p.Len(); i++ { | ||||
| 			if !match(m, p.Index(i), v.Index(i)) { | ||||
| 				return false | ||||
| 			} | ||||
| 		} | ||||
| 		return true | ||||
|  | ||||
| 	case reflect.Struct: | ||||
| 		for i := 0; i < p.NumField(); i++ { | ||||
| 			if !match(m, p.Field(i), v.Field(i)) { | ||||
| 				return false | ||||
| 			} | ||||
| 		} | ||||
| 		return true | ||||
|  | ||||
| 	case reflect.Interface: | ||||
| 		return match(m, p.Elem(), v.Elem()) | ||||
| 	} | ||||
|  | ||||
| 	// Handle token integers, etc. | ||||
| 	return p.Interface() == v.Interface() | ||||
| } | ||||
|  | ||||
| // subst returns a copy of pattern with values from m substituted in place | ||||
| // of wildcards and pos used as the position of tokens from the pattern. | ||||
| // if m == nil, subst returns a copy of pattern and doesn't change the line | ||||
| // number information. | ||||
| func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { | ||||
| 	if !pattern.IsValid() { | ||||
| 		return reflect.Value{} | ||||
| 	} | ||||
|  | ||||
| 	// Wildcard gets replaced with map value. | ||||
| 	if m != nil && pattern.Type() == identType { | ||||
| 		name := pattern.Interface().(*ast.Ident).Name | ||||
| 		if isWildcard(name) { | ||||
| 			if old, ok := m[name]; ok { | ||||
| 				return subst(nil, old, reflect.Value{}) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if pos.IsValid() && pattern.Type() == positionType { | ||||
| 		// use new position only if old position was valid in the first place | ||||
| 		if old := pattern.Interface().(token.Pos); !old.IsValid() { | ||||
| 			return pattern | ||||
| 		} | ||||
| 		return pos | ||||
| 	} | ||||
|  | ||||
| 	// Otherwise copy. | ||||
| 	switch p := pattern; p.Kind() { | ||||
| 	case reflect.Slice: | ||||
| 		v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) | ||||
| 		for i := 0; i < p.Len(); i++ { | ||||
| 			v.Index(i).Set(subst(m, p.Index(i), pos)) | ||||
| 		} | ||||
| 		return v | ||||
|  | ||||
| 	case reflect.Struct: | ||||
| 		v := reflect.New(p.Type()).Elem() | ||||
| 		for i := 0; i < p.NumField(); i++ { | ||||
| 			v.Field(i).Set(subst(m, p.Field(i), pos)) | ||||
| 		} | ||||
| 		return v | ||||
|  | ||||
| 	case reflect.Ptr: | ||||
| 		v := reflect.New(p.Type()).Elem() | ||||
| 		if elem := p.Elem(); elem.IsValid() { | ||||
| 			v.Set(subst(m, elem, pos).Addr()) | ||||
| 		} | ||||
| 		return v | ||||
|  | ||||
| 	case reflect.Interface: | ||||
| 		v := reflect.New(p.Type()).Elem() | ||||
| 		if elem := p.Elem(); elem.IsValid() { | ||||
| 			v.Set(subst(m, elem, pos)) | ||||
| 		} | ||||
| 		return v | ||||
| 	} | ||||
|  | ||||
| 	return pattern | ||||
| } | ||||
							
								
								
									
										202
									
								
								vendor/github.com/pquerna/ffjson/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										202
									
								
								vendor/github.com/pquerna/ffjson/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,202 @@ | ||||
|  | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "[]" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright [yyyy] [name of copyright owner] | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
										8
									
								
								vendor/github.com/pquerna/ffjson/NOTICE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										8
									
								
								vendor/github.com/pquerna/ffjson/NOTICE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,8 @@ | ||||
| ffjson | ||||
| Copyright (c) 2014, Paul Querna | ||||
|  | ||||
| This product includes software developed by  | ||||
| Paul Querna (http://paul.querna.org/). | ||||
|  | ||||
| Portions of this software were developed as | ||||
| part of Go, Copyright (c) 2012 The Go Authors. | ||||
							
								
								
									
										92
									
								
								vendor/github.com/pquerna/ffjson/ffjson/decoder.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										92
									
								
								vendor/github.com/pquerna/ffjson/ffjson/decoder.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,92 @@ | ||||
| package ffjson | ||||
|  | ||||
| /** | ||||
|  *  Copyright 2015 Paul Querna, Klaus Post | ||||
|  * | ||||
|  *  Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|  *  you may not use this file except in compliance with the License. | ||||
|  *  You may obtain a copy of the License at | ||||
|  * | ||||
|  *      http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  * | ||||
|  *  Unless required by applicable law or agreed to in writing, software | ||||
|  *  distributed under the License is distributed on an "AS IS" BASIS, | ||||
|  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|  *  See the License for the specific language governing permissions and | ||||
|  *  limitations under the License. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"errors" | ||||
| 	fflib "github.com/pquerna/ffjson/fflib/v1" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"reflect" | ||||
| ) | ||||
|  | ||||
// A Decoder decodes JSON and is reusable across calls, keeping its
// internal lexer between invocations.
// It must not be used by more than one goroutine at a time.
type Decoder struct {
	fs *fflib.FFLexer // lazily created on first fast Decode; reset and reused afterwards
}

// NewDecoder returns a reusable Decoder.
func NewDecoder() *Decoder {
	return &Decoder{}
}
|  | ||||
| // Decode the data in the supplied data slice. | ||||
| func (d *Decoder) Decode(data []byte, v interface{}) error { | ||||
| 	f, ok := v.(unmarshalFaster) | ||||
| 	if ok { | ||||
| 		if d.fs == nil { | ||||
| 			d.fs = fflib.NewFFLexer(data) | ||||
| 		} else { | ||||
| 			d.fs.Reset(data) | ||||
| 		} | ||||
| 		return f.UnmarshalJSONFFLexer(d.fs, fflib.FFParse_map_start) | ||||
| 	} | ||||
|  | ||||
| 	um, ok := v.(json.Unmarshaler) | ||||
| 	if ok { | ||||
| 		return um.UnmarshalJSON(data) | ||||
| 	} | ||||
| 	return json.Unmarshal(data, v) | ||||
| } | ||||
|  | ||||
| // Decode the data from the supplied reader. | ||||
| // You should expect that data is read into memory before it is decoded. | ||||
| func (d *Decoder) DecodeReader(r io.Reader, v interface{}) error { | ||||
| 	_, ok := v.(unmarshalFaster) | ||||
| 	_, ok2 := v.(json.Unmarshaler) | ||||
| 	if ok || ok2 { | ||||
| 		data, err := ioutil.ReadAll(r) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		defer fflib.Pool(data) | ||||
| 		return d.Decode(data, v) | ||||
| 	} | ||||
| 	dec := json.NewDecoder(r) | ||||
| 	return dec.Decode(v) | ||||
| } | ||||
|  | ||||
| // DecodeFast will unmarshal the data if fast unmarshal is available. | ||||
| // This function can be used if you want to be sure the fast | ||||
| // unmarshal is used or in testing. | ||||
| // If you would like to have fallback to encoding/json you can use the | ||||
| // regular Decode() method. | ||||
| func (d *Decoder) DecodeFast(data []byte, v interface{}) error { | ||||
| 	f, ok := v.(unmarshalFaster) | ||||
| 	if !ok { | ||||
| 		return errors.New("ffjson unmarshal not available for type " + reflect.TypeOf(v).String()) | ||||
| 	} | ||||
| 	if d.fs == nil { | ||||
| 		d.fs = fflib.NewFFLexer(data) | ||||
| 	} else { | ||||
| 		d.fs.Reset(data) | ||||
| 	} | ||||
| 	return f.UnmarshalJSONFFLexer(d.fs, fflib.FFParse_map_start) | ||||
| } | ||||
							
								
								
									
										85
									
								
								vendor/github.com/pquerna/ffjson/ffjson/encoder.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										85
									
								
								vendor/github.com/pquerna/ffjson/ffjson/encoder.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,85 @@ | ||||
| package ffjson | ||||
|  | ||||
| /** | ||||
|  *  Copyright 2015 Paul Querna, Klaus Post | ||||
|  * | ||||
|  *  Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|  *  you may not use this file except in compliance with the License. | ||||
|  *  You may obtain a copy of the License at | ||||
|  * | ||||
|  *      http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  * | ||||
|  *  Unless required by applicable law or agreed to in writing, software | ||||
|  *  distributed under the License is distributed on an "AS IS" BASIS, | ||||
|  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|  *  See the License for the specific language governing permissions and | ||||
|  *  limitations under the License. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"errors" | ||||
| 	fflib "github.com/pquerna/ffjson/fflib/v1" | ||||
| 	"io" | ||||
| 	"reflect" | ||||
| ) | ||||
|  | ||||
// An Encoder is reusable and encodes many objects to a single writer.
// It must not be used by more than one goroutine at a time.
type Encoder struct {
	buf fflib.Buffer  // scratch buffer for the ffjson fast path (reset per Encode)
	w   io.Writer     // destination stream
	enc *json.Encoder // fallback encoder for types without ffjson-generated code
}
|  | ||||
| // SetEscapeHTML specifies whether problematic HTML characters | ||||
| // should be escaped inside JSON quoted strings. | ||||
| // The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e | ||||
| // to avoid certain safety problems that can arise when embedding JSON in HTML. | ||||
| // | ||||
| // In non-HTML settings where the escaping interferes with the readability | ||||
| // of the output, SetEscapeHTML(false) disables this behavior. | ||||
| func (enc *Encoder) SetEscapeHTML(on bool) { | ||||
| 	enc.enc.SetEscapeHTML(on) | ||||
| } | ||||
|  | ||||
| // NewEncoder returns a reusable Encoder. | ||||
| // Output will be written to the supplied writer. | ||||
| func NewEncoder(w io.Writer) *Encoder { | ||||
| 	return &Encoder{w: w, enc: json.NewEncoder(w)} | ||||
| } | ||||
|  | ||||
| // Encode the data in the supplied value to the stream | ||||
| // given on creation. | ||||
| // When the function returns the output has been | ||||
| // written to the stream. | ||||
| func (e *Encoder) Encode(v interface{}) error { | ||||
| 	f, ok := v.(marshalerFaster) | ||||
| 	if ok { | ||||
| 		e.buf.Reset() | ||||
| 		err := f.MarshalJSONBuf(&e.buf) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		_, err = io.Copy(e.w, &e.buf) | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return e.enc.Encode(v) | ||||
| } | ||||
|  | ||||
// EncodeFast will marshal the data if fast marshal is available.
// This function can be used if you want to be sure the fast
// marshal is used or in testing.
// If you would like to have fallback to encoding/json you can use the
// regular Encode() method.
func (e *Encoder) EncodeFast(v interface{}) error {
	_, ok := v.(marshalerFaster)
	if !ok {
		return errors.New("ffjson marshal not available for type " + reflect.TypeOf(v).String())
	}
	return e.Encode(v)
}
							
								
								
									
										109
									
								
								vendor/github.com/pquerna/ffjson/ffjson/marshal.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										109
									
								
								vendor/github.com/pquerna/ffjson/ffjson/marshal.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,109 @@ | ||||
| package ffjson | ||||
|  | ||||
| /** | ||||
|  *  Copyright 2015 Paul Querna, Klaus Post | ||||
|  * | ||||
|  *  Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|  *  you may not use this file except in compliance with the License. | ||||
|  *  You may obtain a copy of the License at | ||||
|  * | ||||
|  *      http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  * | ||||
|  *  Unless required by applicable law or agreed to in writing, software | ||||
|  *  distributed under the License is distributed on an "AS IS" BASIS, | ||||
|  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|  *  See the License for the specific language governing permissions and | ||||
|  *  limitations under the License. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"errors" | ||||
| 	fflib "github.com/pquerna/ffjson/fflib/v1" | ||||
| 	"reflect" | ||||
| ) | ||||
|  | ||||
// marshalerFaster is satisfied by types with ffjson-generated marshal
// code that writes directly into an encoding buffer.
type marshalerFaster interface {
	MarshalJSONBuf(buf fflib.EncodingBuffer) error
}

// unmarshalFaster is satisfied by types with ffjson-generated unmarshal
// code driven by an FFLexer.
type unmarshalFaster interface {
	UnmarshalJSONFFLexer(l *fflib.FFLexer, state fflib.FFParseState) error
}
|  | ||||
| // Marshal will act the same way as json.Marshal, except | ||||
| // it will choose the ffjson marshal function before falling | ||||
| // back to using json.Marshal. | ||||
| // Using this function will bypass the internal copying and parsing | ||||
| // the json library normally does, which greatly speeds up encoding time. | ||||
| // It is ok to call this function even if no ffjson code has been | ||||
| // generated for the data type you pass in the interface. | ||||
| func Marshal(v interface{}) ([]byte, error) { | ||||
| 	f, ok := v.(marshalerFaster) | ||||
| 	if ok { | ||||
| 		buf := fflib.Buffer{} | ||||
| 		err := f.MarshalJSONBuf(&buf) | ||||
| 		b := buf.Bytes() | ||||
| 		if err != nil { | ||||
| 			if len(b) > 0 { | ||||
| 				Pool(b) | ||||
| 			} | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		return b, nil | ||||
| 	} | ||||
|  | ||||
| 	j, ok := v.(json.Marshaler) | ||||
| 	if ok { | ||||
| 		return j.MarshalJSON() | ||||
| 	} | ||||
| 	return json.Marshal(v) | ||||
| } | ||||
|  | ||||
| // MarshalFast will marshal the data if fast marshal is available. | ||||
| // This function can be used if you want to be sure the fast | ||||
| // marshal is used or in testing. | ||||
| // If you would like to have fallback to encoding/json you can use the | ||||
| // Marshal() method. | ||||
| func MarshalFast(v interface{}) ([]byte, error) { | ||||
| 	_, ok := v.(marshalerFaster) | ||||
| 	if !ok { | ||||
| 		return nil, errors.New("ffjson marshal not available for type " + reflect.TypeOf(v).String()) | ||||
| 	} | ||||
| 	return Marshal(v) | ||||
| } | ||||
|  | ||||
| // Unmarshal will act the same way as json.Unmarshal, except | ||||
| // it will choose the ffjson unmarshal function before falling | ||||
| // back to using json.Unmarshal. | ||||
| // The overhead of unmarshal is lower than on Marshal, | ||||
| // however this should still provide a speedup for your encoding. | ||||
| // It is ok to call this function even if no ffjson code has been | ||||
| // generated for the data type you pass in the interface. | ||||
| func Unmarshal(data []byte, v interface{}) error { | ||||
| 	f, ok := v.(unmarshalFaster) | ||||
| 	if ok { | ||||
| 		fs := fflib.NewFFLexer(data) | ||||
| 		return f.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) | ||||
| 	} | ||||
|  | ||||
| 	j, ok := v.(json.Unmarshaler) | ||||
| 	if ok { | ||||
| 		return j.UnmarshalJSON(data) | ||||
| 	} | ||||
| 	return json.Unmarshal(data, v) | ||||
| } | ||||
|  | ||||
// UnmarshalFast will unmarshal the data if fast unmarshal is available.
// This function can be used if you want to be sure the fast
// unmarshal is used or in testing.
// If you would like to have fallback to encoding/json you can use the
// Unmarshal() method.
func UnmarshalFast(data []byte, v interface{}) error {
	_, ok := v.(unmarshalFaster)
	if !ok {
		return errors.New("ffjson unmarshal not available for type " + reflect.TypeOf(v).String())
	}
	return Unmarshal(data, v)
}
							
								
								
									
										33
									
								
								vendor/github.com/pquerna/ffjson/ffjson/pool.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								vendor/github.com/pquerna/ffjson/ffjson/pool.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,33 @@ | ||||
| package ffjson | ||||
|  | ||||
| /** | ||||
|  *  Copyright 2015 Paul Querna, Klaus Post | ||||
|  * | ||||
|  *  Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|  *  you may not use this file except in compliance with the License. | ||||
|  *  You may obtain a copy of the License at | ||||
|  * | ||||
|  *      http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  * | ||||
|  *  Unless required by applicable law or agreed to in writing, software | ||||
|  *  distributed under the License is distributed on an "AS IS" BASIS, | ||||
|  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|  *  See the License for the specific language governing permissions and | ||||
|  *  limitations under the License. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| import ( | ||||
| 	fflib "github.com/pquerna/ffjson/fflib/v1" | ||||
| ) | ||||
|  | ||||
// Pool sends a buffer to the pool to be reused by other instances.
//
// On servers where you have a lot of concurrent encoding going on,
// you can hand back the byte buffer you get from marshaling once you are
// done using it.
//
// You may no longer utilize the content of the buffer, since it may be
// used by other goroutines.
func Pool(b []byte) {
	fflib.Pool(b)
}
							
								
								
									
										421
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										421
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,421 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package v1 | ||||
|  | ||||
| // Simple byte buffer for marshaling data. | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/json" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
// grower guarantees space for n more bytes.
type grower interface {
	Grow(n int)
}

// truncater discards buffered data, partially or entirely.
type truncater interface {
	Truncate(n int)
	Reset()
}

// bytesReader exposes the buffered contents as a byte slice or string.
type bytesReader interface {
	Bytes() []byte
	String() string
}

// runeWriter appends a single encoded rune.
type runeWriter interface {
	WriteRune(r rune) (n int, err error)
}

// stringWriter appends the contents of a string.
type stringWriter interface {
	WriteString(s string) (n int, err error)
}

// lener reports the number of unread bytes.
type lener interface {
	Len() int
}

// rewinder backs the buffer up by n bytes; exact semantics are defined
// by the implementation's Rewind (not visible in this file view).
type rewinder interface {
	Rewind(n int) (err error)
}

// encoder encodes an arbitrary value into the buffer.
type encoder interface {
	Encode(interface{}) error
}

// TODO(pquerna): continue to reduce these interfaces

// EncodingBuffer is the buffer contract required by ffjson-generated
// marshal code.
type EncodingBuffer interface {
	io.Writer
	io.WriterTo
	io.ByteWriter
	stringWriter
	truncater
	grower
	rewinder
	encoder
}

// DecodingBuffer is the buffer contract required by ffjson-generated
// unmarshal code.
type DecodingBuffer interface {
	io.ReadWriter
	io.ByteWriter
	stringWriter
	runeWriter
	truncater
	grower
	bytesReader
	lener
}
|  | ||||
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
	buf              []byte            // contents are the bytes buf[off : len(buf)]
	off              int               // read at &buf[off], write at &buf[len(buf)]
	runeBytes        [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
	encoder          *json.Encoder     // used by Encode; creation not visible in this view — TODO confirm
	skipTrailingByte bool              // when set, Write drops the final byte of each input slice
}

// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
var ErrTooLarge = errors.New("fflib.v1.Buffer: too large")
|  | ||||
// Bytes returns a slice of the contents of the unread portion of the buffer;
// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
// returned slice, the contents of the buffer will change provided there
// are no intervening method calls on the Buffer.
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
|  | ||||
| // String returns the contents of the unread portion of the buffer | ||||
| // as a string.  If the Buffer is a nil pointer, it returns "<nil>". | ||||
| func (b *Buffer) String() string { | ||||
| 	if b == nil { | ||||
| 		// Special case, useful in debugging. | ||||
| 		return "<nil>" | ||||
| 	} | ||||
| 	return string(b.buf[b.off:]) | ||||
| } | ||||
|  | ||||
// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }
|  | ||||
| // Truncate discards all but the first n unread bytes from the buffer. | ||||
| // It panics if n is negative or greater than the length of the buffer. | ||||
| func (b *Buffer) Truncate(n int) { | ||||
| 	if n == 0 { | ||||
| 		b.off = 0 | ||||
| 		b.buf = b.buf[0:0] | ||||
| 	} else { | ||||
| 		b.buf = b.buf[0 : b.off+n] | ||||
| 	} | ||||
| } | ||||
|  | ||||
// Reset resets the buffer so it has no content.
// b.Reset() is the same as b.Truncate(0).
func (b *Buffer) Reset() { b.Truncate(0) }
|  | ||||
// grow grows the buffer to guarantee space for n more bytes.
// It returns the index where bytes should be written.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) grow(n int) int {
	// If we have no buffer, get one from the pool
	m := b.Len()
	if m == 0 {
		if b.buf == nil {
			// First use: allocate with headroom (2*n) to reduce
			// immediate regrowth. makeSlice is defined elsewhere in
			// this package — presumably it panics with ErrTooLarge on
			// allocation failure; confirm against its definition.
			b.buf = makeSlice(2 * n)
			b.off = 0
		} else if b.off != 0 {
			// If buffer is empty, reset to recover space.
			b.Truncate(0)
		}
	}
	if len(b.buf)+n > cap(b.buf) {
		var buf []byte
		if m+n <= cap(b.buf)/2 {
			// We can slide things down instead of allocating a new
			// slice. We only need m+n <= cap(b.buf) to slide, but
			// we instead let capacity get twice as large so we
			// don't spend all our time copying.
			copy(b.buf[:], b.buf[b.off:])
			buf = b.buf[:m]
		} else {
			// not enough space anywhere: allocate, copy the unread
			// bytes across, and hand the old backing array to the pool.
			buf = makeSlice(2*cap(b.buf) + n)
			copy(buf, b.buf[b.off:])
			Pool(b.buf)
			b.buf = buf
		}
		b.off = 0
	}
	// Extend the visible length to cover the reserved region.
	b.buf = b.buf[0 : b.off+m+n]
	return b.off + m
}
|  | ||||
| // Grow grows the buffer's capacity, if necessary, to guarantee space for | ||||
| // another n bytes. After Grow(n), at least n bytes can be written to the | ||||
| // buffer without another allocation. | ||||
| // If n is negative, Grow will panic. | ||||
| // If the buffer can't grow it will panic with ErrTooLarge. | ||||
| func (b *Buffer) Grow(n int) { | ||||
| 	if n < 0 { | ||||
| 		panic("bytes.Buffer.Grow: negative count") | ||||
| 	} | ||||
| 	m := b.grow(n) | ||||
| 	b.buf = b.buf[0:m] | ||||
| } | ||||
|  | ||||
| // Write appends the contents of p to the buffer, growing the buffer as | ||||
| // needed. The return value n is the length of p; err is always nil. If the | ||||
| // buffer becomes too large, Write will panic with ErrTooLarge. | ||||
| func (b *Buffer) Write(p []byte) (n int, err error) { | ||||
| 	if b.skipTrailingByte { | ||||
| 		p = p[:len(p)-1] | ||||
| 	} | ||||
| 	m := b.grow(len(p)) | ||||
| 	return copy(b.buf[m:], p), nil | ||||
| } | ||||
|  | ||||
| // WriteString appends the contents of s to the buffer, growing the buffer as | ||||
| // needed. The return value n is the length of s; err is always nil. If the | ||||
| // buffer becomes too large, WriteString will panic with ErrTooLarge. | ||||
| func (b *Buffer) WriteString(s string) (n int, err error) { | ||||
| 	m := b.grow(len(s)) | ||||
| 	return copy(b.buf[m:], s), nil | ||||
| } | ||||
|  | ||||
// minRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least minRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const minRead = 512
|  | ||||
| // ReadFrom reads data from r until EOF and appends it to the buffer, growing | ||||
| // the buffer as needed. The return value n is the number of bytes read. Any | ||||
| // error except io.EOF encountered during the read is also returned. If the | ||||
| // buffer becomes too large, ReadFrom will panic with ErrTooLarge. | ||||
| func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { | ||||
| 	// If buffer is empty, reset to recover space. | ||||
| 	if b.off >= len(b.buf) { | ||||
| 		b.Truncate(0) | ||||
| 	} | ||||
| 	for { | ||||
| 		if free := cap(b.buf) - len(b.buf); free < minRead { | ||||
| 			// not enough space at end | ||||
| 			newBuf := b.buf | ||||
| 			if b.off+free < minRead { | ||||
| 				// not enough space using beginning of buffer; | ||||
| 				// double buffer capacity | ||||
| 				newBuf = makeSlice(2*cap(b.buf) + minRead) | ||||
| 			} | ||||
| 			copy(newBuf, b.buf[b.off:]) | ||||
| 			Pool(b.buf) | ||||
| 			b.buf = newBuf[:len(b.buf)-b.off] | ||||
| 			b.off = 0 | ||||
| 		} | ||||
| 		m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) | ||||
| 		b.buf = b.buf[0 : len(b.buf)+m] | ||||
| 		n += int64(m) | ||||
| 		if e == io.EOF { | ||||
| 			break | ||||
| 		} | ||||
| 		if e != nil { | ||||
| 			return n, e | ||||
| 		} | ||||
| 	} | ||||
| 	return n, nil // err is EOF, so return nil explicitly | ||||
| } | ||||
|  | ||||
| // WriteTo writes data to w until the buffer is drained or an error occurs. | ||||
| // The return value n is the number of bytes written; it always fits into an | ||||
| // int, but it is int64 to match the io.WriterTo interface. Any error | ||||
| // encountered during the write is also returned. | ||||
| func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { | ||||
| 	if b.off < len(b.buf) { | ||||
| 		nBytes := b.Len() | ||||
| 		m, e := w.Write(b.buf[b.off:]) | ||||
| 		if m > nBytes { | ||||
| 			panic("bytes.Buffer.WriteTo: invalid Write count") | ||||
| 		} | ||||
| 		b.off += m | ||||
| 		n = int64(m) | ||||
| 		if e != nil { | ||||
| 			return n, e | ||||
| 		} | ||||
| 		// all bytes should have been written, by definition of | ||||
| 		// Write method in io.Writer | ||||
| 		if m != nBytes { | ||||
| 			return n, io.ErrShortWrite | ||||
| 		} | ||||
| 	} | ||||
| 	// Buffer is now empty; reset. | ||||
| 	b.Truncate(0) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // WriteByte appends the byte c to the buffer, growing the buffer as needed. | ||||
| // The returned error is always nil, but is included to match bufio.Writer's | ||||
| // WriteByte. If the buffer becomes too large, WriteByte will panic with | ||||
| // ErrTooLarge. | ||||
| func (b *Buffer) WriteByte(c byte) error { | ||||
| 	m := b.grow(1) | ||||
| 	b.buf[m] = c | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (b *Buffer) Rewind(n int) error { | ||||
| 	b.buf = b.buf[:len(b.buf)-n] | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// Encode JSON-encodes v and appends the result to the buffer, lazily
// creating and then reusing a single json.Encoder bound to b.
// json.Encoder always terminates its output with a newline; setting
// skipTrailingByte for the duration of the call makes Write drop the
// final byte of each chunk, suppressing that newline.
func (b *Buffer) Encode(v interface{}) error {
	if b.encoder == nil {
		b.encoder = json.NewEncoder(b)
	}
	b.skipTrailingByte = true
	err := b.encoder.Encode(v)
	// Always clear the flag, even on error, so later Writes are unaffected.
	b.skipTrailingByte = false
	return err
}
|  | ||||
| // WriteRune appends the UTF-8 encoding of Unicode code point r to the | ||||
| // buffer, returning its length and an error, which is always nil but is | ||||
| // included to match bufio.Writer's WriteRune. The buffer is grown as needed; | ||||
| // if it becomes too large, WriteRune will panic with ErrTooLarge. | ||||
| func (b *Buffer) WriteRune(r rune) (n int, err error) { | ||||
| 	if r < utf8.RuneSelf { | ||||
| 		b.WriteByte(byte(r)) | ||||
| 		return 1, nil | ||||
| 	} | ||||
| 	n = utf8.EncodeRune(b.runeBytes[0:], r) | ||||
| 	b.Write(b.runeBytes[0:n]) | ||||
| 	return n, nil | ||||
| } | ||||
|  | ||||
| // Read reads the next len(p) bytes from the buffer or until the buffer | ||||
| // is drained.  The return value n is the number of bytes read.  If the | ||||
| // buffer has no data to return, err is io.EOF (unless len(p) is zero); | ||||
| // otherwise it is nil. | ||||
| func (b *Buffer) Read(p []byte) (n int, err error) { | ||||
| 	if b.off >= len(b.buf) { | ||||
| 		// Buffer is empty, reset to recover space. | ||||
| 		b.Truncate(0) | ||||
| 		if len(p) == 0 { | ||||
| 			return | ||||
| 		} | ||||
| 		return 0, io.EOF | ||||
| 	} | ||||
| 	n = copy(p, b.buf[b.off:]) | ||||
| 	b.off += n | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Next returns a slice containing the next n bytes from the buffer, | ||||
| // advancing the buffer as if the bytes had been returned by Read. | ||||
| // If there are fewer than n bytes in the buffer, Next returns the entire buffer. | ||||
| // The slice is only valid until the next call to a read or write method. | ||||
| func (b *Buffer) Next(n int) []byte { | ||||
| 	m := b.Len() | ||||
| 	if n > m { | ||||
| 		n = m | ||||
| 	} | ||||
| 	data := b.buf[b.off : b.off+n] | ||||
| 	b.off += n | ||||
| 	return data | ||||
| } | ||||
|  | ||||
| // ReadByte reads and returns the next byte from the buffer. | ||||
| // If no byte is available, it returns error io.EOF. | ||||
| func (b *Buffer) ReadByte() (c byte, err error) { | ||||
| 	if b.off >= len(b.buf) { | ||||
| 		// Buffer is empty, reset to recover space. | ||||
| 		b.Truncate(0) | ||||
| 		return 0, io.EOF | ||||
| 	} | ||||
| 	c = b.buf[b.off] | ||||
| 	b.off++ | ||||
| 	return c, nil | ||||
| } | ||||
|  | ||||
| // ReadRune reads and returns the next UTF-8-encoded | ||||
| // Unicode code point from the buffer. | ||||
| // If no bytes are available, the error returned is io.EOF. | ||||
| // If the bytes are an erroneous UTF-8 encoding, it | ||||
| // consumes one byte and returns U+FFFD, 1. | ||||
| func (b *Buffer) ReadRune() (r rune, size int, err error) { | ||||
| 	if b.off >= len(b.buf) { | ||||
| 		// Buffer is empty, reset to recover space. | ||||
| 		b.Truncate(0) | ||||
| 		return 0, 0, io.EOF | ||||
| 	} | ||||
| 	c := b.buf[b.off] | ||||
| 	if c < utf8.RuneSelf { | ||||
| 		b.off++ | ||||
| 		return rune(c), 1, nil | ||||
| 	} | ||||
| 	r, n := utf8.DecodeRune(b.buf[b.off:]) | ||||
| 	b.off += n | ||||
| 	return r, n, nil | ||||
| } | ||||
|  | ||||
| // ReadBytes reads until the first occurrence of delim in the input, | ||||
| // returning a slice containing the data up to and including the delimiter. | ||||
| // If ReadBytes encounters an error before finding a delimiter, | ||||
| // it returns the data read before the error and the error itself (often io.EOF). | ||||
| // ReadBytes returns err != nil if and only if the returned data does not end in | ||||
| // delim. | ||||
| func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { | ||||
| 	slice, err := b.readSlice(delim) | ||||
| 	// return a copy of slice. The buffer's backing array may | ||||
| 	// be overwritten by later calls. | ||||
| 	line = append(line, slice...) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // readSlice is like ReadBytes but returns a reference to internal buffer data. | ||||
| func (b *Buffer) readSlice(delim byte) (line []byte, err error) { | ||||
| 	i := bytes.IndexByte(b.buf[b.off:], delim) | ||||
| 	end := b.off + i + 1 | ||||
| 	if i < 0 { | ||||
| 		end = len(b.buf) | ||||
| 		err = io.EOF | ||||
| 	} | ||||
| 	line = b.buf[b.off:end] | ||||
| 	b.off = end | ||||
| 	return line, err | ||||
| } | ||||
|  | ||||
| // ReadString reads until the first occurrence of delim in the input, | ||||
| // returning a string containing the data up to and including the delimiter. | ||||
| // If ReadString encounters an error before finding a delimiter, | ||||
| // it returns the data read before the error and the error itself (often io.EOF). | ||||
| // ReadString returns err != nil if and only if the returned data does not end | ||||
| // in delim. | ||||
| func (b *Buffer) ReadString(delim byte) (line string, err error) { | ||||
| 	slice, err := b.readSlice(delim) | ||||
| 	return string(slice), err | ||||
| } | ||||
|  | ||||
// NewBuffer creates and initializes a new Buffer using buf as its initial
// contents.  It is intended to prepare a Buffer to read existing data.  It
// can also be used to size the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
// The Buffer stores buf directly (no copy), so the caller should not use
// buf after this call.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
|  | ||||
| // NewBufferString creates and initializes a new Buffer using string s as its | ||||
| // initial contents. It is intended to prepare a buffer to read an existing | ||||
| // string. | ||||
| // | ||||
| // In most cases, new(Buffer) (or just declaring a Buffer variable) is | ||||
| // sufficient to initialize a Buffer. | ||||
| func NewBufferString(s string) *Buffer { | ||||
| 	return &Buffer{buf: []byte(s)} | ||||
| } | ||||
							
								
								
									
										11
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,11 @@ | ||||
| // +build !go1.3 | ||||
|  | ||||
| package v1 | ||||
|  | ||||
| // Stub version of buffer_pool.go for Go 1.2, which doesn't have sync.Pool. | ||||
|  | ||||
| func Pool(b []byte) {} | ||||
|  | ||||
// makeSlice allocates a fresh n-byte slice (no pooling on Go < 1.3).
func makeSlice(n int) []byte {
	return make([]byte, n)
}
							
								
								
									
										105
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										105
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,105 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // +build go1.3 | ||||
|  | ||||
| package v1 | ||||
|  | ||||
| // Allocation pools for Buffers. | ||||
|  | ||||
| import "sync" | ||||
|  | ||||
| var pools [14]sync.Pool | ||||
| var pool64 *sync.Pool | ||||
|  | ||||
| func init() { | ||||
| 	var i uint | ||||
| 	// TODO(pquerna): add science here around actual pool sizes. | ||||
| 	for i = 6; i < 20; i++ { | ||||
| 		n := 1 << i | ||||
| 		pools[poolNum(n)].New = func() interface{} { return make([]byte, 0, n) } | ||||
| 	} | ||||
| 	pool64 = &pools[0] | ||||
| } | ||||
|  | ||||
// poolNum returns the index of the pool whose buffers hold at least
// 'i' bytes, or -1 when i exceeds the largest pooled size (512 KiB).
func poolNum(i int) int {
	// TODO(pquerna): convert to log2 w/ bsr asm instruction:
	// 	<https://groups.google.com/forum/#!topic/golang-nuts/uAb5J1_y7ns>
	switch {
	case i <= 64:
		return 0
	case i <= 128:
		return 1
	case i <= 256:
		return 2
	case i <= 512:
		return 3
	case i <= 1024:
		return 4
	case i <= 2048:
		return 5
	case i <= 4096:
		return 6
	case i <= 8192:
		return 7
	case i <= 16384:
		return 8
	case i <= 32768:
		return 9
	case i <= 65536:
		return 10
	case i <= 131072:
		return 11
	case i <= 262144:
		return 12
	case i <= 524288:
		return 13
	default:
		return -1
	}
}
|  | ||||
| // Send a buffer to the Pool to reuse for other instances. | ||||
| // You may no longer utilize the content of the buffer, since it may be used | ||||
| // by other goroutines. | ||||
| func Pool(b []byte) { | ||||
| 	if b == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	c := cap(b) | ||||
|  | ||||
| 	// Our smallest buffer is 64 bytes, so we discard smaller buffers. | ||||
| 	if c < 64 { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// We need to put the incoming buffer into the NEXT buffer, | ||||
| 	// since a buffer guarantees AT LEAST the number of bytes available | ||||
| 	// that is the top of this buffer. | ||||
| 	// That is the reason for dividing the cap by 2, so it gets into the NEXT bucket. | ||||
| 	// We add 2 to avoid rounding down if size is exactly power of 2. | ||||
| 	pn := poolNum((c + 2) >> 1) | ||||
| 	if pn != -1 { | ||||
| 		pools[pn].Put(b[0:0]) | ||||
| 	} | ||||
| 	// if we didn't have a slot for this []byte, we just drop it and let the GC | ||||
| 	// take care of it. | ||||
| } | ||||
|  | ||||
| // makeSlice allocates a slice of size n -- it will attempt to use a pool'ed | ||||
| // instance whenever possible. | ||||
| func makeSlice(n int) []byte { | ||||
| 	if n <= 64 { | ||||
| 		return pool64.Get().([]byte)[0:n] | ||||
| 	} | ||||
|  | ||||
| 	pn := poolNum(n) | ||||
|  | ||||
| 	if pn != -1 { | ||||
| 		return pools[pn].Get().([]byte)[0:n] | ||||
| 	} else { | ||||
| 		return make([]byte, n) | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										88
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										88
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,88 @@ | ||||
| /** | ||||
|  *  Copyright 2014 Paul Querna | ||||
|  * | ||||
|  *  Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|  *  you may not use this file except in compliance with the License. | ||||
|  *  You may obtain a copy of the License at | ||||
|  * | ||||
|  *      http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  * | ||||
|  *  Unless required by applicable law or agreed to in writing, software | ||||
|  *  distributed under the License is distributed on an "AS IS" BASIS, | ||||
|  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|  *  See the License for the specific language governing permissions and | ||||
|  *  limitations under the License. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| /* Portions of this file are on Go stdlib's strconv/iota.go */ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package v1 | ||||
|  | ||||
| import ( | ||||
| 	"github.com/pquerna/ffjson/fflib/v1/internal" | ||||
| ) | ||||
|  | ||||
// ParseFloat converts the byte slice s to a float64 of the given bitSize,
// delegating to the internal strconv fork that operates on []byte.
func ParseFloat(s []byte, bitSize int) (f float64, err error) {
	return internal.ParseFloat(s, bitSize)
}
|  | ||||
| // ParseUint is like ParseInt but for unsigned numbers, and oeprating on []byte | ||||
| func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) { | ||||
| 	if len(s) == 1 { | ||||
| 		switch s[0] { | ||||
| 		case '0': | ||||
| 			return 0, nil | ||||
| 		case '1': | ||||
| 			return 1, nil | ||||
| 		case '2': | ||||
| 			return 2, nil | ||||
| 		case '3': | ||||
| 			return 3, nil | ||||
| 		case '4': | ||||
| 			return 4, nil | ||||
| 		case '5': | ||||
| 			return 5, nil | ||||
| 		case '6': | ||||
| 			return 6, nil | ||||
| 		case '7': | ||||
| 			return 7, nil | ||||
| 		case '8': | ||||
| 			return 8, nil | ||||
| 		case '9': | ||||
| 			return 9, nil | ||||
| 		} | ||||
| 	} | ||||
| 	return internal.ParseUint(s, base, bitSize) | ||||
| } | ||||
|  | ||||
| func ParseInt(s []byte, base int, bitSize int) (i int64, err error) { | ||||
| 	if len(s) == 1 { | ||||
| 		switch s[0] { | ||||
| 		case '0': | ||||
| 			return 0, nil | ||||
| 		case '1': | ||||
| 			return 1, nil | ||||
| 		case '2': | ||||
| 			return 2, nil | ||||
| 		case '3': | ||||
| 			return 3, nil | ||||
| 		case '4': | ||||
| 			return 4, nil | ||||
| 		case '5': | ||||
| 			return 5, nil | ||||
| 		case '6': | ||||
| 			return 6, nil | ||||
| 		case '7': | ||||
| 			return 7, nil | ||||
| 		case '8': | ||||
| 			return 8, nil | ||||
| 		case '9': | ||||
| 			return 9, nil | ||||
| 		} | ||||
| 	} | ||||
| 	return internal.ParseInt(s, base, bitSize) | ||||
| } | ||||
							
								
								
									
										378
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										378
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,378 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Multiprecision decimal numbers. | ||||
| // For floating-point formatting only; not general purpose. | ||||
| // Only operations are assign and (binary) left/right shift. | ||||
| // Can do binary floating point in multiprecision decimal precisely | ||||
| // because 2 divides 10; cannot do decimal floating point | ||||
| // in multiprecision binary precisely. | ||||
|  | ||||
| package v1 | ||||
|  | ||||
// decimal is a multiprecision decimal number used only for float
// formatting. Digits are stored as ASCII bytes in d[:nd]; dp places the
// decimal point relative to the first digit (see String).
type decimal struct {
	d     [800]byte // digits
	nd    int       // number of digits used
	dp    int       // decimal point
	neg   bool      // negative sign
	trunc bool // discarded nonzero digits beyond d[:nd]
}
|  | ||||
// String formats a as a decimal string, inserting the decimal point and
// zero padding as dictated by a.dp relative to a.nd.
func (a *decimal) String() string {
	// Upper bound on output length: all digits plus zero padding on
	// whichever side of the point needs it, plus slack for "0." etc.
	n := 10 + a.nd
	if a.dp > 0 {
		n += a.dp
	}
	if a.dp < 0 {
		n += -a.dp
	}

	buf := make([]byte, n)
	w := 0 // write cursor into buf
	switch {
	case a.nd == 0:
		return "0"

	case a.dp <= 0:
		// zeros fill space between decimal point and digits
		buf[w] = '0'
		w++
		buf[w] = '.'
		w++
		w += digitZero(buf[w : w+-a.dp])
		w += copy(buf[w:], a.d[0:a.nd])

	case a.dp < a.nd:
		// decimal point in middle of digits
		w += copy(buf[w:], a.d[0:a.dp])
		buf[w] = '.'
		w++
		w += copy(buf[w:], a.d[a.dp:a.nd])

	default:
		// zeros fill space between digits and decimal point
		w += copy(buf[w:], a.d[0:a.nd])
		w += digitZero(buf[w : w+a.dp-a.nd])
	}
	return string(buf[0:w])
}
|  | ||||
// digitZero fills dst with ASCII '0' bytes and reports how many it wrote.
func digitZero(dst []byte) int {
	for i := 0; i < len(dst); i++ {
		dst[i] = '0'
	}
	return len(dst)
}
|  | ||||
// trim drops trailing zeros from a's digits. (They are meaningless; the
// decimal point is tracked independent of the number of digits.) A value
// reduced to zero digits is normalized to dp == 0.
func trim(a *decimal) {
	for a.nd > 0 && a.d[a.nd-1] == '0' {
		a.nd--
	}
	if a.nd == 0 {
		a.dp = 0
	}
}
|  | ||||
| // Assign v to a. | ||||
| func (a *decimal) Assign(v uint64) { | ||||
| 	var buf [24]byte | ||||
|  | ||||
| 	// Write reversed decimal in buf. | ||||
| 	n := 0 | ||||
| 	for v > 0 { | ||||
| 		v1 := v / 10 | ||||
| 		v -= 10 * v1 | ||||
| 		buf[n] = byte(v + '0') | ||||
| 		n++ | ||||
| 		v = v1 | ||||
| 	} | ||||
|  | ||||
| 	// Reverse again to produce forward decimal in a.d. | ||||
| 	a.nd = 0 | ||||
| 	for n--; n >= 0; n-- { | ||||
| 		a.d[a.nd] = buf[n] | ||||
| 		a.nd++ | ||||
| 	} | ||||
| 	a.dp = a.nd | ||||
| 	trim(a) | ||||
| } | ||||
|  | ||||
// maxShift is the maximum shift that we can do in one pass without
// overflow: a signed int has 31 bits, and we have to be able to
// accommodate 9<<k.
const maxShift = 27
|  | ||||
// rightShift divides a by 2**k in place (k <= maxShift to avoid overflow
// of the integer accumulator n, which streams digits through the shift).
// NOTE(review): the original header said "(* 2)", but the body computes
// n >> k, i.e. a division.
func rightShift(a *decimal, k uint) {
	r := 0 // read pointer
	w := 0 // write pointer

	// Pick up enough leading digits to cover first shift.
	n := 0
	for ; n>>k == 0; r++ {
		if r >= a.nd {
			if n == 0 {
				// a == 0; shouldn't get here, but handle anyway.
				a.nd = 0
				return
			}
			for n>>k == 0 {
				n = n * 10
				r++
			}
			break
		}
		c := int(a.d[r])
		n = n*10 + c - '0'
	}
	// The result loses r-1 leading digit positions.
	a.dp -= r - 1

	// Pick up a digit, put down a digit.
	for ; r < a.nd; r++ {
		c := int(a.d[r])
		dig := n >> k
		n -= dig << k
		a.d[w] = byte(dig + '0')
		w++
		n = n*10 + c - '0'
	}

	// Put down extra digits.
	for n > 0 {
		dig := n >> k
		n -= dig << k
		if w < len(a.d) {
			a.d[w] = byte(dig + '0')
			w++
		} else if dig > 0 {
			// Out of room: record that nonzero digits were discarded.
			a.trunc = true
		}
		n = n * 10
	}

	a.nd = w
	trim(a)
}
|  | ||||
// Cheat sheet for left shift: table indexed by shift count giving
// number of new digits that will be introduced by that shift.
//
// For example, leftcheats[4] = {2, "625"}.  That means that
// if we are shifting by 4 (multiplying by 16), it will add 2 digits
// when the string prefix is "625" through "999", and one fewer digit
// if the string prefix is "000" through "624".
//
// Credit for this trick goes to Ken.

type leftCheat struct {
	delta  int    // number of new digits introduced by the shift
	cutoff string // minus one digit if original < cutoff
}

var leftcheats = []leftCheat{
	// Leading digits of 1/2^i = 5^i.
	// 5^23 is not an exact 64-bit floating point number,
	// so have to use bc for the math.
	/*
		seq 27 | sed 's/^/5^/' | bc |
		awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," }
		{
			log2 = log(2)/log(10)
			printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n",
				int(log2*NR+1), $0, 2**NR)
		}'
	*/
	{0, ""},
	{1, "5"},                   // * 2
	{1, "25"},                  // * 4
	{1, "125"},                 // * 8
	{2, "625"},                 // * 16
	{2, "3125"},                // * 32
	{2, "15625"},               // * 64
	{3, "78125"},               // * 128
	{3, "390625"},              // * 256
	{3, "1953125"},             // * 512
	{4, "9765625"},             // * 1024
	{4, "48828125"},            // * 2048
	{4, "244140625"},           // * 4096
	{4, "1220703125"},          // * 8192
	{5, "6103515625"},          // * 16384
	{5, "30517578125"},         // * 32768
	{5, "152587890625"},        // * 65536
	{6, "762939453125"},        // * 131072
	{6, "3814697265625"},       // * 262144
	{6, "19073486328125"},      // * 524288
	{7, "95367431640625"},      // * 1048576
	{7, "476837158203125"},     // * 2097152
	{7, "2384185791015625"},    // * 4194304
	{7, "11920928955078125"},   // * 8388608
	{8, "59604644775390625"},   // * 16777216
	{8, "298023223876953125"},  // * 33554432
	{8, "1490116119384765625"}, // * 67108864
	{9, "7450580596923828125"}, // * 134217728
}
|  | ||||
// prefixIsLessThan reports whether the leading prefix of b is
// lexicographically less than s; running out of b before finding a
// difference counts as less.
func prefixIsLessThan(b []byte, s string) bool {
	limit := len(s)
	if len(b) < limit {
		limit = len(b)
	}
	for i := 0; i < limit; i++ {
		if b[i] != s[i] {
			return b[i] < s[i]
		}
	}
	// Common prefix identical: b is less only if it is the shorter one.
	return len(b) < len(s)
}
|  | ||||
// leftShift multiplies a by 2**k in place (k <= maxShift to avoid
// overflow), using the leftcheats table to predict how many digits the
// result gains so digits can be written right-to-left in one pass.
// NOTE(review): the original header said "(/ 2)", but the body computes
// digit << k, i.e. a multiplication.
func leftShift(a *decimal, k uint) {
	delta := leftcheats[k].delta
	if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) {
		delta--
	}

	r := a.nd         // read index
	w := a.nd + delta // write index
	n := 0

	// Pick up a digit, put down a digit.
	for r--; r >= 0; r-- {
		n += (int(a.d[r]) - '0') << k
		quo := n / 10
		rem := n - 10*quo
		w--
		if w < len(a.d) {
			a.d[w] = byte(rem + '0')
		} else if rem != 0 {
			// Out of room: record that nonzero digits were discarded.
			a.trunc = true
		}
		n = quo
	}

	// Put down extra digits.
	for n > 0 {
		quo := n / 10
		rem := n - 10*quo
		w--
		if w < len(a.d) {
			a.d[w] = byte(rem + '0')
		} else if rem != 0 {
			a.trunc = true
		}
		n = quo
	}

	a.nd += delta
	if a.nd >= len(a.d) {
		a.nd = len(a.d)
	}
	a.dp += delta
	trim(a)
}
|  | ||||
| // Binary shift left (k > 0) or right (k < 0). | ||||
| func (a *decimal) Shift(k int) { | ||||
| 	switch { | ||||
| 	case a.nd == 0: | ||||
| 		// nothing to do: a == 0 | ||||
| 	case k > 0: | ||||
| 		for k > maxShift { | ||||
| 			leftShift(a, maxShift) | ||||
| 			k -= maxShift | ||||
| 		} | ||||
| 		leftShift(a, uint(k)) | ||||
| 	case k < 0: | ||||
| 		for k < -maxShift { | ||||
| 			rightShift(a, maxShift) | ||||
| 			k += maxShift | ||||
| 		} | ||||
| 		rightShift(a, uint(-k)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // If we chop a at nd digits, should we round up? | ||||
| func shouldRoundUp(a *decimal, nd int) bool { | ||||
| 	if nd < 0 || nd >= a.nd { | ||||
| 		return false | ||||
| 	} | ||||
| 	if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even | ||||
| 		// if we truncated, a little higher than what's recorded - always round up | ||||
| 		if a.trunc { | ||||
| 			return true | ||||
| 		} | ||||
| 		return nd > 0 && (a.d[nd-1]-'0')%2 != 0 | ||||
| 	} | ||||
| 	// not halfway - digit tells all | ||||
| 	return a.d[nd] >= '5' | ||||
| } | ||||
|  | ||||
| // Round a to nd digits (or fewer). | ||||
| // If nd is zero, it means we're rounding | ||||
| // just to the left of the digits, as in | ||||
| // 0.09 -> 0.1. | ||||
| func (a *decimal) Round(nd int) { | ||||
| 	if nd < 0 || nd >= a.nd { | ||||
| 		return | ||||
| 	} | ||||
| 	if shouldRoundUp(a, nd) { | ||||
| 		a.RoundUp(nd) | ||||
| 	} else { | ||||
| 		a.RoundDown(nd) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Round a down to nd digits (or fewer). | ||||
| func (a *decimal) RoundDown(nd int) { | ||||
| 	if nd < 0 || nd >= a.nd { | ||||
| 		return | ||||
| 	} | ||||
| 	a.nd = nd | ||||
| 	trim(a) | ||||
| } | ||||
|  | ||||
| // Round a up to nd digits (or fewer). | ||||
| func (a *decimal) RoundUp(nd int) { | ||||
| 	if nd < 0 || nd >= a.nd { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// round up | ||||
| 	for i := nd - 1; i >= 0; i-- { | ||||
| 		c := a.d[i] | ||||
| 		if c < '9' { // can stop after this digit | ||||
| 			a.d[i]++ | ||||
| 			a.nd = i + 1 | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Number is all 9s. | ||||
| 	// Change to single 1 with adjusted decimal point. | ||||
| 	a.d[0] = '1' | ||||
| 	a.nd = 1 | ||||
| 	a.dp++ | ||||
| } | ||||
|  | ||||
| // Extract integer part, rounded appropriately. | ||||
| // No guarantees about overflow. | ||||
| func (a *decimal) RoundedInteger() uint64 { | ||||
| 	if a.dp > 20 { | ||||
| 		return 0xFFFFFFFFFFFFFFFF | ||||
| 	} | ||||
| 	var i int | ||||
| 	n := uint64(0) | ||||
| 	for i = 0; i < a.dp && i < a.nd; i++ { | ||||
| 		n = n*10 + uint64(a.d[i]-'0') | ||||
| 	} | ||||
| 	for ; i < a.dp; i++ { | ||||
| 		n *= 10 | ||||
| 	} | ||||
| 	if shouldRoundUp(a, a.dp) { | ||||
| 		n++ | ||||
| 	} | ||||
| 	return n | ||||
| } | ||||
							
								
								
									
										668
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										668
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,668 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package v1 | ||||
|  | ||||
// An extFloat represents an extended floating-point number, with more
// precision than a float64. It does not try to save bits: the
// number represented by the structure is mant*(2^exp), with a negative
// sign if neg is true.
type extFloat struct {
	mant uint64 // mantissa
	exp  int    // binary exponent: value is mant * 2^exp
	neg  bool   // negative sign
}
|  | ||||
// Powers of ten taken from double-conversion library.
// http://code.google.com/p/double-conversion/
const (
	firstPowerOfTen = -348 // decimal exponent of powersOfTen[0]
	stepPowerOfTen  = 8    // decimal-exponent step between powersOfTen entries
)
|  | ||||
// smallPowersOfTen holds exact extFloat representations of 10^0..10^7.
var smallPowersOfTen = [...]extFloat{
	{1 << 63, -63, false},        // 1
	{0xa << 60, -60, false},      // 1e1
	{0x64 << 57, -57, false},     // 1e2
	{0x3e8 << 54, -54, false},    // 1e3
	{0x2710 << 50, -50, false},   // 1e4
	{0x186a0 << 47, -47, false},  // 1e5
	{0xf4240 << 44, -44, false},  // 1e6
	{0x989680 << 40, -40, false}, // 1e7
}
|  | ||||
// powersOfTen holds rounded extFloat approximations of 10^n for
// n = -348 .. 340 in steps of stepPowerOfTen (8): entry i corresponds
// to 10^(firstPowerOfTen + i*stepPowerOfTen). Values come from the
// double-conversion library (see the comment on firstPowerOfTen).
var powersOfTen = [...]extFloat{
	{0xfa8fd5a0081c0288, -1220, false}, // 10^-348
	{0xbaaee17fa23ebf76, -1193, false}, // 10^-340
	{0x8b16fb203055ac76, -1166, false}, // 10^-332
	{0xcf42894a5dce35ea, -1140, false}, // 10^-324
	{0x9a6bb0aa55653b2d, -1113, false}, // 10^-316
	{0xe61acf033d1a45df, -1087, false}, // 10^-308
	{0xab70fe17c79ac6ca, -1060, false}, // 10^-300
	{0xff77b1fcbebcdc4f, -1034, false}, // 10^-292
	{0xbe5691ef416bd60c, -1007, false}, // 10^-284
	{0x8dd01fad907ffc3c, -980, false},  // 10^-276
	{0xd3515c2831559a83, -954, false},  // 10^-268
	{0x9d71ac8fada6c9b5, -927, false},  // 10^-260
	{0xea9c227723ee8bcb, -901, false},  // 10^-252
	{0xaecc49914078536d, -874, false},  // 10^-244
	{0x823c12795db6ce57, -847, false},  // 10^-236
	{0xc21094364dfb5637, -821, false},  // 10^-228
	{0x9096ea6f3848984f, -794, false},  // 10^-220
	{0xd77485cb25823ac7, -768, false},  // 10^-212
	{0xa086cfcd97bf97f4, -741, false},  // 10^-204
	{0xef340a98172aace5, -715, false},  // 10^-196
	{0xb23867fb2a35b28e, -688, false},  // 10^-188
	{0x84c8d4dfd2c63f3b, -661, false},  // 10^-180
	{0xc5dd44271ad3cdba, -635, false},  // 10^-172
	{0x936b9fcebb25c996, -608, false},  // 10^-164
	{0xdbac6c247d62a584, -582, false},  // 10^-156
	{0xa3ab66580d5fdaf6, -555, false},  // 10^-148
	{0xf3e2f893dec3f126, -529, false},  // 10^-140
	{0xb5b5ada8aaff80b8, -502, false},  // 10^-132
	{0x87625f056c7c4a8b, -475, false},  // 10^-124
	{0xc9bcff6034c13053, -449, false},  // 10^-116
	{0x964e858c91ba2655, -422, false},  // 10^-108
	{0xdff9772470297ebd, -396, false},  // 10^-100
	{0xa6dfbd9fb8e5b88f, -369, false},  // 10^-92
	{0xf8a95fcf88747d94, -343, false},  // 10^-84
	{0xb94470938fa89bcf, -316, false},  // 10^-76
	{0x8a08f0f8bf0f156b, -289, false},  // 10^-68
	{0xcdb02555653131b6, -263, false},  // 10^-60
	{0x993fe2c6d07b7fac, -236, false},  // 10^-52
	{0xe45c10c42a2b3b06, -210, false},  // 10^-44
	{0xaa242499697392d3, -183, false},  // 10^-36
	{0xfd87b5f28300ca0e, -157, false},  // 10^-28
	{0xbce5086492111aeb, -130, false},  // 10^-20
	{0x8cbccc096f5088cc, -103, false},  // 10^-12
	{0xd1b71758e219652c, -77, false},   // 10^-4
	{0x9c40000000000000, -50, false},   // 10^4
	{0xe8d4a51000000000, -24, false},   // 10^12
	{0xad78ebc5ac620000, 3, false},     // 10^20
	{0x813f3978f8940984, 30, false},    // 10^28
	{0xc097ce7bc90715b3, 56, false},    // 10^36
	{0x8f7e32ce7bea5c70, 83, false},    // 10^44
	{0xd5d238a4abe98068, 109, false},   // 10^52
	{0x9f4f2726179a2245, 136, false},   // 10^60
	{0xed63a231d4c4fb27, 162, false},   // 10^68
	{0xb0de65388cc8ada8, 189, false},   // 10^76
	{0x83c7088e1aab65db, 216, false},   // 10^84
	{0xc45d1df942711d9a, 242, false},   // 10^92
	{0x924d692ca61be758, 269, false},   // 10^100
	{0xda01ee641a708dea, 295, false},   // 10^108
	{0xa26da3999aef774a, 322, false},   // 10^116
	{0xf209787bb47d6b85, 348, false},   // 10^124
	{0xb454e4a179dd1877, 375, false},   // 10^132
	{0x865b86925b9bc5c2, 402, false},   // 10^140
	{0xc83553c5c8965d3d, 428, false},   // 10^148
	{0x952ab45cfa97a0b3, 455, false},   // 10^156
	{0xde469fbd99a05fe3, 481, false},   // 10^164
	{0xa59bc234db398c25, 508, false},   // 10^172
	{0xf6c69a72a3989f5c, 534, false},   // 10^180
	{0xb7dcbf5354e9bece, 561, false},   // 10^188
	{0x88fcf317f22241e2, 588, false},   // 10^196
	{0xcc20ce9bd35c78a5, 614, false},   // 10^204
	{0x98165af37b2153df, 641, false},   // 10^212
	{0xe2a0b5dc971f303a, 667, false},   // 10^220
	{0xa8d9d1535ce3b396, 694, false},   // 10^228
	{0xfb9b7cd9a4a7443c, 720, false},   // 10^236
	{0xbb764c4ca7a44410, 747, false},   // 10^244
	{0x8bab8eefb6409c1a, 774, false},   // 10^252
	{0xd01fef10a657842c, 800, false},   // 10^260
	{0x9b10a4e5e9913129, 827, false},   // 10^268
	{0xe7109bfba19c0c9d, 853, false},   // 10^276
	{0xac2820d9623bf429, 880, false},   // 10^284
	{0x80444b5e7aa7cf85, 907, false},   // 10^292
	{0xbf21e44003acdd2d, 933, false},   // 10^300
	{0x8e679c2f5e44ff8f, 960, false},   // 10^308
	{0xd433179d9c8cb841, 986, false},   // 10^316
	{0x9e19db92b4e31ba9, 1013, false},  // 10^324
	{0xeb96bf6ebadf77d9, 1039, false},  // 10^332
	{0xaf87023b9bf0ee6b, 1066, false},  // 10^340
}
|  | ||||
| // floatBits returns the bits of the float64 that best approximates | ||||
| // the extFloat passed as receiver. Overflow is set to true if | ||||
| // the resulting float64 is ±Inf. | ||||
| func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) { | ||||
| 	f.Normalize() | ||||
|  | ||||
| 	exp := f.exp + 63 | ||||
|  | ||||
| 	// Exponent too small. | ||||
| 	if exp < flt.bias+1 { | ||||
| 		n := flt.bias + 1 - exp | ||||
| 		f.mant >>= uint(n) | ||||
| 		exp += n | ||||
| 	} | ||||
|  | ||||
| 	// Extract 1+flt.mantbits bits from the 64-bit mantissa. | ||||
| 	mant := f.mant >> (63 - flt.mantbits) | ||||
| 	if f.mant&(1<<(62-flt.mantbits)) != 0 { | ||||
| 		// Round up. | ||||
| 		mant += 1 | ||||
| 	} | ||||
|  | ||||
| 	// Rounding might have added a bit; shift down. | ||||
| 	if mant == 2<<flt.mantbits { | ||||
| 		mant >>= 1 | ||||
| 		exp++ | ||||
| 	} | ||||
|  | ||||
| 	// Infinities. | ||||
| 	if exp-flt.bias >= 1<<flt.expbits-1 { | ||||
| 		// ±Inf | ||||
| 		mant = 0 | ||||
| 		exp = 1<<flt.expbits - 1 + flt.bias | ||||
| 		overflow = true | ||||
| 	} else if mant&(1<<flt.mantbits) == 0 { | ||||
| 		// Denormalized? | ||||
| 		exp = flt.bias | ||||
| 	} | ||||
| 	// Assemble bits. | ||||
| 	bits = mant & (uint64(1)<<flt.mantbits - 1) | ||||
| 	bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits | ||||
| 	if f.neg { | ||||
| 		bits |= 1 << (flt.mantbits + flt.expbits) | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
// AssignComputeBounds sets f to the floating point value
// defined by mant, exp and precision given by flt. It returns
// lower, upper such that any number in the closed interval
// [lower, upper] is converted back to the same floating point number.
func (f *extFloat) AssignComputeBounds(mant uint64, exp int, neg bool, flt *floatInfo) (lower, upper extFloat) {
	f.mant = mant
	f.exp = exp - int(flt.mantbits)
	f.neg = neg
	if f.exp <= 0 && mant == (mant>>uint(-f.exp))<<uint(-f.exp) {
		// An exact integer: shift to exponent 0 so callers can detect
		// this case; the bounds then collapse onto f itself.
		f.mant >>= uint(-f.exp)
		f.exp = 0
		return *f, *f
	}
	expBiased := exp - flt.bias

	// upper is f plus half an ulp (mantissa doubled, so +1 is half).
	upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg}
	if mant != 1<<flt.mantbits || expBiased == 1 {
		// Common case: lower is f minus half an ulp.
		lower = extFloat{mant: 2*f.mant - 1, exp: f.exp - 1, neg: f.neg}
	} else {
		// f is exactly a power of two (smallest mantissa of its
		// exponent), so the gap below it is half as wide: use a
		// quarter of an ulp.
		lower = extFloat{mant: 4*f.mant - 1, exp: f.exp - 2, neg: f.neg}
	}
	return
}
|  | ||||
| // Normalize normalizes f so that the highest bit of the mantissa is | ||||
| // set, and returns the number by which the mantissa was left-shifted. | ||||
| func (f *extFloat) Normalize() (shift uint) { | ||||
| 	mant, exp := f.mant, f.exp | ||||
| 	if mant == 0 { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	if mant>>(64-32) == 0 { | ||||
| 		mant <<= 32 | ||||
| 		exp -= 32 | ||||
| 	} | ||||
| 	if mant>>(64-16) == 0 { | ||||
| 		mant <<= 16 | ||||
| 		exp -= 16 | ||||
| 	} | ||||
| 	if mant>>(64-8) == 0 { | ||||
| 		mant <<= 8 | ||||
| 		exp -= 8 | ||||
| 	} | ||||
| 	if mant>>(64-4) == 0 { | ||||
| 		mant <<= 4 | ||||
| 		exp -= 4 | ||||
| 	} | ||||
| 	if mant>>(64-2) == 0 { | ||||
| 		mant <<= 2 | ||||
| 		exp -= 2 | ||||
| 	} | ||||
| 	if mant>>(64-1) == 0 { | ||||
| 		mant <<= 1 | ||||
| 		exp -= 1 | ||||
| 	} | ||||
| 	shift = uint(f.exp - exp) | ||||
| 	f.mant, f.exp = mant, exp | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Multiply sets f to the product f*g: the result is correctly rounded, | ||||
| // but not normalized. | ||||
| func (f *extFloat) Multiply(g extFloat) { | ||||
| 	fhi, flo := f.mant>>32, uint64(uint32(f.mant)) | ||||
| 	ghi, glo := g.mant>>32, uint64(uint32(g.mant)) | ||||
|  | ||||
| 	// Cross products. | ||||
| 	cross1 := fhi * glo | ||||
| 	cross2 := flo * ghi | ||||
|  | ||||
| 	// f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo | ||||
| 	f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32) | ||||
| 	rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32) | ||||
| 	// Round up. | ||||
| 	rem += (1 << 31) | ||||
|  | ||||
| 	f.mant += (rem >> 32) | ||||
| 	f.exp = f.exp + g.exp + 64 | ||||
| } | ||||
|  | ||||
// uint64pow10 lists the powers of ten that fit in a uint64:
// uint64pow10[i] == 10^i for i in [0, 19].
var uint64pow10 = [...]uint64{
	1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
	1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
}
|  | ||||
| // AssignDecimal sets f to an approximate value mantissa*10^exp. It | ||||
| // returns true if the value represented by f is guaranteed to be the | ||||
| // best approximation of d after being rounded to a float64 or | ||||
| // float32 depending on flt. | ||||
| func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) { | ||||
| 	const uint64digits = 19 | ||||
| 	const errorscale = 8 | ||||
| 	errors := 0 // An upper bound for error, computed in errorscale*ulp. | ||||
| 	if trunc { | ||||
| 		// the decimal number was truncated. | ||||
| 		errors += errorscale / 2 | ||||
| 	} | ||||
|  | ||||
| 	f.mant = mantissa | ||||
| 	f.exp = 0 | ||||
| 	f.neg = neg | ||||
|  | ||||
| 	// Multiply by powers of ten. | ||||
| 	i := (exp10 - firstPowerOfTen) / stepPowerOfTen | ||||
| 	if exp10 < firstPowerOfTen || i >= len(powersOfTen) { | ||||
| 		return false | ||||
| 	} | ||||
| 	adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen | ||||
|  | ||||
| 	// We multiply by exp%step | ||||
| 	if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] { | ||||
| 		// We can multiply the mantissa exactly. | ||||
| 		f.mant *= uint64pow10[adjExp] | ||||
| 		f.Normalize() | ||||
| 	} else { | ||||
| 		f.Normalize() | ||||
| 		f.Multiply(smallPowersOfTen[adjExp]) | ||||
| 		errors += errorscale / 2 | ||||
| 	} | ||||
|  | ||||
| 	// We multiply by 10 to the exp - exp%step. | ||||
| 	f.Multiply(powersOfTen[i]) | ||||
| 	if errors > 0 { | ||||
| 		errors += 1 | ||||
| 	} | ||||
| 	errors += errorscale / 2 | ||||
|  | ||||
| 	// Normalize | ||||
| 	shift := f.Normalize() | ||||
| 	errors <<= shift | ||||
|  | ||||
| 	// Now f is a good approximation of the decimal. | ||||
| 	// Check whether the error is too large: that is, if the mantissa | ||||
| 	// is perturbated by the error, the resulting float64 will change. | ||||
| 	// The 64 bits mantissa is 1 + 52 bits for float64 + 11 extra bits. | ||||
| 	// | ||||
| 	// In many cases the approximation will be good enough. | ||||
| 	denormalExp := flt.bias - 63 | ||||
| 	var extrabits uint | ||||
| 	if f.exp <= denormalExp { | ||||
| 		// f.mant * 2^f.exp is smaller than 2^(flt.bias+1). | ||||
| 		extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp)) | ||||
| 	} else { | ||||
| 		extrabits = uint(63 - flt.mantbits) | ||||
| 	} | ||||
|  | ||||
| 	halfway := uint64(1) << (extrabits - 1) | ||||
| 	mant_extra := f.mant & (1<<extrabits - 1) | ||||
|  | ||||
| 	// Do a signed comparison here! If the error estimate could make | ||||
| 	// the mantissa round differently for the conversion to double, | ||||
| 	// then we can't give a definite answer. | ||||
| 	if int64(halfway)-int64(errors) < int64(mant_extra) && | ||||
| 		int64(mant_extra) < int64(halfway)+int64(errors) { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
// Frexp10 is an analogue of math.Frexp for decimal powers. It scales
// f by an approximate power of ten 10^-exp, and returns exp10, so
// that f*10^exp10 has the same value as the old f, up to an ulp,
// as well as the index of 10^-exp in the powersOfTen table.
func (f *extFloat) frexp10() (exp10, index int) {
	// The constants expMin and expMax constrain the final value of the
	// binary exponent of f. We want a small integral part in the result
	// because finding digits of an integer requires divisions, whereas
	// digits of the fractional part can be found by repeatedly multiplying
	// by 10.
	const expMin = -60
	const expMax = -32
	// Find power of ten such that x * 10^n has a binary exponent
	// between expMin and expMax.
	approxExp10 := ((expMin+expMax)/2 - f.exp) * 28 / 93 // log(10)/log(2) is close to 93/28.
	i := (approxExp10 - firstPowerOfTen) / stepPowerOfTen
	// The estimate above may be off by a table entry; nudge i until the
	// resulting binary exponent lands in [expMin, expMax].
Loop:
	for {
		exp := f.exp + powersOfTen[i].exp + 64
		switch {
		case exp < expMin:
			i++
		case exp > expMax:
			i--
		default:
			break Loop
		}
	}
	// Apply the desired decimal shift on f. It will have exponent
	// in the desired range. This is multiplication by 10^-exp10.
	f.Multiply(powersOfTen[i])

	return -(firstPowerOfTen + i*stepPowerOfTen), i
}
|  | ||||
| // frexp10Many applies a common shift by a power of ten to a, b, c. | ||||
| func frexp10Many(a, b, c *extFloat) (exp10 int) { | ||||
| 	exp10, i := c.frexp10() | ||||
| 	a.Multiply(powersOfTen[i]) | ||||
| 	b.Multiply(powersOfTen[i]) | ||||
| 	return | ||||
| } | ||||
|  | ||||
// FixedDecimal stores in d the first n significant digits
// of the decimal representation of f. It returns false
// if it cannot be sure of the answer.
func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool {
	if f.mant == 0 {
		// Zero: empty digit string, decimal point at 0.
		d.nd = 0
		d.dp = 0
		d.neg = f.neg
		return true
	}
	if n == 0 {
		panic("strconv: internal error: extFloat.FixedDecimal called with n == 0")
	}
	// Multiply by an appropriate power of ten to have a reasonable
	// number to process.
	f.Normalize()
	exp10, _ := f.frexp10()

	// frexp10 leaves f.exp in [expMin, expMax] = [-60, -32], so the
	// integral part (f.mant >> shift) fits in a uint32.
	shift := uint(-f.exp)
	integer := uint32(f.mant >> shift)
	fraction := f.mant - (uint64(integer) << shift)
	ε := uint64(1) // ε is the uncertainty we have on the mantissa of f.

	// Write exactly n digits to d.
	needed := n        // how many digits are left to write.
	integerDigits := 0 // the number of decimal digits of integer.
	pow10 := uint64(1) // the power of ten by which f was scaled.
	for i, pow := 0, uint64(1); i < 20; i++ {
		if pow > uint64(integer) {
			integerDigits = i
			break
		}
		pow *= 10
	}
	rest := integer
	if integerDigits > needed {
		// the integral part is already large, trim the last digits.
		pow10 = uint64pow10[integerDigits-needed]
		integer /= uint32(pow10)
		rest -= integer * uint32(pow10)
	} else {
		rest = 0
	}

	// Write the digits of integer: the digits of rest are omitted.
	// Digits are produced least-significant first into the tail of buf.
	var buf [32]byte
	pos := len(buf)
	for v := integer; v > 0; {
		v1 := v / 10
		v -= 10 * v1
		pos--
		buf[pos] = byte(v + '0')
		v = v1
	}
	for i := pos; i < len(buf); i++ {
		d.d[i-pos] = buf[i]
	}
	nd := len(buf) - pos
	d.nd = nd
	d.dp = integerDigits + exp10
	needed -= nd

	if needed > 0 {
		if rest != 0 || pow10 != 1 {
			panic("strconv: internal error, rest != 0 but needed > 0")
		}
		// Emit digits for the fractional part. Each time, 10*fraction
		// fits in a uint64 without overflow.
		for needed > 0 {
			fraction *= 10
			ε *= 10 // the uncertainty scales as we multiply by ten.
			if 2*ε > 1<<shift {
				// the error is so large it could modify which digit to write, abort.
				return false
			}
			digit := fraction >> shift
			d.d[nd] = byte(digit + '0')
			fraction -= digit << shift
			nd++
			needed--
		}
		d.nd = nd
	}

	// We have written a truncation of f (a numerator / 10^d.dp). The remaining part
	// can be interpreted as a small number (< 1) to be added to the last digit of the
	// numerator.
	//
	// If rest > 0, the amount is:
	//    (rest<<shift | fraction) / (pow10 << shift)
	//    fraction being known with a ±ε uncertainty.
	//    The fact that n > 0 guarantees that pow10 << shift does not overflow a uint64.
	//
	// If rest = 0, pow10 == 1 and the amount is
	//    fraction / (1 << shift)
	//    fraction being known with a ±ε uncertainty.
	//
	// We pass this information to the rounding routine for adjustment.

	ok := adjustLastDigitFixed(d, uint64(rest)<<shift|fraction, pow10, shift, ε)
	if !ok {
		return false
	}
	// Trim trailing zeros.
	for i := d.nd - 1; i >= 0; i-- {
		if d.d[i] != '0' {
			d.nd = i + 1
			break
		}
	}
	return true
}
|  | ||||
// adjustLastDigitFixed assumes d contains the representation of the integral part
// of some number, whose fractional part is num / (den << shift). The numerator
// num is only known up to an uncertainty of size ε, assumed to be less than
// (den << shift)/2.
//
// It will increase the last digit by one to account for correct rounding, typically
// when the fractional part is greater than 1/2, and will return false if ε is such
// that no correct answer can be given.
func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool {
	if num > den<<shift {
		panic("strconv: num > den<<shift in adjustLastDigitFixed")
	}
	if 2*ε > den<<shift {
		panic("strconv: ε > (den<<shift)/2")
	}
	if 2*(num+ε) < den<<shift {
		// Fraction is certainly below 1/2: truncated digits are correct.
		return true
	}
	if 2*(num-ε) > den<<shift {
		// Fraction is certainly above 1/2: increment d by 1,
		// propagating the carry through trailing '9' digits.
		i := d.nd - 1
		for ; i >= 0; i-- {
			if d.d[i] == '9' {
				d.nd--
			} else {
				break
			}
		}
		if i < 0 {
			// Every digit was '9': the result becomes a single '1' with
			// the decimal point moved one place.
			d.d[0] = '1'
			d.nd = 1
			d.dp++
		} else {
			d.d[i]++
		}
		return true
	}
	// The uncertainty interval straddles 1/2: rounding direction unknown.
	return false
}
|  | ||||
// ShortestDecimal stores in d the shortest decimal representation of f
// which belongs to the open interval (lower, upper), where f is supposed
// to lie. It returns false whenever the result is unsure. The implementation
// uses the Grisu3 algorithm.
func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool {
	if f.mant == 0 {
		// Zero needs no digits.
		d.nd = 0
		d.dp = 0
		d.neg = f.neg
		return true
	}
	if f.exp == 0 && *lower == *f && *lower == *upper {
		// an exact integer: format the mantissa directly,
		// least-significant digit first into the tail of buf.
		var buf [24]byte
		n := len(buf) - 1
		for v := f.mant; v > 0; {
			v1 := v / 10
			v -= 10 * v1
			buf[n] = byte(v + '0')
			n--
			v = v1
		}
		nd := len(buf) - n - 1
		for i := 0; i < nd; i++ {
			d.d[i] = buf[n+1+i]
		}
		d.nd, d.dp = nd, nd
		// Drop trailing zeros; they are implied by d.dp.
		for d.nd > 0 && d.d[d.nd-1] == '0' {
			d.nd--
		}
		if d.nd == 0 {
			d.dp = 0
		}
		d.neg = f.neg
		return true
	}
	upper.Normalize()
	// Uniformize exponents so the three mantissas are comparable.
	if f.exp > upper.exp {
		f.mant <<= uint(f.exp - upper.exp)
		f.exp = upper.exp
	}
	if lower.exp > upper.exp {
		lower.mant <<= uint(lower.exp - upper.exp)
		lower.exp = upper.exp
	}

	exp10 := frexp10Many(lower, f, upper)
	// Take a safety margin due to rounding in frexp10Many, but we lose precision.
	upper.mant++
	lower.mant--

	// The shortest representation of f is either rounded up or down, but
	// in any case, it is a truncation of upper.
	shift := uint(-upper.exp)
	integer := uint32(upper.mant >> shift)
	fraction := upper.mant - (uint64(integer) << shift)

	// How far we can go down from upper until the result is wrong.
	allowance := upper.mant - lower.mant
	// How far we should go to get a very precise result.
	targetDiff := upper.mant - f.mant

	// Count integral digits: there are at most 10.
	var integerDigits int
	for i, pow := 0, uint64(1); i < 20; i++ {
		if pow > uint64(integer) {
			integerDigits = i
			break
		}
		pow *= 10
	}
	for i := 0; i < integerDigits; i++ {
		pow := uint64pow10[integerDigits-i-1]
		digit := integer / uint32(pow)
		d.d[i] = byte(digit + '0')
		integer -= digit * uint32(pow)
		// evaluate whether we should stop: once what remains of upper
		// is inside the allowance, the digits written so far suffice.
		if currentDiff := uint64(integer)<<shift + fraction; currentDiff < allowance {
			d.nd = i + 1
			d.dp = integerDigits + exp10
			d.neg = f.neg
			// Sometimes allowance is so large the last digit might need to be
			// decremented to get closer to f.
			return adjustLastDigit(d, currentDiff, targetDiff, allowance, pow<<shift, 2)
		}
	}
	d.nd = integerDigits
	d.dp = d.nd + exp10
	d.neg = f.neg

	// Compute digits of the fractional part. At each step fraction does not
	// overflow. The choice of minExp implies that fraction is less than 2^60.
	var digit int
	multiplier := uint64(1)
	for {
		fraction *= 10
		multiplier *= 10
		digit = int(fraction >> shift)
		d.d[d.nd] = byte(digit + '0')
		d.nd++
		fraction -= uint64(digit) << shift
		if fraction < allowance*multiplier {
			// We are in the admissible range. Note that if allowance is about to
			// overflow, that is, allowance > 2^64/10, the condition is automatically
			// true due to the limited range of fraction.
			return adjustLastDigit(d,
				fraction, targetDiff*multiplier, allowance*multiplier,
				1<<shift, multiplier*2)
		}
	}
}
|  | ||||
// adjustLastDigit modifies d = x-currentDiff*ε, to get closest to
// d = x-targetDiff*ε, without becoming smaller than x-maxDiff*ε.
// It assumes that a decimal digit is worth ulpDecimal*ε, and that
// all data is known with a error estimate of ulpBinary*ε.
func adjustLastDigit(d *decimalSlice, currentDiff, targetDiff, maxDiff, ulpDecimal, ulpBinary uint64) bool {
	if ulpDecimal < 2*ulpBinary {
		// Approximation is too wide.
		return false
	}
	// Decrement the last digit while doing so provably brings d closer
	// to the target, allowing for the ulpBinary uncertainty.
	for currentDiff+ulpDecimal/2+ulpBinary < targetDiff {
		d.d[d.nd-1]--
		currentDiff += ulpDecimal
	}
	if currentDiff+ulpDecimal <= targetDiff+ulpDecimal/2+ulpBinary {
		// we have two choices, and don't know what to do.
		return false
	}
	if currentDiff < ulpBinary || currentDiff > maxDiff-ulpBinary {
		// we went too far
		return false
	}
	if d.nd == 1 && d.d[0] == '0' {
		// the number has actually reached zero.
		d.nd = 0
		d.dp = 0
	}
	return true
}
							
								
								
									
										121
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/fold.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										121
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/fold.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,121 @@ | ||||
| /** | ||||
|  *  Copyright 2014 Paul Querna | ||||
|  * | ||||
|  *  Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|  *  you may not use this file except in compliance with the License. | ||||
|  *  You may obtain a copy of the License at | ||||
|  * | ||||
|  *      http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  * | ||||
|  *  Unless required by applicable law or agreed to in writing, software | ||||
|  *  distributed under the License is distributed on an "AS IS" BASIS, | ||||
|  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|  *  See the License for the specific language governing permissions and | ||||
|  *  limitations under the License. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| /* Portions of this file are on Go stdlib's encoding/json/fold.go */ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package v1 | ||||
|  | ||||
| import ( | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
const (
	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
	kelvin       = '\u212a'    // Kelvin sign; case-folds with 'k'/'K' (see EqualFoldRight).
	smallLongEss = '\u017f'    // Latin small letter long s; case-folds with 's'/'S'.
)
|  | ||||
| // equalFoldRight is a specialization of bytes.EqualFold when s is | ||||
| // known to be all ASCII (including punctuation), but contains an 's', | ||||
| // 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. | ||||
| // See comments on foldFunc. | ||||
| func EqualFoldRight(s, t []byte) bool { | ||||
| 	for _, sb := range s { | ||||
| 		if len(t) == 0 { | ||||
| 			return false | ||||
| 		} | ||||
| 		tb := t[0] | ||||
| 		if tb < utf8.RuneSelf { | ||||
| 			if sb != tb { | ||||
| 				sbUpper := sb & caseMask | ||||
| 				if 'A' <= sbUpper && sbUpper <= 'Z' { | ||||
| 					if sbUpper != tb&caseMask { | ||||
| 						return false | ||||
| 					} | ||||
| 				} else { | ||||
| 					return false | ||||
| 				} | ||||
| 			} | ||||
| 			t = t[1:] | ||||
| 			continue | ||||
| 		} | ||||
| 		// sb is ASCII and t is not. t must be either kelvin | ||||
| 		// sign or long s; sb must be s, S, k, or K. | ||||
| 		tr, size := utf8.DecodeRune(t) | ||||
| 		switch sb { | ||||
| 		case 's', 'S': | ||||
| 			if tr != smallLongEss { | ||||
| 				return false | ||||
| 			} | ||||
| 		case 'k', 'K': | ||||
| 			if tr != kelvin { | ||||
| 				return false | ||||
| 			} | ||||
| 		default: | ||||
| 			return false | ||||
| 		} | ||||
| 		t = t[size:] | ||||
|  | ||||
| 	} | ||||
| 	if len(t) > 0 { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // asciiEqualFold is a specialization of bytes.EqualFold for use when | ||||
| // s is all ASCII (but may contain non-letters) and contains no | ||||
| // special-folding letters. | ||||
| // See comments on foldFunc. | ||||
| func AsciiEqualFold(s, t []byte) bool { | ||||
| 	if len(s) != len(t) { | ||||
| 		return false | ||||
| 	} | ||||
| 	for i, sb := range s { | ||||
| 		tb := t[i] | ||||
| 		if sb == tb { | ||||
| 			continue | ||||
| 		} | ||||
| 		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { | ||||
| 			if sb&caseMask != tb&caseMask { | ||||
| 				return false | ||||
| 			} | ||||
| 		} else { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // simpleLetterEqualFold is a specialization of bytes.EqualFold for | ||||
| // use when s is all ASCII letters (no underscores, etc) and also | ||||
| // doesn't contain 'k', 'K', 's', or 'S'. | ||||
| // See comments on foldFunc. | ||||
| func SimpleLetterEqualFold(s, t []byte) bool { | ||||
| 	if len(s) != len(t) { | ||||
| 		return false | ||||
| 	} | ||||
| 	for i, b := range s { | ||||
| 		if b&caseMask != t[i]&caseMask { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
							
								
								
									
										542
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										542
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,542 @@ | ||||
| package v1 | ||||
|  | ||||
| /** | ||||
|  *  Copyright 2015 Paul Querna, Klaus Post | ||||
|  * | ||||
|  *  Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|  *  you may not use this file except in compliance with the License. | ||||
|  *  You may obtain a copy of the License at | ||||
|  * | ||||
|  *      http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  * | ||||
|  *  Unless required by applicable law or agreed to in writing, software | ||||
|  *  distributed under the License is distributed on an "AS IS" BASIS, | ||||
|  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|  *  See the License for the specific language governing permissions and | ||||
|  *  limitations under the License. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| /* Most of this file are on Go stdlib's strconv/ftoa.go */ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| import "math" | ||||
|  | ||||
// TODO: move elsewhere?

// floatInfo describes an IEEE-754 binary floating-point format.
type floatInfo struct {
	mantbits uint // explicit mantissa bits (52 for float64, 23 for float32)
	expbits  uint // exponent bits (11 for float64, 8 for float32)
	bias     int  // exponent bias (-1023 for float64, -127 for float32)
}
|  | ||||
// optimize selects the fast formatting paths (Grisu3 / fixed) before
// falling back to the slow path; it can change for testing.
var optimize = true // can change for testing

// Layout parameters of the IEEE-754 single and double formats.
var float32info = floatInfo{23, 8, -127}
var float64info = floatInfo{52, 11, -1023}
|  | ||||
// AppendFloat appends the string form of the floating-point number f,
// as generated by FormatFloat.
// fmt selects the verb ('b', 'e'/'E', 'f', 'g'/'G'), prec the precision
// (negative means "shortest round-trip"), and bitSize must be 32 or 64.
func AppendFloat(dst EncodingBuffer, val float64, fmt byte, prec, bitSize int) {
	var bits uint64
	var flt *floatInfo
	// Obtain the raw IEEE bits and the matching layout description.
	switch bitSize {
	case 32:
		bits = uint64(math.Float32bits(float32(val)))
		flt = &float32info
	case 64:
		bits = math.Float64bits(val)
		flt = &float64info
	default:
		panic("strconv: illegal AppendFloat/FormatFloat bitSize")
	}

	// Split into sign bit, biased exponent field, and mantissa field.
	neg := bits>>(flt.expbits+flt.mantbits) != 0
	exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
	mant := bits & (uint64(1)<<flt.mantbits - 1)

	switch exp {
	case 1<<flt.expbits - 1:
		// Inf, NaN
		var s string
		switch {
		case mant != 0:
			s = "NaN"
		case neg:
			s = "-Inf"
		default:
			s = "+Inf"
		}
		dst.WriteString(s)
		return

	case 0:
		// denormalized
		exp++

	default:
		// add implicit top bit
		mant |= uint64(1) << flt.mantbits
	}
	// Convert the biased field to the actual binary exponent.
	exp += flt.bias

	// Pick off easy binary format.
	if fmt == 'b' {
		fmtB(dst, neg, mant, exp, flt)
		return
	}

	// Slow-path escape hatch, used by tests.
	if !optimize {
		bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
		return
	}

	var digs decimalSlice
	ok := false
	// Negative precision means "only as much as needed to be exact."
	shortest := prec < 0
	if shortest {
		// Try Grisu3 algorithm.
		f := new(extFloat)
		lower, upper := f.AssignComputeBounds(mant, exp, neg, flt)
		var buf [32]byte
		digs.d = buf[:]
		ok = f.ShortestDecimal(&digs, &lower, &upper)
		if !ok {
			// Grisu3 failed (rare); fall back to exact multiprecision.
			bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
			return
		}
		// Precision for shortest representation mode.
		switch fmt {
		case 'e', 'E':
			prec = max(digs.nd-1, 0)
		case 'f':
			prec = max(digs.nd-digs.dp, 0)
		case 'g', 'G':
			prec = digs.nd
		}
	} else if fmt != 'f' {
		// Fixed number of digits.
		digits := prec
		switch fmt {
		case 'e', 'E':
			digits++
		case 'g', 'G':
			if prec == 0 {
				prec = 1
			}
			digits = prec
		}
		if digits <= 15 {
			// try fast algorithm when the number of digits is reasonable.
			var buf [24]byte
			digs.d = buf[:]
			f := extFloat{mant, exp - int(flt.mantbits), neg}
			ok = f.FixedDecimal(&digs, digits)
		}
	}
	if !ok {
		// Fast paths declined or failed; use the exact slow path.
		bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
		return
	}
	formatDigits(dst, shortest, neg, digs, prec, fmt)
	return
}
|  | ||||
// bigFtoa uses multiprecision computations to format a float.
// It is the always-correct fallback: the mantissa is expanded into a
// multiprecision decimal, shifted into position by the binary exponent,
// then rounded to the requested (or shortest round-trip) precision.
func bigFtoa(dst EncodingBuffer, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) {
	d := new(decimal)
	d.Assign(mant)
	// d now holds mant; shift by the exponent relative to the mantissa width.
	d.Shift(exp - int(flt.mantbits))
	var digs decimalSlice
	shortest := prec < 0
	if shortest {
		// Trim to the fewest digits that still round-trip exactly.
		roundShortest(d, mant, exp, flt)
		digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
		// Precision for shortest representation mode.
		switch fmt {
		case 'e', 'E':
			prec = digs.nd - 1
		case 'f':
			prec = max(digs.nd-digs.dp, 0)
		case 'g', 'G':
			prec = digs.nd
		}
	} else {
		// Round appropriately for the requested verb and precision.
		switch fmt {
		case 'e', 'E':
			d.Round(prec + 1)
		case 'f':
			d.Round(d.dp + prec)
		case 'g', 'G':
			if prec == 0 {
				prec = 1
			}
			d.Round(prec)
		}
		digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
	}
	formatDigits(dst, shortest, neg, digs, prec, fmt)
	return
}
|  | ||||
// formatDigits renders the already-computed digits digs to dst in the
// style selected by fmt ('e'/'E', 'f', 'g'/'G'). For 'g'/'G' it chooses
// between %e and %f form based on the decimal exponent, mirroring
// strconv.FormatFloat semantics.
func formatDigits(dst EncodingBuffer, shortest bool, neg bool, digs decimalSlice, prec int, fmt byte) {
	switch fmt {
	case 'e', 'E':
		fmtE(dst, neg, digs, prec, fmt)
		return
	case 'f':
		fmtF(dst, neg, digs, prec)
		return
	case 'g', 'G':
		// trailing fractional zeros in 'e' form will be trimmed.
		eprec := prec
		if eprec > digs.nd && digs.nd >= digs.dp {
			eprec = digs.nd
		}
		// %e is used if the exponent from the conversion
		// is less than -4 or greater than or equal to the precision.
		// if precision was the shortest possible, use precision 6 for this decision.
		if shortest {
			eprec = 6
		}
		exp := digs.dp - 1
		if exp < -4 || exp >= eprec {
			if prec > digs.nd {
				prec = digs.nd
			}
			// fmt+'e'-'g' maps 'g'->'e' and 'G'->'E'.
			fmtE(dst, neg, digs, prec-1, fmt+'e'-'g')
			return
		}
		if prec > digs.dp {
			prec = digs.nd
		}
		fmtF(dst, neg, digs, max(prec-digs.dp, 0))
		return
	}

	// unknown format: emit "%" and the verb, like fmt does for bad verbs.
	dst.Write([]byte{'%', fmt})
	return
}
|  | ||||
// Round d (= mant * 2^exp) to the shortest number of digits
// that will let the original floating point value be precisely
// reconstructed.  Size is original floating point size (64 or 32).
func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
	// If mantissa is zero, the number is zero; stop now.
	if mant == 0 {
		d.nd = 0
		return
	}

	// Compute upper and lower such that any decimal number
	// between upper and lower (possibly inclusive)
	// will round to the original floating point number.

	// We may see at once that the number is already shortest.
	//
	// Suppose d is not denormal, so that 2^exp <= d < 10^dp.
	// The closest shorter number is at least 10^(dp-nd) away.
	// The lower/upper bounds computed below are at distance
	// at most 2^(exp-mantbits).
	//
	// So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
	// or equivalently log2(10)*(dp-nd) > exp-mantbits.
	// It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
	minexp := flt.bias + 1 // minimum possible exponent
	if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
		// The number is already shortest.
		return
	}

	// d = mant << (exp - mantbits)
	// Next highest floating point number is mant+1 << exp-mantbits.
	// Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
	upper := new(decimal)
	upper.Assign(mant*2 + 1)
	upper.Shift(exp - int(flt.mantbits) - 1)

	// d = mant << (exp - mantbits)
	// Next lowest floating point number is mant-1 << exp-mantbits,
	// unless mant-1 drops the significant bit and exp is not the minimum exp,
	// in which case the next lowest is mant*2-1 << exp-mantbits-1.
	// Either way, call it mantlo << explo-mantbits.
	// Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
	var mantlo uint64
	var explo int
	if mant > 1<<flt.mantbits || exp == minexp {
		mantlo = mant - 1
		explo = exp
	} else {
		mantlo = mant*2 - 1
		explo = exp - 1
	}
	lower := new(decimal)
	lower.Assign(mantlo*2 + 1)
	lower.Shift(explo - int(flt.mantbits) - 1)

	// The upper and lower bounds are possible outputs only if
	// the original mantissa is even, so that IEEE round-to-even
	// would round to the original mantissa and not the neighbors.
	inclusive := mant%2 == 0

	// Now we can figure out the minimum number of digits required.
	// Walk along until d has distinguished itself from upper and lower.
	for i := 0; i < d.nd; i++ {
		var l, m, u byte // lower, middle, upper digits
		if i < lower.nd {
			l = lower.d[i]
		} else {
			l = '0' // past lower's digits: treat as zero
		}
		m = d.d[i]
		if i < upper.nd {
			u = upper.d[i]
		} else {
			u = '0' // past upper's digits: treat as zero
		}

		// Okay to round down (truncate) if lower has a different digit
		// or if lower is inclusive and is exactly the result of rounding down.
		okdown := l != m || (inclusive && l == m && i+1 == lower.nd)

		// Okay to round up if upper has a different digit and
		// either upper is inclusive or upper is bigger than the result of rounding up.
		okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)

		// If it's okay to do either, then round to the nearest one.
		// If it's okay to do only one, do it.
		switch {
		case okdown && okup:
			d.Round(i + 1)
			return
		case okdown:
			d.RoundDown(i + 1)
			return
		case okup:
			d.RoundUp(i + 1)
			return
		}
	}
}
|  | ||||
// decimalSlice is a lightweight view of a decimal mantissa: d[:nd]
// holds the ASCII digits and dp is the decimal-point position relative
// to d[0]. The formatters in this file receive the sign separately and
// do not read neg.
type decimalSlice struct {
	d      []byte // digit characters '0'-'9'
	nd, dp int    // digits used; decimal point position
	neg    bool   // sign
}
|  | ||||
// %e: -d.ddddde±dd
// fmtE writes d in scientific notation with prec digits after the
// decimal point; fmt is the exponent marker byte ('e' or 'E').
func fmtE(dst EncodingBuffer, neg bool, d decimalSlice, prec int, fmt byte) {
	// sign
	if neg {
		dst.WriteByte('-')
	}

	// first digit
	ch := byte('0')
	if d.nd != 0 {
		ch = d.d[0]
	}
	dst.WriteByte(ch)

	// .moredigits
	if prec > 0 {
		dst.WriteByte('.')
		i := 1
		m := min(d.nd, prec+1)
		if i < m {
			dst.Write(d.d[i:m])
			i = m
		}
		// Zero-fill up to the requested precision.
		for i <= prec {
			dst.WriteByte('0')
			i++
		}
	}

	// e±
	dst.WriteByte(fmt)
	exp := d.dp - 1
	if d.nd == 0 { // special case: 0 has exponent 0
		exp = 0
	}
	if exp < 0 {
		ch = '-'
		exp = -exp
	} else {
		ch = '+'
	}
	dst.WriteByte(ch)

	// dd or ddd — exponent always printed with at least two digits.
	switch {
	case exp < 10:
		dst.WriteByte('0')
		dst.WriteByte(byte(exp) + '0')
	case exp < 100:
		dst.WriteByte(byte(exp/10) + '0')
		dst.WriteByte(byte(exp%10) + '0')
	default:
		dst.WriteByte(byte(exp/100) + '0')
		dst.WriteByte(byte(exp/10)%10 + '0')
		dst.WriteByte(byte(exp%10) + '0')
	}

	return
}
|  | ||||
| // %f: -ddddddd.ddddd | ||||
| func fmtF(dst EncodingBuffer, neg bool, d decimalSlice, prec int) { | ||||
| 	// sign | ||||
| 	if neg { | ||||
| 		dst.WriteByte('-') | ||||
| 	} | ||||
|  | ||||
| 	// integer, padded with zeros as needed. | ||||
| 	if d.dp > 0 { | ||||
| 		m := min(d.nd, d.dp) | ||||
| 		dst.Write(d.d[:m]) | ||||
| 		for ; m < d.dp; m++ { | ||||
| 			dst.WriteByte('0') | ||||
| 		} | ||||
| 	} else { | ||||
| 		dst.WriteByte('0') | ||||
| 	} | ||||
|  | ||||
| 	// fraction | ||||
| 	if prec > 0 { | ||||
| 		dst.WriteByte('.') | ||||
| 		for i := 0; i < prec; i++ { | ||||
| 			ch := byte('0') | ||||
| 			if j := d.dp + i; 0 <= j && j < d.nd { | ||||
| 				ch = d.d[j] | ||||
| 			} | ||||
| 			dst.WriteByte(ch) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // %b: -ddddddddp±ddd | ||||
| func fmtB(dst EncodingBuffer, neg bool, mant uint64, exp int, flt *floatInfo) { | ||||
| 	// sign | ||||
| 	if neg { | ||||
| 		dst.WriteByte('-') | ||||
| 	} | ||||
|  | ||||
| 	// mantissa | ||||
| 	formatBits(dst, mant, 10, false) | ||||
|  | ||||
| 	// p | ||||
| 	dst.WriteByte('p') | ||||
|  | ||||
| 	// ±exponent | ||||
| 	exp -= int(flt.mantbits) | ||||
| 	if exp >= 0 { | ||||
| 		dst.WriteByte('+') | ||||
| 	} | ||||
| 	formatBits(dst, uint64(exp), 10, exp < 0) | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|  | ||||
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|  | ||||
// formatBits computes the string representation of u in the given base.
// If neg is set, u is treated as negative int64 value.
// The digits are produced right-to-left into a local buffer and written
// to dst in one call. `digits` and `shifts` are package-level tables
// defined elsewhere in this package.
func formatBits(dst EncodingBuffer, u uint64, base int, neg bool) {
	if base < 2 || base > len(digits) {
		panic("strconv: illegal AppendInt/FormatInt base")
	}
	// 2 <= base && base <= len(digits)

	var a [64 + 1]byte // +1 for sign of 64bit value in base 2
	i := len(a)

	if neg {
		// Two's-complement negate; the decimal digits of -u are the
		// digits of the magnitude.
		u = -u
	}

	// convert bits
	if base == 10 {
		// common case: use constants for / because
		// the compiler can optimize it into a multiply+shift

		if ^uintptr(0)>>32 == 0 {
			// 32-bit platform: peel off 9 digits at a time with 64-bit
			// division until u fits in a uintptr, then divide cheaply.
			for u > uint64(^uintptr(0)) {
				q := u / 1e9
				us := uintptr(u - q*1e9) // us % 1e9 fits into a uintptr
				for j := 9; j > 0; j-- {
					i--
					qs := us / 10
					a[i] = byte(us - qs*10 + '0')
					us = qs
				}
				u = q
			}
		}

		// u guaranteed to fit into a uintptr
		us := uintptr(u)
		for us >= 10 {
			i--
			q := us / 10
			a[i] = byte(us - q*10 + '0')
			us = q
		}
		// u < 10
		i--
		a[i] = byte(us + '0')

	} else if s := shifts[base]; s > 0 {
		// base is power of 2: use shifts and masks instead of / and %
		b := uint64(base)
		m := uintptr(b) - 1 // == 1<<s - 1
		for u >= b {
			i--
			a[i] = digits[uintptr(u)&m]
			u >>= s
		}
		// u < base
		i--
		a[i] = digits[uintptr(u)]

	} else {
		// general case
		b := uint64(base)
		for u >= b {
			i--
			q := u / b
			a[i] = digits[uintptr(u-q*b)]
			u = q
		}
		// u < base
		i--
		a[i] = digits[uintptr(u)]
	}

	// add sign, if any
	if neg {
		i--
		a[i] = '-'
	}

	dst.Write(a[i:])
}
							
								
								
									
										936
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										936
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,936 @@ | ||||
| /** | ||||
|  *  Copyright 2014 Paul Querna | ||||
|  * | ||||
|  *  Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|  *  you may not use this file except in compliance with the License. | ||||
|  *  You may obtain a copy of the License at | ||||
|  * | ||||
|  *      http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  * | ||||
|  *  Unless required by applicable law or agreed to in writing, software | ||||
|  *  distributed under the License is distributed on an "AS IS" BASIS, | ||||
|  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|  *  See the License for the specific language governing permissions and | ||||
|  *  limitations under the License. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| /* Portions of this file are on Go stdlib's strconv/atof.go */ | ||||
|  | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package internal | ||||
|  | ||||
| // decimal to binary floating point conversion. | ||||
| // Algorithm: | ||||
| //   1) Store input in multiprecision decimal. | ||||
| //   2) Multiply/divide decimal by powers of two until in range [0.5, 1) | ||||
| //   3) Multiply by 2^precision and round to get mantissa. | ||||
|  | ||||
| import "math" | ||||
|  | ||||
// optimize enables the fast parsing paths (exact float arithmetic and
// the extFloat path); disable to force the multiprecision fallback.
var optimize = true // can change for testing
|  | ||||
// equalIgnoreCase reports whether s1 and s2 have the same length and
// the same bytes, comparing ASCII letters case-insensitively.
// Non-letter bytes must match exactly.
func equalIgnoreCase(s1 []byte, s2 []byte) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i, c1 := range s1 {
		c2 := s2[i]
		// Fold ASCII upper case to lower case before comparing.
		if 'A' <= c1 && c1 <= 'Z' {
			c1 += 'a' - 'A'
		}
		if 'A' <= c2 && c2 <= 'Z' {
			c2 += 'a' - 'A'
		}
		if c1 != c2 {
			return false
		}
	}
	return true
}
|  | ||||
| func special(s []byte) (f float64, ok bool) { | ||||
| 	if len(s) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	switch s[0] { | ||||
| 	default: | ||||
| 		return | ||||
| 	case '+': | ||||
| 		if equalIgnoreCase(s, []byte("+inf")) || equalIgnoreCase(s, []byte("+infinity")) { | ||||
| 			return math.Inf(1), true | ||||
| 		} | ||||
| 	case '-': | ||||
| 		if equalIgnoreCase(s, []byte("-inf")) || equalIgnoreCase(s, []byte("-infinity")) { | ||||
| 			return math.Inf(-1), true | ||||
| 		} | ||||
| 	case 'n', 'N': | ||||
| 		if equalIgnoreCase(s, []byte("nan")) { | ||||
| 			return math.NaN(), true | ||||
| 		} | ||||
| 	case 'i', 'I': | ||||
| 		if equalIgnoreCase(s, []byte("inf")) || equalIgnoreCase(s, []byte("infinity")) { | ||||
| 			return math.Inf(1), true | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
// set parses the decimal float text in s into b, returning ok=false on
// any syntax error (no digits, doubled '.', trailing garbage, malformed
// exponent). Digits beyond the capacity of b.d are dropped; dropping a
// nonzero digit sets b.trunc.
func (b *decimal) set(s []byte) (ok bool) {
	i := 0
	b.neg = false
	b.trunc = false

	// optional sign
	if i >= len(s) {
		return
	}
	switch {
	case s[i] == '+':
		i++
	case s[i] == '-':
		b.neg = true
		i++
	}

	// digits
	sawdot := false
	sawdigits := false
	for ; i < len(s); i++ {
		switch {
		case s[i] == '.':
			if sawdot {
				// Second decimal point: invalid.
				return
			}
			sawdot = true
			b.dp = b.nd
			continue

		case '0' <= s[i] && s[i] <= '9':
			sawdigits = true
			if s[i] == '0' && b.nd == 0 { // ignore leading zeros
				b.dp--
				continue
			}
			if b.nd < len(b.d) {
				b.d[b.nd] = s[i]
				b.nd++
			} else if s[i] != '0' {
				// Out of room: a dropped nonzero digit means the
				// stored value is an under-approximation.
				b.trunc = true
			}
			continue
		}
		// Not a digit or '.': stop scanning the mantissa.
		break
	}
	if !sawdigits {
		return
	}
	if !sawdot {
		b.dp = b.nd
	}

	// optional exponent moves decimal point.
	// if we read a very large, very long number,
	// just be sure to move the decimal point by
	// a lot (say, 100000).  it doesn't matter if it's
	// not the exact number.
	if i < len(s) && (s[i] == 'e' || s[i] == 'E') {
		i++
		if i >= len(s) {
			return
		}
		esign := 1
		if s[i] == '+' {
			i++
		} else if s[i] == '-' {
			i++
			esign = -1
		}
		if i >= len(s) || s[i] < '0' || s[i] > '9' {
			// 'e' must be followed by at least one digit.
			return
		}
		e := 0
		for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
			if e < 10000 { // clamp so huge exponents cannot overflow e
				e = e*10 + int(s[i]) - '0'
			}
		}
		b.dp += e * esign
	}

	// Anything left over makes the whole input invalid.
	if i != len(s) {
		return
	}

	ok = true
	return
}
|  | ||||
// readFloat reads a decimal mantissa and exponent from the float text
// in s, so that the value is mantissa * 10^exp (negated when neg).
// At most 19 significant digits are folded into mantissa; if a nonzero
// digit had to be dropped, trunc is set. ok is false when the input is
// syntactically invalid or does not fit the return types.
func readFloat(s []byte) (mantissa uint64, exp int, neg, trunc, ok bool) {
	const maxMantDigits = 19 // a uint64 holds at most 19 decimal digits

	i := 0
	if i >= len(s) {
		return
	}
	// Optional leading sign.
	if s[i] == '+' {
		i++
	} else if s[i] == '-' {
		neg = true
		i++
	}

	// Mantissa: digits with at most one decimal point.
	var (
		sawDot    bool
		sawDigits bool
		nd        int // significant digits seen
		ndMant    int // digits actually folded into mantissa
		dp        int // decimal point position relative to the digits
	)
scan:
	for ; i < len(s); i++ {
		c := s[i]
		switch {
		case c == '.':
			if sawDot {
				return // second '.' is invalid
			}
			sawDot = true
			dp = nd
		case '0' <= c && c <= '9':
			sawDigits = true
			if c == '0' && nd == 0 {
				// Leading zeros only shift the decimal point.
				dp--
				continue
			}
			nd++
			if ndMant < maxMantDigits {
				mantissa = mantissa*10 + uint64(c-'0')
				ndMant++
			} else if c != '0' {
				// Dropped a nonzero digit: mantissa is inexact.
				trunc = true
			}
		default:
			break scan
		}
	}
	if !sawDigits {
		return
	}
	if !sawDot {
		dp = nd
	}

	// Optional exponent moves the decimal point. Very large exponents
	// are clamped (at 10000 per digit step); the exact count does not
	// matter once the value is far out of float range.
	if i < len(s) && (s[i] == 'e' || s[i] == 'E') {
		i++
		if i >= len(s) {
			return
		}
		esign := 1
		switch s[i] {
		case '+':
			i++
		case '-':
			esign = -1
			i++
		}
		if i >= len(s) || s[i] < '0' || s[i] > '9' {
			return // exponent needs at least one digit
		}
		e := 0
		for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ {
			if e < 10000 {
				e = e*10 + int(s[i]) - '0'
			}
		}
		dp += e * esign
	}

	// Trailing garbage invalidates the whole input.
	if i != len(s) {
		return
	}

	exp = dp - ndMant
	ok = true
	return
}
|  | ||||
// decimal power of ten to binary power of two.
// powtab[i] is a binary shift amount safe to apply when the decimal
// point is i places off; floatBits uses it to scale d toward [0.5, 1)
// without overshooting. Shifts larger than the table (27) are capped.
var powtab = []int{1, 3, 6, 9, 13, 16, 19, 23, 26}
|  | ||||
// floatBits converts the decimal d to the IEEE bit pattern of the float
// format described by flt, rounding to nearest. overflow reports that
// the value was out of range and ±Inf bits were produced.
func (d *decimal) floatBits(flt *floatInfo) (b uint64, overflow bool) {
	var exp int
	var mant uint64

	// Zero is always a special case.
	if d.nd == 0 {
		mant = 0
		exp = flt.bias
		goto out
	}

	// Obvious overflow/underflow.
	// These bounds are for 64-bit floats.
	// Will have to change if we want to support 80-bit floats in the future.
	if d.dp > 310 {
		goto overflow
	}
	if d.dp < -330 {
		// zero
		mant = 0
		exp = flt.bias
		goto out
	}

	// Scale by powers of two until in range [0.5, 1.0)
	exp = 0
	for d.dp > 0 {
		var n int
		if d.dp >= len(powtab) {
			n = 27 // cap the shift; large dp just takes more iterations
		} else {
			n = powtab[d.dp]
		}
		d.Shift(-n)
		exp += n
	}
	for d.dp < 0 || d.dp == 0 && d.d[0] < '5' {
		var n int
		if -d.dp >= len(powtab) {
			n = 27
		} else {
			n = powtab[-d.dp]
		}
		d.Shift(n)
		exp -= n
	}

	// Our range is [0.5,1) but floating point range is [1,2).
	exp--

	// Minimum representable exponent is flt.bias+1.
	// If the exponent is smaller, move it up and
	// adjust d accordingly.
	if exp < flt.bias+1 {
		n := flt.bias + 1 - exp
		d.Shift(-n)
		exp += n
	}

	if exp-flt.bias >= 1<<flt.expbits-1 {
		goto overflow
	}

	// Extract 1+flt.mantbits bits.
	d.Shift(int(1 + flt.mantbits))
	mant = d.RoundedInteger()

	// Rounding might have added a bit; shift down.
	if mant == 2<<flt.mantbits {
		mant >>= 1
		exp++
		if exp-flt.bias >= 1<<flt.expbits-1 {
			goto overflow
		}
	}

	// Denormalized?
	if mant&(1<<flt.mantbits) == 0 {
		exp = flt.bias
	}
	goto out

overflow:
	// ±Inf
	mant = 0
	exp = 1<<flt.expbits - 1 + flt.bias
	overflow = true

out:
	// Assemble bits: mantissa field, biased exponent field, sign bit.
	bits := mant & (uint64(1)<<flt.mantbits - 1)
	bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
	if d.neg {
		bits |= 1 << flt.mantbits << flt.expbits
	}
	return bits, overflow
}
|  | ||||
// Exact powers of 10.
// Every entry is exactly representable in the corresponding float type,
// so multiplying or dividing by one is a single correctly-rounded op.
var float64pow10 = []float64{
	1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
	1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
	1e20, 1e21, 1e22,
}
var float32pow10 = []float32{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10}
|  | ||||
| // If possible to convert decimal representation to 64-bit float f exactly, | ||||
| // entirely in floating-point math, do so, avoiding the expense of decimalToFloatBits. | ||||
| // Three common cases: | ||||
| //	value is exact integer | ||||
| //	value is exact integer * exact power of ten | ||||
| //	value is exact integer / exact power of ten | ||||
| // These all produce potentially inexact but correctly rounded answers. | ||||
| func atof64exact(mantissa uint64, exp int, neg bool) (f float64, ok bool) { | ||||
| 	if mantissa>>float64info.mantbits != 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	f = float64(mantissa) | ||||
| 	if neg { | ||||
| 		f = -f | ||||
| 	} | ||||
| 	switch { | ||||
| 	case exp == 0: | ||||
| 		// an integer. | ||||
| 		return f, true | ||||
| 	// Exact integers are <= 10^15. | ||||
| 	// Exact powers of ten are <= 10^22. | ||||
| 	case exp > 0 && exp <= 15+22: // int * 10^k | ||||
| 		// If exponent is big but number of digits is not, | ||||
| 		// can move a few zeros into the integer part. | ||||
| 		if exp > 22 { | ||||
| 			f *= float64pow10[exp-22] | ||||
| 			exp = 22 | ||||
| 		} | ||||
| 		if f > 1e15 || f < -1e15 { | ||||
| 			// the exponent was really too large. | ||||
| 			return | ||||
| 		} | ||||
| 		return f * float64pow10[exp], true | ||||
| 	case exp < 0 && exp >= -22: // int / 10^k | ||||
| 		return f / float64pow10[-exp], true | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // If possible to compute mantissa*10^exp to 32-bit float f exactly, | ||||
| // entirely in floating-point math, do so, avoiding the machinery above. | ||||
| func atof32exact(mantissa uint64, exp int, neg bool) (f float32, ok bool) { | ||||
| 	if mantissa>>float32info.mantbits != 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	f = float32(mantissa) | ||||
| 	if neg { | ||||
| 		f = -f | ||||
| 	} | ||||
| 	switch { | ||||
| 	case exp == 0: | ||||
| 		return f, true | ||||
| 	// Exact integers are <= 10^7. | ||||
| 	// Exact powers of ten are <= 10^10. | ||||
| 	case exp > 0 && exp <= 7+10: // int * 10^k | ||||
| 		// If exponent is big but number of digits is not, | ||||
| 		// can move a few zeros into the integer part. | ||||
| 		if exp > 10 { | ||||
| 			f *= float32pow10[exp-10] | ||||
| 			exp = 10 | ||||
| 		} | ||||
| 		if f > 1e7 || f < -1e7 { | ||||
| 			// the exponent was really too large. | ||||
| 			return | ||||
| 		} | ||||
| 		return f * float32pow10[exp], true | ||||
| 	case exp < 0 && exp >= -10: // int / 10^k | ||||
| 		return f / float32pow10[-exp], true | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
// fnParseFloat is the function name reported in parse errors
// (presumably embedded in NumError-style values by rangeError and
// syntaxError — defined elsewhere in this package).
const fnParseFloat = "ParseFloat"
|  | ||||
| func atof32(s []byte) (f float32, err error) { | ||||
| 	if val, ok := special(s); ok { | ||||
| 		return float32(val), nil | ||||
| 	} | ||||
|  | ||||
| 	if optimize { | ||||
| 		// Parse mantissa and exponent. | ||||
| 		mantissa, exp, neg, trunc, ok := readFloat(s) | ||||
| 		if ok { | ||||
| 			// Try pure floating-point arithmetic conversion. | ||||
| 			if !trunc { | ||||
| 				if f, ok := atof32exact(mantissa, exp, neg); ok { | ||||
| 					return f, nil | ||||
| 				} | ||||
| 			} | ||||
| 			// Try another fast path. | ||||
| 			ext := new(extFloat) | ||||
| 			if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float32info); ok { | ||||
| 				b, ovf := ext.floatBits(&float32info) | ||||
| 				f = math.Float32frombits(uint32(b)) | ||||
| 				if ovf { | ||||
| 					err = rangeError(fnParseFloat, string(s)) | ||||
| 				} | ||||
| 				return f, err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	var d decimal | ||||
| 	if !d.set(s) { | ||||
| 		return 0, syntaxError(fnParseFloat, string(s)) | ||||
| 	} | ||||
| 	b, ovf := d.floatBits(&float32info) | ||||
| 	f = math.Float32frombits(uint32(b)) | ||||
| 	if ovf { | ||||
| 		err = rangeError(fnParseFloat, string(s)) | ||||
| 	} | ||||
| 	return f, err | ||||
| } | ||||
|  | ||||
| func atof64(s []byte) (f float64, err error) { | ||||
| 	if val, ok := special(s); ok { | ||||
| 		return val, nil | ||||
| 	} | ||||
|  | ||||
| 	if optimize { | ||||
| 		// Parse mantissa and exponent. | ||||
| 		mantissa, exp, neg, trunc, ok := readFloat(s) | ||||
| 		if ok { | ||||
| 			// Try pure floating-point arithmetic conversion. | ||||
| 			if !trunc { | ||||
| 				if f, ok := atof64exact(mantissa, exp, neg); ok { | ||||
| 					return f, nil | ||||
| 				} | ||||
| 			} | ||||
| 			// Try another fast path. | ||||
| 			ext := new(extFloat) | ||||
| 			if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float64info); ok { | ||||
| 				b, ovf := ext.floatBits(&float64info) | ||||
| 				f = math.Float64frombits(b) | ||||
| 				if ovf { | ||||
| 					err = rangeError(fnParseFloat, string(s)) | ||||
| 				} | ||||
| 				return f, err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	var d decimal | ||||
| 	if !d.set(s) { | ||||
| 		return 0, syntaxError(fnParseFloat, string(s)) | ||||
| 	} | ||||
| 	b, ovf := d.floatBits(&float64info) | ||||
| 	f = math.Float64frombits(b) | ||||
| 	if ovf { | ||||
| 		err = rangeError(fnParseFloat, string(s)) | ||||
| 	} | ||||
| 	return f, err | ||||
| } | ||||
|  | ||||
| // ParseFloat converts the string s to a floating-point number | ||||
| // with the precision specified by bitSize: 32 for float32, or 64 for float64. | ||||
| // When bitSize=32, the result still has type float64, but it will be | ||||
| // convertible to float32 without changing its value. | ||||
| // | ||||
| // If s is well-formed and near a valid floating point number, | ||||
| // ParseFloat returns the nearest floating point number rounded | ||||
| // using IEEE754 unbiased rounding. | ||||
| // | ||||
| // The errors that ParseFloat returns have concrete type *NumError | ||||
| // and include err.Num = s. | ||||
| // | ||||
| // If s is not syntactically well-formed, ParseFloat returns err.Err = ErrSyntax. | ||||
| // | ||||
| // If s is syntactically well-formed but is more than 1/2 ULP | ||||
| // away from the largest floating point number of the given size, | ||||
| // ParseFloat returns f = ±Inf, err.Err = ErrRange. | ||||
| func ParseFloat(s []byte, bitSize int) (f float64, err error) { | ||||
| 	if bitSize == 32 { | ||||
| 		f1, err1 := atof32(s) | ||||
| 		return float64(f1), err1 | ||||
| 	} | ||||
| 	f1, err1 := atof64(s) | ||||
| 	return f1, err1 | ||||
| } | ||||
|  | ||||
| // original: strconv/decimal.go, but not exported, and needed for ParseFloat. | ||||
|  | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Multiprecision decimal numbers. | ||||
| // For floating-point formatting only; not general purpose. | ||||
| // Only operations are assign and (binary) left/right shift. | ||||
| // Can do binary floating point in multiprecision decimal precisely | ||||
| // because 2 divides 10; cannot do decimal floating point | ||||
| // in multiprecision binary precisely. | ||||
|  | ||||
// decimal is a multiprecision decimal number: the digits d[0:nd] with
// an implied decimal point dp digits from the left. It is used for
// exact binary<->decimal conversion; the only operations are assign
// and binary left/right shift.
type decimal struct {
	d     [800]byte // digits, each an ASCII byte '0'-'9'
	nd    int       // number of digits used
	dp    int       // decimal point
	neg   bool      // negative sign
	trunc bool // discarded nonzero digits beyond d[:nd]
}
|  | ||||
// String formats the decimal as a plain (non-exponential) digit string,
// inserting the decimal point and any padding zeros as needed.
func (a *decimal) String() string {
	// Worst-case buffer size: the digits, plus padding zeros on either
	// side of the decimal point, plus slack for the point itself.
	n := 10 + a.nd
	if a.dp > 0 {
		n += a.dp
	}
	if a.dp < 0 {
		n += -a.dp
	}

	buf := make([]byte, n)
	w := 0 // write offset into buf
	switch {
	case a.nd == 0:
		return "0"

	case a.dp <= 0:
		// zeros fill space between decimal point and digits
		buf[w] = '0'
		w++
		buf[w] = '.'
		w++
		w += digitZero(buf[w : w+-a.dp])
		w += copy(buf[w:], a.d[0:a.nd])

	case a.dp < a.nd:
		// decimal point in middle of digits
		w += copy(buf[w:], a.d[0:a.dp])
		buf[w] = '.'
		w++
		w += copy(buf[w:], a.d[a.dp:a.nd])

	default:
		// zeros fill space between digits and decimal point
		w += copy(buf[w:], a.d[0:a.nd])
		w += digitZero(buf[w : w+a.dp-a.nd])
	}
	return string(buf[0:w])
}
|  | ||||
// digitZero fills dst with ASCII '0' bytes and reports how many were written.
func digitZero(dst []byte) int {
	n := len(dst)
	for i := 0; i < n; i++ {
		dst[i] = '0'
	}
	return n
}
|  | ||||
| // trim trailing zeros from number. | ||||
| // (They are meaningless; the decimal point is tracked | ||||
| // independent of the number of digits.) | ||||
| func trim(a *decimal) { | ||||
| 	for a.nd > 0 && a.d[a.nd-1] == '0' { | ||||
| 		a.nd-- | ||||
| 	} | ||||
| 	if a.nd == 0 { | ||||
| 		a.dp = 0 | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Assign v to a. | ||||
| func (a *decimal) Assign(v uint64) { | ||||
| 	var buf [24]byte | ||||
|  | ||||
| 	// Write reversed decimal in buf. | ||||
| 	n := 0 | ||||
| 	for v > 0 { | ||||
| 		v1 := v / 10 | ||||
| 		v -= 10 * v1 | ||||
| 		buf[n] = byte(v + '0') | ||||
| 		n++ | ||||
| 		v = v1 | ||||
| 	} | ||||
|  | ||||
| 	// Reverse again to produce forward decimal in a.d. | ||||
| 	a.nd = 0 | ||||
| 	for n--; n >= 0; n-- { | ||||
| 		a.d[a.nd] = buf[n] | ||||
| 		a.nd++ | ||||
| 	} | ||||
| 	a.dp = a.nd | ||||
| 	trim(a) | ||||
| } | ||||
|  | ||||
// Maximum shift that we can do in one pass without overflow.
// Signed int has 31 bits, and we have to be able to accommodate 9<<k.
// (9 is the largest single decimal digit fed into the shift loops.)
const maxShift = 27
|  | ||||
// Binary shift right (/ 2) by k bits.  k <= maxShift to avoid overflow.
func rightShift(a *decimal, k uint) {
	r := 0 // read pointer
	w := 0 // write pointer

	// Pick up enough leading digits to cover first shift.
	n := 0
	for ; n>>k == 0; r++ {
		if r >= a.nd {
			if n == 0 {
				// a == 0; shouldn't get here, but handle anyway.
				a.nd = 0
				return
			}
			// Out of stored digits; scale n up with implicit zeros
			// until it is large enough to produce an output digit.
			for n>>k == 0 {
				n = n * 10
				r++
			}
			break
		}
		c := int(a.d[r])
		n = n*10 + c - '0'
	}
	// Consuming r digits before emitting the first moves the point left.
	a.dp -= r - 1

	// Pick up a digit, put down a digit.
	for ; r < a.nd; r++ {
		c := int(a.d[r])
		dig := n >> k
		n -= dig << k
		a.d[w] = byte(dig + '0')
		w++
		n = n*10 + c - '0'
	}

	// Put down extra digits.
	for n > 0 {
		dig := n >> k
		n -= dig << k
		if w < len(a.d) {
			a.d[w] = byte(dig + '0')
			w++
		} else if dig > 0 {
			// No room left: record that nonzero digits were dropped.
			a.trunc = true
		}
		n = n * 10
	}

	a.nd = w
	trim(a)
}
|  | ||||
// Cheat sheet for left shift: table indexed by shift count giving
// number of new digits that will be introduced by that shift.
//
// For example, leftcheats[4] = {2, "625"}.  That means that
// if we are shifting by 4 (multiplying by 16), it will add 2 digits
// when the string prefix is "625" through "999", and one fewer digit
// if the string prefix is "000" through "624".
//
// Credit for this trick goes to Ken.

// leftCheat is one row of the table above.
type leftCheat struct {
	delta  int    // number of new digits
	cutoff string //   minus one digit if original < a.
}

var leftcheats = []leftCheat{
	// Leading digits of 1/2^i = 5^i.
	// 5^23 is not an exact 64-bit floating point number,
	// so have to use bc for the math.
	/*
		seq 27 | sed 's/^/5^/' | bc |
		awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," }
		{
			log2 = log(2)/log(10)
			printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n",
				int(log2*NR+1), $0, 2**NR)
		}'
	*/
	{0, ""},
	{1, "5"},                   // * 2
	{1, "25"},                  // * 4
	{1, "125"},                 // * 8
	{2, "625"},                 // * 16
	{2, "3125"},                // * 32
	{2, "15625"},               // * 64
	{3, "78125"},               // * 128
	{3, "390625"},              // * 256
	{3, "1953125"},             // * 512
	{4, "9765625"},             // * 1024
	{4, "48828125"},            // * 2048
	{4, "244140625"},           // * 4096
	{4, "1220703125"},          // * 8192
	{5, "6103515625"},          // * 16384
	{5, "30517578125"},         // * 32768
	{5, "152587890625"},        // * 65536
	{6, "762939453125"},        // * 131072
	{6, "3814697265625"},       // * 262144
	{6, "19073486328125"},      // * 524288
	{7, "95367431640625"},      // * 1048576
	{7, "476837158203125"},     // * 2097152
	{7, "2384185791015625"},    // * 4194304
	{7, "11920928955078125"},   // * 8388608
	{8, "59604644775390625"},   // * 16777216
	{8, "298023223876953125"},  // * 33554432
	{8, "1490116119384765625"}, // * 67108864
	{9, "7450580596923828125"}, // * 134217728
}
|  | ||||
// prefixIsLessThan reports whether the leading prefix of b is
// lexicographically less than s. A b that runs out of bytes while
// still matching counts as smaller.
func prefixIsLessThan(b []byte, s string) bool {
	for i, c := range []byte(s) {
		if i >= len(b) {
			return true
		}
		if b[i] != c {
			return b[i] < c
		}
	}
	return false
}
|  | ||||
// Binary shift left (* 2) by k bits.  k <= maxShift to avoid overflow.
func leftShift(a *decimal, k uint) {
	// delta is how many extra digits the shift introduces, looked up
	// by comparing the leading digits against the cheat-sheet cutoff.
	delta := leftcheats[k].delta
	if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) {
		delta--
	}

	r := a.nd         // read index
	w := a.nd + delta // write index
	n := 0

	// Pick up a digit, put down a digit.
	for r--; r >= 0; r-- {
		n += (int(a.d[r]) - '0') << k
		quo := n / 10
		rem := n - 10*quo
		w--
		if w < len(a.d) {
			a.d[w] = byte(rem + '0')
		} else if rem != 0 {
			// No room for this digit: record that nonzero digits were lost.
			a.trunc = true
		}
		n = quo
	}

	// Put down extra digits.
	for n > 0 {
		quo := n / 10
		rem := n - 10*quo
		w--
		if w < len(a.d) {
			a.d[w] = byte(rem + '0')
		} else if rem != 0 {
			a.trunc = true
		}
		n = quo
	}

	a.nd += delta
	if a.nd >= len(a.d) {
		a.nd = len(a.d)
	}
	a.dp += delta
	trim(a)
}
|  | ||||
| // Binary shift left (k > 0) or right (k < 0). | ||||
| func (a *decimal) Shift(k int) { | ||||
| 	switch { | ||||
| 	case a.nd == 0: | ||||
| 		// nothing to do: a == 0 | ||||
| 	case k > 0: | ||||
| 		for k > maxShift { | ||||
| 			leftShift(a, maxShift) | ||||
| 			k -= maxShift | ||||
| 		} | ||||
| 		leftShift(a, uint(k)) | ||||
| 	case k < 0: | ||||
| 		for k < -maxShift { | ||||
| 			rightShift(a, maxShift) | ||||
| 			k += maxShift | ||||
| 		} | ||||
| 		rightShift(a, uint(-k)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// shouldRoundUp reports whether chopping a at nd digits requires
// rounding up, using round-half-to-even for exact halfway values.
func shouldRoundUp(a *decimal, nd int) bool {
	if nd < 0 || nd >= a.nd {
		return false
	}
	if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even
		// if we truncated, a little higher than what's recorded - always round up
		if a.trunc {
			return true
		}
		// Round to even: round up only when the last kept digit is odd.
		return nd > 0 && (a.d[nd-1]-'0')%2 != 0
	}
	// not halfway - digit tells all
	return a.d[nd] >= '5'
}
|  | ||||
| // Round a to nd digits (or fewer). | ||||
| // If nd is zero, it means we're rounding | ||||
| // just to the left of the digits, as in | ||||
| // 0.09 -> 0.1. | ||||
| func (a *decimal) Round(nd int) { | ||||
| 	if nd < 0 || nd >= a.nd { | ||||
| 		return | ||||
| 	} | ||||
| 	if shouldRoundUp(a, nd) { | ||||
| 		a.RoundUp(nd) | ||||
| 	} else { | ||||
| 		a.RoundDown(nd) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Round a down to nd digits (or fewer). | ||||
| func (a *decimal) RoundDown(nd int) { | ||||
| 	if nd < 0 || nd >= a.nd { | ||||
| 		return | ||||
| 	} | ||||
| 	a.nd = nd | ||||
| 	trim(a) | ||||
| } | ||||
|  | ||||
| // Round a up to nd digits (or fewer). | ||||
| func (a *decimal) RoundUp(nd int) { | ||||
| 	if nd < 0 || nd >= a.nd { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// round up | ||||
| 	for i := nd - 1; i >= 0; i-- { | ||||
| 		c := a.d[i] | ||||
| 		if c < '9' { // can stop after this digit | ||||
| 			a.d[i]++ | ||||
| 			a.nd = i + 1 | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Number is all 9s. | ||||
| 	// Change to single 1 with adjusted decimal point. | ||||
| 	a.d[0] = '1' | ||||
| 	a.nd = 1 | ||||
| 	a.dp++ | ||||
| } | ||||
|  | ||||
// RoundedInteger extracts the integer part of a, rounded appropriately.
// No guarantees about overflow; more than 20 integer digits saturate
// to the maximum uint64.
func (a *decimal) RoundedInteger() uint64 {
	if a.dp > 20 {
		return 0xFFFFFFFFFFFFFFFF
	}
	var i int
	n := uint64(0)
	// Accumulate the stored digits left of the decimal point.
	for i = 0; i < a.dp && i < a.nd; i++ {
		n = n*10 + uint64(a.d[i]-'0')
	}
	// Account for implicit trailing zeros up to the decimal point.
	for ; i < a.dp; i++ {
		n *= 10
	}
	if shouldRoundUp(a, a.dp) {
		n++
	}
	return n
}
							
								
								
									
										213
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										213
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,213 @@ | ||||
| /** | ||||
|  *  Copyright 2014 Paul Querna | ||||
|  * | ||||
|  *  Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|  *  you may not use this file except in compliance with the License. | ||||
|  *  You may obtain a copy of the License at | ||||
|  * | ||||
|  *      http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  * | ||||
|  *  Unless required by applicable law or agreed to in writing, software | ||||
|  *  distributed under the License is distributed on an "AS IS" BASIS, | ||||
|  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|  *  See the License for the specific language governing permissions and | ||||
|  *  limitations under the License. | ||||
|  * | ||||
|  */ | ||||
|  | ||||
| /* Portions of this file are on Go stdlib's strconv/atoi.go */ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package internal | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"strconv" | ||||
| ) | ||||
|  | ||||
// ErrRange indicates that a value is out of range for the target type.
var ErrRange = errors.New("value out of range")

// ErrSyntax indicates that a value does not have the right syntax for the target type.
var ErrSyntax = errors.New("invalid syntax")

// A NumError records a failed conversion.
type NumError struct {
	Func string // the failing function (ParseBool, ParseInt, ParseUint, ParseFloat)
	Num  string // the input
	Err  error  // the reason the conversion failed (ErrRange, ErrSyntax)
}

// Error formats the failure as "strconv.<Func>: parsing <quoted input>: <cause>".
func (e *NumError) Error() string {
	msg := "strconv." + e.Func + ": " + "parsing "
	msg += strconv.Quote(e.Num)
	msg += ": " + e.Err.Error()
	return msg
}

// syntaxError builds a *NumError carrying ErrSyntax for function fn and input str.
func syntaxError(fn, str string) *NumError {
	return &NumError{Func: fn, Num: str, Err: ErrSyntax}
}

// rangeError builds a *NumError carrying ErrRange for function fn and input str.
func rangeError(fn, str string) *NumError {
	return &NumError{Func: fn, Num: str, Err: ErrRange}
}
|  | ||||
// intSize is 32 on 32-bit platforms and 64 on 64-bit platforms:
// ^uint(0)>>63 is 1 exactly when uint is 64 bits wide.
const intSize = 32 << uint(^uint(0)>>63)

// IntSize is the size in bits of an int or uint value.
const IntSize = intSize
|  | ||||
// cutoff64 returns the first number n such that n*base >= 1<<64,
// i.e. the smallest accumulator value at which one more multiply by
// base would overflow a uint64. Bases below 2 yield 0.
func cutoff64(base int) uint64 {
	if base < 2 {
		return 0
	}
	const maxUint64 = 1<<64 - 1
	return maxUint64/uint64(base) + 1
}
|  | ||||
// ParseUint is like ParseInt but for unsigned numbers, and operating on []byte.
// bitSize 0 means the platform int size. On syntax or range errors it
// returns a *NumError; on range errors n is the saturated maximum.
func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) {
	var cutoff, maxVal uint64

	if bitSize == 0 {
		bitSize = int(IntSize)
	}

	s0 := s // remember the full input for error reporting
	switch {
	case len(s) < 1:
		err = ErrSyntax
		goto Error

	case 2 <= base && base <= 36:
		// valid base; nothing to do

	case base == 0:
		// Look for octal, hex prefix.
		switch {
		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
			base = 16
			s = s[2:]
			if len(s) < 1 {
				err = ErrSyntax
				goto Error
			}
		case s[0] == '0':
			base = 8
		default:
			base = 10
		}

	default:
		err = errors.New("invalid base " + strconv.Itoa(base))
		goto Error
	}

	n = 0
	cutoff = cutoff64(base)      // n >= cutoff means n*base overflows
	maxVal = 1<<uint(bitSize) - 1 // largest value that fits in bitSize bits

	for i := 0; i < len(s); i++ {
		var v byte
		d := s[i]
		switch {
		case '0' <= d && d <= '9':
			v = d - '0'
		case 'a' <= d && d <= 'z':
			v = d - 'a' + 10
		case 'A' <= d && d <= 'Z':
			v = d - 'A' + 10
		default:
			n = 0
			err = ErrSyntax
			goto Error
		}
		// A digit valid in some base but not this one is a syntax error.
		if int(v) >= base {
			n = 0
			err = ErrSyntax
			goto Error
		}

		if n >= cutoff {
			// n*base overflows
			n = 1<<64 - 1
			err = ErrRange
			goto Error
		}
		n *= uint64(base)

		n1 := n + uint64(v)
		if n1 < n || n1 > maxVal {
			// n+v overflows
			n = 1<<64 - 1
			err = ErrRange
			goto Error
		}
		n = n1
	}

	return n, nil

Error:
	return n, &NumError{"ParseUint", string(s0), err}
}
|  | ||||
// ParseInt interprets a string s in the given base (2 to 36) and
// returns the corresponding value i.  If base == 0, the base is
// implied by the string's prefix: base 16 for "0x", base 8 for
// "0", and base 10 otherwise.
//
// The bitSize argument specifies the integer type
// that the result must fit into.  Bit sizes 0, 8, 16, 32, and 64
// correspond to int, int8, int16, int32, and int64.
//
// The errors that ParseInt returns have concrete type *NumError
// and include err.Num = s.  If s is empty or contains invalid
// digits, err.Err = ErrSyntax and the returned value is 0;
// if the value corresponding to s cannot be represented by a
// signed integer of the given size, err.Err = ErrRange and the
// returned value is the maximum magnitude integer of the
// appropriate bitSize and sign.
func ParseInt(s []byte, base int, bitSize int) (i int64, err error) {
	const fnParseInt = "ParseInt"

	if bitSize == 0 {
		bitSize = int(IntSize)
	}

	// Empty string bad.
	if len(s) == 0 {
		return 0, syntaxError(fnParseInt, string(s))
	}

	// Pick off leading sign.
	s0 := s
	neg := false
	if s[0] == '+' {
		s = s[1:]
	} else if s[0] == '-' {
		neg = true
		s = s[1:]
	}

	// Convert unsigned and check range.
	// A range error from ParseUint is tolerated here because the
	// signed range check below produces the proper saturated result.
	var un uint64
	un, err = ParseUint(s, base, bitSize)
	if err != nil && err.(*NumError).Err != ErrRange {
		err.(*NumError).Func = fnParseInt
		err.(*NumError).Num = string(s0)
		return 0, err
	}
	// cutoff is 2^(bitSize-1): one past the positive maximum,
	// exactly the magnitude of the negative minimum.
	cutoff := uint64(1 << uint(bitSize-1))
	if !neg && un >= cutoff {
		return int64(cutoff - 1), rangeError(fnParseInt, string(s0))
	}
	if neg && un > cutoff {
		return -int64(cutoff), rangeError(fnParseInt, string(s0))
	}
	n := int64(un)
	if neg {
		n = -n
	}
	return n, nil
}
							
								
								
									
										668
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										668
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,668 @@ | ||||
| // Copyright 2011 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package internal | ||||
|  | ||||
// An extFloat represents an extended floating-point number, with more
// precision than a float64. It does not try to save bits: the
// number represented by the structure is mant*(2^exp), with a negative
// sign if neg is true.
type extFloat struct {
	mant uint64 // mantissa, not necessarily normalized
	exp  int    // binary exponent
	neg  bool   // sign
}

// Powers of ten taken from double-conversion library.
// http://code.google.com/p/double-conversion/
// powersOfTen below holds every stepPowerOfTen-th power starting at
// 10^firstPowerOfTen; smallPowersOfTen fills in the gaps in between.
const (
	firstPowerOfTen = -348
	stepPowerOfTen  = 8
)

// smallPowersOfTen holds exact extFloat values for 10^0 through 10^7.
var smallPowersOfTen = [...]extFloat{
	{1 << 63, -63, false},        // 1
	{0xa << 60, -60, false},      // 1e1
	{0x64 << 57, -57, false},     // 1e2
	{0x3e8 << 54, -54, false},    // 1e3
	{0x2710 << 50, -50, false},   // 1e4
	{0x186a0 << 47, -47, false},  // 1e5
	{0xf4240 << 44, -44, false},  // 1e6
	{0x989680 << 40, -40, false}, // 1e7
}

// powersOfTen holds rounded extFloat values for 10^-348 through 10^340
// in steps of 8, each with a normalized (top-bit-set) mantissa.
var powersOfTen = [...]extFloat{
	{0xfa8fd5a0081c0288, -1220, false}, // 10^-348
	{0xbaaee17fa23ebf76, -1193, false}, // 10^-340
	{0x8b16fb203055ac76, -1166, false}, // 10^-332
	{0xcf42894a5dce35ea, -1140, false}, // 10^-324
	{0x9a6bb0aa55653b2d, -1113, false}, // 10^-316
	{0xe61acf033d1a45df, -1087, false}, // 10^-308
	{0xab70fe17c79ac6ca, -1060, false}, // 10^-300
	{0xff77b1fcbebcdc4f, -1034, false}, // 10^-292
	{0xbe5691ef416bd60c, -1007, false}, // 10^-284
	{0x8dd01fad907ffc3c, -980, false},  // 10^-276
	{0xd3515c2831559a83, -954, false},  // 10^-268
	{0x9d71ac8fada6c9b5, -927, false},  // 10^-260
	{0xea9c227723ee8bcb, -901, false},  // 10^-252
	{0xaecc49914078536d, -874, false},  // 10^-244
	{0x823c12795db6ce57, -847, false},  // 10^-236
	{0xc21094364dfb5637, -821, false},  // 10^-228
	{0x9096ea6f3848984f, -794, false},  // 10^-220
	{0xd77485cb25823ac7, -768, false},  // 10^-212
	{0xa086cfcd97bf97f4, -741, false},  // 10^-204
	{0xef340a98172aace5, -715, false},  // 10^-196
	{0xb23867fb2a35b28e, -688, false},  // 10^-188
	{0x84c8d4dfd2c63f3b, -661, false},  // 10^-180
	{0xc5dd44271ad3cdba, -635, false},  // 10^-172
	{0x936b9fcebb25c996, -608, false},  // 10^-164
	{0xdbac6c247d62a584, -582, false},  // 10^-156
	{0xa3ab66580d5fdaf6, -555, false},  // 10^-148
	{0xf3e2f893dec3f126, -529, false},  // 10^-140
	{0xb5b5ada8aaff80b8, -502, false},  // 10^-132
	{0x87625f056c7c4a8b, -475, false},  // 10^-124
	{0xc9bcff6034c13053, -449, false},  // 10^-116
	{0x964e858c91ba2655, -422, false},  // 10^-108
	{0xdff9772470297ebd, -396, false},  // 10^-100
	{0xa6dfbd9fb8e5b88f, -369, false},  // 10^-92
	{0xf8a95fcf88747d94, -343, false},  // 10^-84
	{0xb94470938fa89bcf, -316, false},  // 10^-76
	{0x8a08f0f8bf0f156b, -289, false},  // 10^-68
	{0xcdb02555653131b6, -263, false},  // 10^-60
	{0x993fe2c6d07b7fac, -236, false},  // 10^-52
	{0xe45c10c42a2b3b06, -210, false},  // 10^-44
	{0xaa242499697392d3, -183, false},  // 10^-36
	{0xfd87b5f28300ca0e, -157, false},  // 10^-28
	{0xbce5086492111aeb, -130, false},  // 10^-20
	{0x8cbccc096f5088cc, -103, false},  // 10^-12
	{0xd1b71758e219652c, -77, false},   // 10^-4
	{0x9c40000000000000, -50, false},   // 10^4
	{0xe8d4a51000000000, -24, false},   // 10^12
	{0xad78ebc5ac620000, 3, false},     // 10^20
	{0x813f3978f8940984, 30, false},    // 10^28
	{0xc097ce7bc90715b3, 56, false},    // 10^36
	{0x8f7e32ce7bea5c70, 83, false},    // 10^44
	{0xd5d238a4abe98068, 109, false},   // 10^52
	{0x9f4f2726179a2245, 136, false},   // 10^60
	{0xed63a231d4c4fb27, 162, false},   // 10^68
	{0xb0de65388cc8ada8, 189, false},   // 10^76
	{0x83c7088e1aab65db, 216, false},   // 10^84
	{0xc45d1df942711d9a, 242, false},   // 10^92
	{0x924d692ca61be758, 269, false},   // 10^100
	{0xda01ee641a708dea, 295, false},   // 10^108
	{0xa26da3999aef774a, 322, false},   // 10^116
	{0xf209787bb47d6b85, 348, false},   // 10^124
	{0xb454e4a179dd1877, 375, false},   // 10^132
	{0x865b86925b9bc5c2, 402, false},   // 10^140
	{0xc83553c5c8965d3d, 428, false},   // 10^148
	{0x952ab45cfa97a0b3, 455, false},   // 10^156
	{0xde469fbd99a05fe3, 481, false},   // 10^164
	{0xa59bc234db398c25, 508, false},   // 10^172
	{0xf6c69a72a3989f5c, 534, false},   // 10^180
	{0xb7dcbf5354e9bece, 561, false},   // 10^188
	{0x88fcf317f22241e2, 588, false},   // 10^196
	{0xcc20ce9bd35c78a5, 614, false},   // 10^204
	{0x98165af37b2153df, 641, false},   // 10^212
	{0xe2a0b5dc971f303a, 667, false},   // 10^220
	{0xa8d9d1535ce3b396, 694, false},   // 10^228
	{0xfb9b7cd9a4a7443c, 720, false},   // 10^236
	{0xbb764c4ca7a44410, 747, false},   // 10^244
	{0x8bab8eefb6409c1a, 774, false},   // 10^252
	{0xd01fef10a657842c, 800, false},   // 10^260
	{0x9b10a4e5e9913129, 827, false},   // 10^268
	{0xe7109bfba19c0c9d, 853, false},   // 10^276
	{0xac2820d9623bf429, 880, false},   // 10^284
	{0x80444b5e7aa7cf85, 907, false},   // 10^292
	{0xbf21e44003acdd2d, 933, false},   // 10^300
	{0x8e679c2f5e44ff8f, 960, false},   // 10^308
	{0xd433179d9c8cb841, 986, false},   // 10^316
	{0x9e19db92b4e31ba9, 1013, false},  // 10^324
	{0xeb96bf6ebadf77d9, 1039, false},  // 10^332
	{0xaf87023b9bf0ee6b, 1066, false},  // 10^340
}
|  | ||||
// floatBits returns the bits of the float64 that best approximates
// the extFloat passed as receiver. Overflow is set to true if
// the resulting float64 is ±Inf.
func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) {
	f.Normalize()

	// After Normalize the mantissa's top bit is at position 63,
	// so the effective exponent of that bit is f.exp + 63.
	exp := f.exp + 63

	// Exponent too small.
	if exp < flt.bias+1 {
		n := flt.bias + 1 - exp
		f.mant >>= uint(n)
		exp += n
	}

	// Extract 1+flt.mantbits bits from the 64-bit mantissa.
	mant := f.mant >> (63 - flt.mantbits)
	if f.mant&(1<<(62-flt.mantbits)) != 0 {
		// Round up.
		mant += 1
	}

	// Rounding might have added a bit; shift down.
	if mant == 2<<flt.mantbits {
		mant >>= 1
		exp++
	}

	// Infinities.
	if exp-flt.bias >= 1<<flt.expbits-1 {
		// ±Inf
		mant = 0
		exp = 1<<flt.expbits - 1 + flt.bias
		overflow = true
	} else if mant&(1<<flt.mantbits) == 0 {
		// Denormalized?
		exp = flt.bias
	}
	// Assemble bits.
	bits = mant & (uint64(1)<<flt.mantbits - 1)
	bits |= uint64((exp-flt.bias)&(1<<flt.expbits-1)) << flt.mantbits
	if f.neg {
		bits |= 1 << (flt.mantbits + flt.expbits)
	}
	return
}
|  | ||||
// AssignComputeBounds sets f to the floating point value
// defined by mant, exp and precision given by flt. It returns
// lower, upper such that any number in the closed interval
// [lower, upper] is converted back to the same floating point number.
func (f *extFloat) AssignComputeBounds(mant uint64, exp int, neg bool, flt *floatInfo) (lower, upper extFloat) {
	f.mant = mant
	f.exp = exp - int(flt.mantbits)
	f.neg = neg
	if f.exp <= 0 && mant == (mant>>uint(-f.exp))<<uint(-f.exp) {
		// An exact integer
		f.mant >>= uint(-f.exp)
		f.exp = 0
		return *f, *f
	}
	expBiased := exp - flt.bias

	// The upper neighbor is always half an ulp away.
	upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg}
	if mant != 1<<flt.mantbits || expBiased == 1 {
		lower = extFloat{mant: 2*f.mant - 1, exp: f.exp - 1, neg: f.neg}
	} else {
		// At a power-of-two boundary (smallest normalized mantissa)
		// the lower neighbor is only a quarter of an ulp away.
		lower = extFloat{mant: 4*f.mant - 1, exp: f.exp - 2, neg: f.neg}
	}
	return
}
|  | ||||
| // Normalize normalizes f so that the highest bit of the mantissa is | ||||
| // set, and returns the number by which the mantissa was left-shifted. | ||||
| func (f *extFloat) Normalize() (shift uint) { | ||||
| 	mant, exp := f.mant, f.exp | ||||
| 	if mant == 0 { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	if mant>>(64-32) == 0 { | ||||
| 		mant <<= 32 | ||||
| 		exp -= 32 | ||||
| 	} | ||||
| 	if mant>>(64-16) == 0 { | ||||
| 		mant <<= 16 | ||||
| 		exp -= 16 | ||||
| 	} | ||||
| 	if mant>>(64-8) == 0 { | ||||
| 		mant <<= 8 | ||||
| 		exp -= 8 | ||||
| 	} | ||||
| 	if mant>>(64-4) == 0 { | ||||
| 		mant <<= 4 | ||||
| 		exp -= 4 | ||||
| 	} | ||||
| 	if mant>>(64-2) == 0 { | ||||
| 		mant <<= 2 | ||||
| 		exp -= 2 | ||||
| 	} | ||||
| 	if mant>>(64-1) == 0 { | ||||
| 		mant <<= 1 | ||||
| 		exp -= 1 | ||||
| 	} | ||||
| 	shift = uint(f.exp - exp) | ||||
| 	f.mant, f.exp = mant, exp | ||||
| 	return | ||||
| } | ||||
|  | ||||
// Multiply sets f to the product f*g: the result is correctly rounded,
// but not normalized. The 128-bit product is built from four 32x32
// partial products; only the top 64 bits (plus a rounding bit) are kept.
func (f *extFloat) Multiply(g extFloat) {
	fhi, flo := f.mant>>32, uint64(uint32(f.mant))
	ghi, glo := g.mant>>32, uint64(uint32(g.mant))

	// Cross products.
	cross1 := fhi * glo
	cross2 := flo * ghi

	// f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo
	f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32)
	rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32)
	// Round up.
	rem += (1 << 31)

	f.mant += (rem >> 32)
	// Each mantissa is scaled by 2^-64 relative to its exponent,
	// hence the +64 correction on the summed exponents.
	f.exp = f.exp + g.exp + 64
}
|  | ||||
// uint64pow10 holds the powers of ten representable in a uint64,
// 10^0 through 10^19.
var uint64pow10 = [...]uint64{
	1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
	1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
}
|  | ||||
// AssignDecimal sets f to an approximate value mantissa*10^exp. It
// returns true if the value represented by f is guaranteed to be the
// best approximation of d after being rounded to a float64 or
// float32 depending on flt.
func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) {
	const uint64digits = 19
	const errorscale = 8
	errors := 0 // An upper bound for error, computed in errorscale*ulp.
	if trunc {
		// the decimal number was truncated.
		errors += errorscale / 2
	}

	f.mant = mantissa
	f.exp = 0
	f.neg = neg

	// Multiply by powers of ten.
	// i indexes the coarse table (steps of stepPowerOfTen);
	// adjExp is the remaining small power handled separately.
	i := (exp10 - firstPowerOfTen) / stepPowerOfTen
	if exp10 < firstPowerOfTen || i >= len(powersOfTen) {
		return false
	}
	adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen

	// We multiply by exp%step
	if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] {
		// We can multiply the mantissa exactly.
		f.mant *= uint64pow10[adjExp]
		f.Normalize()
	} else {
		f.Normalize()
		f.Multiply(smallPowersOfTen[adjExp])
		// Rounded multiply costs up to half an ulp.
		errors += errorscale / 2
	}

	// We multiply by 10 to the exp - exp%step.
	f.Multiply(powersOfTen[i])
	if errors > 0 {
		errors += 1
	}
	errors += errorscale / 2

	// Normalize
	shift := f.Normalize()
	errors <<= shift

	// Now f is a good approximation of the decimal.
	// Check whether the error is too large: that is, if the mantissa
	// is perturbated by the error, the resulting float64 will change.
	// The 64 bits mantissa is 1 + 52 bits for float64 + 11 extra bits.
	//
	// In many cases the approximation will be good enough.
	denormalExp := flt.bias - 63
	var extrabits uint
	if f.exp <= denormalExp {
		// f.mant * 2^f.exp is smaller than 2^(flt.bias+1).
		extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp))
	} else {
		extrabits = uint(63 - flt.mantbits)
	}

	halfway := uint64(1) << (extrabits - 1)
	mant_extra := f.mant & (1<<extrabits - 1)

	// Do a signed comparison here! If the error estimate could make
	// the mantissa round differently for the conversion to double,
	// then we can't give a definite answer.
	if int64(halfway)-int64(errors) < int64(mant_extra) &&
		int64(mant_extra) < int64(halfway)+int64(errors) {
		return false
	}
	return true
}
|  | ||||
// frexp10 is an analogue of math.Frexp for decimal powers. It scales
// f by an approximate power of ten 10^-exp, and returns exp10, so
// that f*10^exp10 has the same value as the old f, up to an ulp,
// as well as the index of 10^-exp in the powersOfTen table.
func (f *extFloat) frexp10() (exp10, index int) {
	// The constants expMin and expMax constrain the final value of the
	// binary exponent of f. We want a small integral part in the result
	// because finding digits of an integer requires divisions, whereas
	// digits of the fractional part can be found by repeatedly multiplying
	// by 10.
	const expMin = -60
	const expMax = -32
	// Find power of ten such that x * 10^n has a binary exponent
	// between expMin and expMax.
	approxExp10 := ((expMin+expMax)/2 - f.exp) * 28 / 93 // log(10)/log(2) is close to 93/28.
	i := (approxExp10 - firstPowerOfTen) / stepPowerOfTen
Loop:
	// The estimate may be off by one table entry; nudge the index until
	// the resulting binary exponent lands in [expMin, expMax].
	for {
		exp := f.exp + powersOfTen[i].exp + 64
		switch {
		case exp < expMin:
			i++
		case exp > expMax:
			i--
		default:
			break Loop
		}
	}
	// Apply the desired decimal shift on f. It will have exponent
	// in the desired range. This is multiplication by 10^-exp10.
	f.Multiply(powersOfTen[i])

	return -(firstPowerOfTen + i*stepPowerOfTen), i
}
|  | ||||
| // frexp10Many applies a common shift by a power of ten to a, b, c. | ||||
| func frexp10Many(a, b, c *extFloat) (exp10 int) { | ||||
| 	exp10, i := c.frexp10() | ||||
| 	a.Multiply(powersOfTen[i]) | ||||
| 	b.Multiply(powersOfTen[i]) | ||||
| 	return | ||||
| } | ||||
|  | ||||
// FixedDecimal stores in d the first n significant digits
// of the decimal representation of f. It returns false
// if it cannot be sure of the answer.
func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool {
	if f.mant == 0 {
		// Zero has no digits; represent it canonically.
		d.nd = 0
		d.dp = 0
		d.neg = f.neg
		return true
	}
	if n == 0 {
		panic("strconv: internal error: extFloat.FixedDecimal called with n == 0")
	}
	// Multiply by an appropriate power of ten to have a reasonable
	// number to process.
	f.Normalize()
	exp10, _ := f.frexp10()

	// After frexp10, f.exp is negative; split the mantissa into an
	// integral part (high bits) and a fractional part (low shift bits).
	shift := uint(-f.exp)
	integer := uint32(f.mant >> shift)
	fraction := f.mant - (uint64(integer) << shift)
	ε := uint64(1) // ε is the uncertainty we have on the mantissa of f.

	// Write exactly n digits to d.
	needed := n        // how many digits are left to write.
	integerDigits := 0 // the number of decimal digits of integer.
	pow10 := uint64(1) // the power of ten by which f was scaled.
	for i, pow := 0, uint64(1); i < 20; i++ {
		if pow > uint64(integer) {
			integerDigits = i
			break
		}
		pow *= 10
	}
	rest := integer
	if integerDigits > needed {
		// the integral part is already large, trim the last digits.
		pow10 = uint64pow10[integerDigits-needed]
		integer /= uint32(pow10)
		rest -= integer * uint32(pow10)
	} else {
		rest = 0
	}

	// Write the digits of integer: the digits of rest are omitted.
	var buf [32]byte
	pos := len(buf)
	for v := integer; v > 0; {
		v1 := v / 10
		v -= 10 * v1
		pos--
		buf[pos] = byte(v + '0')
		v = v1
	}
	for i := pos; i < len(buf); i++ {
		d.d[i-pos] = buf[i]
	}
	nd := len(buf) - pos
	d.nd = nd
	d.dp = integerDigits + exp10
	needed -= nd

	if needed > 0 {
		if rest != 0 || pow10 != 1 {
			panic("strconv: internal error, rest != 0 but needed > 0")
		}
		// Emit digits for the fractional part. Each time, 10*fraction
		// fits in a uint64 without overflow.
		for needed > 0 {
			fraction *= 10
			ε *= 10 // the uncertainty scales as we multiply by ten.
			if 2*ε > 1<<shift {
				// the error is so large it could modify which digit to write, abort.
				return false
			}
			digit := fraction >> shift
			d.d[nd] = byte(digit + '0')
			fraction -= digit << shift
			nd++
			needed--
		}
		d.nd = nd
	}

	// We have written a truncation of f (a numerator / 10^d.dp). The remaining part
	// can be interpreted as a small number (< 1) to be added to the last digit of the
	// numerator.
	//
	// If rest > 0, the amount is:
	//    (rest<<shift | fraction) / (pow10 << shift)
	//    fraction being known with a ±ε uncertainty.
	//    The fact that n > 0 guarantees that pow10 << shift does not overflow a uint64.
	//
	// If rest = 0, pow10 == 1 and the amount is
	//    fraction / (1 << shift)
	//    fraction being known with a ±ε uncertainty.
	//
	// We pass this information to the rounding routine for adjustment.

	ok := adjustLastDigitFixed(d, uint64(rest)<<shift|fraction, pow10, shift, ε)
	if !ok {
		return false
	}
	// Trim trailing zeros.
	for i := d.nd - 1; i >= 0; i-- {
		if d.d[i] != '0' {
			d.nd = i + 1
			break
		}
	}
	return true
}
|  | ||||
// adjustLastDigitFixed assumes d contains the representation of the integral part
// of some number, whose fractional part is num / (den << shift). The numerator
// num is only known up to an uncertainty of size ε, assumed to be less than
// (den << shift)/2.
//
// It will increase the last digit by one to account for correct rounding, typically
// when the fractional part is greater than 1/2, and will return false if ε is such
// that no correct answer can be given.
func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool {
	if num > den<<shift {
		panic("strconv: num > den<<shift in adjustLastDigitFixed")
	}
	if 2*ε > den<<shift {
		panic("strconv: ε > (den<<shift)/2")
	}
	// Even with the uncertainty, the fraction is certainly below 1/2:
	// round down, i.e. leave d as written.
	if 2*(num+ε) < den<<shift {
		return true
	}
	// Even with the uncertainty, the fraction is certainly above 1/2:
	// round up.
	if 2*(num-ε) > den<<shift {
		// increment d by 1.
		i := d.nd - 1
		// Trailing '9's become '0's under the carry and are dropped;
		// the increment lands on the first non-'9' digit.
		for ; i >= 0; i-- {
			if d.d[i] == '9' {
				d.nd--
			} else {
				break
			}
		}
		if i < 0 {
			// Every digit was '9': the result is 1 followed by zeros,
			// so the decimal point moves one place to the right.
			d.d[0] = '1'
			d.nd = 1
			d.dp++
		} else {
			d.d[i]++
		}
		return true
	}
	// The uncertainty interval straddles 1/2: no definite answer.
	return false
}
|  | ||||
// ShortestDecimal stores in d the shortest decimal representation of f
// which belongs to the open interval (lower, upper), where f is supposed
// to lie. It returns false whenever the result is unsure. The implementation
// uses the Grisu3 algorithm.
func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool {
	if f.mant == 0 {
		// Canonical representation of zero.
		d.nd = 0
		d.dp = 0
		d.neg = f.neg
		return true
	}
	if f.exp == 0 && *lower == *f && *lower == *upper {
		// an exact integer: extract its decimal digits directly,
		// writing them backwards into buf.
		var buf [24]byte
		n := len(buf) - 1
		for v := f.mant; v > 0; {
			v1 := v / 10
			v -= 10 * v1
			buf[n] = byte(v + '0')
			n--
			v = v1
		}
		nd := len(buf) - n - 1
		for i := 0; i < nd; i++ {
			d.d[i] = buf[n+1+i]
		}
		d.nd, d.dp = nd, nd
		// Drop trailing zeros; they are implied by d.dp.
		for d.nd > 0 && d.d[d.nd-1] == '0' {
			d.nd--
		}
		if d.nd == 0 {
			d.dp = 0
		}
		d.neg = f.neg
		return true
	}
	upper.Normalize()
	// Uniformize exponents.
	if f.exp > upper.exp {
		f.mant <<= uint(f.exp - upper.exp)
		f.exp = upper.exp
	}
	if lower.exp > upper.exp {
		lower.mant <<= uint(lower.exp - upper.exp)
		lower.exp = upper.exp
	}

	exp10 := frexp10Many(lower, f, upper)
	// Take a safety margin due to rounding in frexp10Many, but we lose precision.
	upper.mant++
	lower.mant--

	// The shortest representation of f is either rounded up or down, but
	// in any case, it is a truncation of upper.
	shift := uint(-upper.exp)
	integer := uint32(upper.mant >> shift)
	fraction := upper.mant - (uint64(integer) << shift)

	// How far we can go down from upper until the result is wrong.
	allowance := upper.mant - lower.mant
	// How far we should go to get a very precise result.
	targetDiff := upper.mant - f.mant

	// Count integral digits: there are at most 10.
	var integerDigits int
	for i, pow := 0, uint64(1); i < 20; i++ {
		if pow > uint64(integer) {
			integerDigits = i
			break
		}
		pow *= 10
	}
	for i := 0; i < integerDigits; i++ {
		pow := uint64pow10[integerDigits-i-1]
		digit := integer / uint32(pow)
		d.d[i] = byte(digit + '0')
		integer -= digit * uint32(pow)
		// evaluate whether we should stop.
		if currentDiff := uint64(integer)<<shift + fraction; currentDiff < allowance {
			d.nd = i + 1
			d.dp = integerDigits + exp10
			d.neg = f.neg
			// Sometimes allowance is so large the last digit might need to be
			// decremented to get closer to f.
			return adjustLastDigit(d, currentDiff, targetDiff, allowance, pow<<shift, 2)
		}
	}
	d.nd = integerDigits
	d.dp = d.nd + exp10
	d.neg = f.neg

	// Compute digits of the fractional part. At each step fraction does not
	// overflow. The choice of minExp implies that fraction is less than 2^60.
	var digit int
	multiplier := uint64(1)
	for {
		fraction *= 10
		multiplier *= 10
		digit = int(fraction >> shift)
		d.d[d.nd] = byte(digit + '0')
		d.nd++
		fraction -= uint64(digit) << shift
		if fraction < allowance*multiplier {
			// We are in the admissible range. Note that if allowance is about to
			// overflow, that is, allowance > 2^64/10, the condition is automatically
			// true due to the limited range of fraction.
			return adjustLastDigit(d,
				fraction, targetDiff*multiplier, allowance*multiplier,
				1<<shift, multiplier*2)
		}
	}
}
|  | ||||
// adjustLastDigit modifies d = x-currentDiff*ε, to get closest to
// d = x-targetDiff*ε, without becoming smaller than x-maxDiff*ε.
// It assumes that a decimal digit is worth ulpDecimal*ε, and that
// all data is known with a error estimate of ulpBinary*ε.
func adjustLastDigit(d *decimalSlice, currentDiff, targetDiff, maxDiff, ulpDecimal, ulpBinary uint64) bool {
	if ulpDecimal < 2*ulpBinary {
		// Approximation is too wide.
		return false
	}
	// Decrement the last digit as long as doing so provably moves the
	// value closer to the target, within the binary error margin.
	for currentDiff+ulpDecimal/2+ulpBinary < targetDiff {
		d.d[d.nd-1]--
		currentDiff += ulpDecimal
	}
	if currentDiff+ulpDecimal <= targetDiff+ulpDecimal/2+ulpBinary {
		// we have two choices, and don't know what to do.
		return false
	}
	if currentDiff < ulpBinary || currentDiff > maxDiff-ulpBinary {
		// we went too far
		return false
	}
	if d.nd == 1 && d.d[0] == '0' {
		// the number has actually reached zero.
		d.nd = 0
		d.dp = 0
	}
	return true
}
							
								
								
									
										475
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										475
									
								
								vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,475 @@ | ||||
| // Copyright 2009 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Binary to decimal floating point conversion. | ||||
| // Algorithm: | ||||
| //   1) store mantissa in multiprecision decimal | ||||
| //   2) shift decimal by exponent | ||||
| //   3) read digits out & format | ||||
|  | ||||
| package internal | ||||
|  | ||||
| import "math" | ||||
|  | ||||
// TODO: move elsewhere?
// floatInfo describes the bit layout of an IEEE 754 binary
// floating-point format.
type floatInfo struct {
	mantbits uint // number of mantissa bits (excluding the implicit leading bit)
	expbits  uint // number of exponent bits
	bias     int  // exponent bias
}

// Layout parameters for IEEE 754 single and double precision.
var float32info = floatInfo{23, 8, -127}
var float64info = floatInfo{52, 11, -1023}
|  | ||||
// formatFloat converts the floating-point number f to a string,
// according to the format fmt and precision prec.  It rounds the
// result assuming that the original was obtained from a floating-point
// value of bitSize bits (32 for float32, 64 for float64).
//
// The format fmt is one of
// 'b' (-ddddp±ddd, a binary exponent),
// 'e' (-d.dddde±dd, a decimal exponent),
// 'E' (-d.ddddE±dd, a decimal exponent),
// 'f' (-ddd.dddd, no exponent),
// 'g' ('e' for large exponents, 'f' otherwise), or
// 'G' ('E' for large exponents, 'f' otherwise).
//
// The precision prec controls the number of digits
// (excluding the exponent) printed by the 'e', 'E', 'f', 'g', and 'G' formats.
// For 'e', 'E', and 'f' it is the number of digits after the decimal point.
// For 'g' and 'G' it is the total number of digits.
// The special precision -1 uses the smallest number of digits
// necessary such that ParseFloat will return f exactly.
func formatFloat(f float64, fmt byte, prec, bitSize int) string {
	// Preallocate enough capacity for the common case to avoid growth.
	return string(genericFtoa(make([]byte, 0, max(prec+4, 24)), f, fmt, prec, bitSize))
}
|  | ||||
// appendFloat appends the string form of the floating-point number f,
// as generated by formatFloat, to dst and returns the extended buffer.
func appendFloat(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte {
	return genericFtoa(dst, f, fmt, prec, bitSize)
}
|  | ||||
// genericFtoa is the shared implementation behind formatFloat and
// appendFloat. It unpacks the IEEE 754 bits of val, handles Inf/NaN and
// the 'b' format directly, then tries the fast extFloat-based paths
// before falling back to the multiprecision algorithm in bigFtoa.
func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte {
	var bits uint64
	var flt *floatInfo
	switch bitSize {
	case 32:
		bits = uint64(math.Float32bits(float32(val)))
		flt = &float32info
	case 64:
		bits = math.Float64bits(val)
		flt = &float64info
	default:
		panic("strconv: illegal AppendFloat/FormatFloat bitSize")
	}

	// Split the bit pattern into sign, biased exponent, and mantissa fields.
	neg := bits>>(flt.expbits+flt.mantbits) != 0
	exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
	mant := bits & (uint64(1)<<flt.mantbits - 1)

	switch exp {
	case 1<<flt.expbits - 1:
		// Inf, NaN
		var s string
		switch {
		case mant != 0:
			s = "NaN"
		case neg:
			s = "-Inf"
		default:
			s = "+Inf"
		}
		return append(dst, s...)

	case 0:
		// denormalized
		exp++

	default:
		// add implicit top bit
		mant |= uint64(1) << flt.mantbits
	}
	exp += flt.bias

	// Pick off easy binary format.
	if fmt == 'b' {
		return fmtB(dst, neg, mant, exp, flt)
	}

	// optimize is a package-level flag (declared elsewhere in this
	// package) that can force the slow-but-always-correct path.
	if !optimize {
		return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
	}

	var digs decimalSlice
	ok := false
	// Negative precision means "only as much as needed to be exact."
	shortest := prec < 0
	if shortest {
		// Try Grisu3 algorithm.
		f := new(extFloat)
		lower, upper := f.AssignComputeBounds(mant, exp, neg, flt)
		var buf [32]byte
		digs.d = buf[:]
		ok = f.ShortestDecimal(&digs, &lower, &upper)
		if !ok {
			return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
		}
		// Precision for shortest representation mode.
		switch fmt {
		case 'e', 'E':
			prec = digs.nd - 1
		case 'f':
			prec = max(digs.nd-digs.dp, 0)
		case 'g', 'G':
			prec = digs.nd
		}
	} else if fmt != 'f' {
		// Fixed number of digits.
		digits := prec
		switch fmt {
		case 'e', 'E':
			digits++
		case 'g', 'G':
			if prec == 0 {
				prec = 1
			}
			digits = prec
		}
		if digits <= 15 {
			// try fast algorithm when the number of digits is reasonable.
			var buf [24]byte
			digs.d = buf[:]
			f := extFloat{mant, exp - int(flt.mantbits), neg}
			ok = f.FixedDecimal(&digs, digits)
		}
	}
	if !ok {
		// Fast path failed or was not applicable.
		return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
	}
	return formatDigits(dst, shortest, neg, digs, prec, fmt)
}
|  | ||||
// bigFtoa uses multiprecision computations to format a float.
// It is the slow but always-correct fallback used when the extFloat
// fast paths cannot guarantee a correct answer.
func bigFtoa(dst []byte, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
	// Load the mantissa into a multiprecision decimal and apply the
	// binary exponent as a shift.
	d := new(decimal)
	d.Assign(mant)
	d.Shift(exp - int(flt.mantbits))
	var digs decimalSlice
	shortest := prec < 0
	if shortest {
		roundShortest(d, mant, exp, flt)
		digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
		// Precision for shortest representation mode.
		switch fmt {
		case 'e', 'E':
			prec = digs.nd - 1
		case 'f':
			prec = max(digs.nd-digs.dp, 0)
		case 'g', 'G':
			prec = digs.nd
		}
	} else {
		// Round appropriately.
		switch fmt {
		case 'e', 'E':
			d.Round(prec + 1)
		case 'f':
			d.Round(d.dp + prec)
		case 'g', 'G':
			if prec == 0 {
				prec = 1
			}
			d.Round(prec)
		}
		digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
	}
	return formatDigits(dst, shortest, neg, digs, prec, fmt)
}
|  | ||||
// formatDigits renders the decimal digits digs in the requested format,
// dispatching to fmtE or fmtF; for 'g'/'G' it chooses between the two
// based on the decimal exponent.
func formatDigits(dst []byte, shortest bool, neg bool, digs decimalSlice, prec int, fmt byte) []byte {
	switch fmt {
	case 'e', 'E':
		return fmtE(dst, neg, digs, prec, fmt)
	case 'f':
		return fmtF(dst, neg, digs, prec)
	case 'g', 'G':
		// trailing fractional zeros in 'e' form will be trimmed.
		eprec := prec
		if eprec > digs.nd && digs.nd >= digs.dp {
			eprec = digs.nd
		}
		// %e is used if the exponent from the conversion
		// is less than -4 or greater than or equal to the precision.
		// if precision was the shortest possible, use precision 6 for this decision.
		if shortest {
			eprec = 6
		}
		exp := digs.dp - 1
		if exp < -4 || exp >= eprec {
			if prec > digs.nd {
				prec = digs.nd
			}
			// fmt+'e'-'g' maps 'g'->'e' and 'G'->'E'.
			return fmtE(dst, neg, digs, prec-1, fmt+'e'-'g')
		}
		if prec > digs.dp {
			prec = digs.nd
		}
		return fmtF(dst, neg, digs, max(prec-digs.dp, 0))
	}

	// unknown format
	return append(dst, '%', fmt)
}
|  | ||||
// Round d (= mant * 2^exp) to the shortest number of digits
// that will let the original floating point value be precisely
// reconstructed.  Size is original floating point size (64 or 32).
func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
	// If mantissa is zero, the number is zero; stop now.
	if mant == 0 {
		d.nd = 0
		return
	}

	// Compute upper and lower such that any decimal number
	// between upper and lower (possibly inclusive)
	// will round to the original floating point number.

	// We may see at once that the number is already shortest.
	//
	// Suppose d is not denormal, so that 2^exp <= d < 10^dp.
	// The closest shorter number is at least 10^(dp-nd) away.
	// The lower/upper bounds computed below are at distance
	// at most 2^(exp-mantbits).
	//
	// So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
	// or equivalently log2(10)*(dp-nd) > exp-mantbits.
	// It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
	minexp := flt.bias + 1 // minimum possible exponent
	if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
		// The number is already shortest.
		return
	}

	// d = mant << (exp - mantbits)
	// Next highest floating point number is mant+1 << exp-mantbits.
	// Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
	upper := new(decimal)
	upper.Assign(mant*2 + 1)
	upper.Shift(exp - int(flt.mantbits) - 1)

	// d = mant << (exp - mantbits)
	// Next lowest floating point number is mant-1 << exp-mantbits,
	// unless mant-1 drops the significant bit and exp is not the minimum exp,
	// in which case the next lowest is mant*2-1 << exp-mantbits-1.
	// Either way, call it mantlo << explo-mantbits.
	// Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
	var mantlo uint64
	var explo int
	if mant > 1<<flt.mantbits || exp == minexp {
		mantlo = mant - 1
		explo = exp
	} else {
		mantlo = mant*2 - 1
		explo = exp - 1
	}
	lower := new(decimal)
	lower.Assign(mantlo*2 + 1)
	lower.Shift(explo - int(flt.mantbits) - 1)

	// The upper and lower bounds are possible outputs only if
	// the original mantissa is even, so that IEEE round-to-even
	// would round to the original mantissa and not the neighbors.
	inclusive := mant%2 == 0

	// Now we can figure out the minimum number of digits required.
	// Walk along until d has distinguished itself from upper and lower.
	for i := 0; i < d.nd; i++ {
		var l, m, u byte // lower, middle, upper digits
		if i < lower.nd {
			l = lower.d[i]
		} else {
			l = '0'
		}
		m = d.d[i]
		if i < upper.nd {
			u = upper.d[i]
		} else {
			u = '0'
		}

		// Okay to round down (truncate) if lower has a different digit
		// or if lower is inclusive and is exactly the result of rounding down.
		okdown := l != m || (inclusive && l == m && i+1 == lower.nd)

		// Okay to round up if upper has a different digit and
		// either upper is inclusive or upper is bigger than the result of rounding up.
		okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)

		// If it's okay to do either, then round to the nearest one.
		// If it's okay to do only one, do it.
		switch {
		case okdown && okup:
			d.Round(i + 1)
			return
		case okdown:
			d.RoundDown(i + 1)
			return
		case okup:
			d.RoundUp(i + 1)
			return
		}
	}
}
|  | ||||
// decimalSlice is a view of a decimal mantissa: d[:nd] holds the digit
// bytes ('0'-'9') and dp locates the decimal point relative to d[0]
// (it may be <= 0 or > nd).
type decimalSlice struct {
	d      []byte // digit characters; d[:nd] are significant
	nd, dp int    // number of digits; decimal point position
	neg    bool   // sign
}
|  | ||||
// %e: -d.ddddde±dd
// fmtE appends the digits of d in scientific notation with prec digits
// after the decimal point; fmt is the exponent letter ('e' or 'E').
func fmtE(dst []byte, neg bool, d decimalSlice, prec int, fmt byte) []byte {
	// sign
	if neg {
		dst = append(dst, '-')
	}

	// first digit
	ch := byte('0')
	if d.nd != 0 {
		ch = d.d[0]
	}
	dst = append(dst, ch)

	// .moredigits
	if prec > 0 {
		dst = append(dst, '.')
		i := 1
		// m is one past the last significant digit available to copy;
		// the remainder of the field is zero-padded.
		m := d.nd + prec + 1 - max(d.nd, prec+1)
		for i < m {
			dst = append(dst, d.d[i])
			i++
		}
		for i <= prec {
			dst = append(dst, '0')
			i++
		}
	}

	// e±
	dst = append(dst, fmt)
	exp := d.dp - 1
	if d.nd == 0 { // special case: 0 has exponent 0
		exp = 0
	}
	if exp < 0 {
		ch = '-'
		exp = -exp
	} else {
		ch = '+'
	}
	dst = append(dst, ch)

	// dddd — at most three exponent digits, written backwards into buf.
	var buf [3]byte
	i := len(buf)
	for exp >= 10 {
		i--
		buf[i] = byte(exp%10 + '0')
		exp /= 10
	}
	// exp < 10
	i--
	buf[i] = byte(exp + '0')

	// Emit at least two exponent digits, zero-padding a single digit.
	switch i {
	case 0:
		dst = append(dst, buf[0], buf[1], buf[2])
	case 1:
		dst = append(dst, buf[1], buf[2])
	case 2:
		// leading zeroes
		dst = append(dst, '0', buf[2])
	}
	return dst
}
|  | ||||
| // %f: -ddddddd.ddddd | ||||
| func fmtF(dst []byte, neg bool, d decimalSlice, prec int) []byte { | ||||
| 	// sign | ||||
| 	if neg { | ||||
| 		dst = append(dst, '-') | ||||
| 	} | ||||
|  | ||||
| 	// integer, padded with zeros as needed. | ||||
| 	if d.dp > 0 { | ||||
| 		var i int | ||||
| 		for i = 0; i < d.dp && i < d.nd; i++ { | ||||
| 			dst = append(dst, d.d[i]) | ||||
| 		} | ||||
| 		for ; i < d.dp; i++ { | ||||
| 			dst = append(dst, '0') | ||||
| 		} | ||||
| 	} else { | ||||
| 		dst = append(dst, '0') | ||||
| 	} | ||||
|  | ||||
| 	// fraction | ||||
| 	if prec > 0 { | ||||
| 		dst = append(dst, '.') | ||||
| 		for i := 0; i < prec; i++ { | ||||
| 			ch := byte('0') | ||||
| 			if j := d.dp + i; 0 <= j && j < d.nd { | ||||
| 				ch = d.d[j] | ||||
| 			} | ||||
| 			dst = append(dst, ch) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return dst | ||||
| } | ||||
|  | ||||
| // %b: -ddddddddp+ddd | ||||
| func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte { | ||||
| 	var buf [50]byte | ||||
| 	w := len(buf) | ||||
| 	exp -= int(flt.mantbits) | ||||
| 	esign := byte('+') | ||||
| 	if exp < 0 { | ||||
| 		esign = '-' | ||||
| 		exp = -exp | ||||
| 	} | ||||
| 	n := 0 | ||||
| 	for exp > 0 || n < 1 { | ||||
| 		n++ | ||||
| 		w-- | ||||
| 		buf[w] = byte(exp%10 + '0') | ||||
| 		exp /= 10 | ||||
| 	} | ||||
| 	w-- | ||||
| 	buf[w] = esign | ||||
| 	w-- | ||||
| 	buf[w] = 'p' | ||||
| 	n = 0 | ||||
| 	for mant > 0 || n < 1 { | ||||
| 		n++ | ||||
| 		w-- | ||||
| 		buf[w] = byte(mant%10 + '0') | ||||
| 		mant /= 10 | ||||
| 	} | ||||
| 	if neg { | ||||
| 		w-- | ||||
| 		buf[w] = '-' | ||||
| 	} | ||||
| 	return append(dst, buf[w:]...) | ||||
| } | ||||
|  | ||||
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user