21
vendor/github.com/actions/workflow-parser/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2019 GitHub

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
38
vendor/github.com/actions/workflow-parser/model/command.go
generated
vendored
@@ -1,38 +0,0 @@
package model

import (
	"strings"
)

// Command represents the optional "runs" and "args" attributes.
// Each one takes one of two forms:
// - runs="entrypoint arg1 arg2 ..."
// - runs=[ "entrypoint", "arg1", "arg2", ... ]
type Command interface {
	isCommand()
	Split() []string
}

// StringCommand represents the string based form of the "runs" or "args"
// attribute.
// - runs="entrypoint arg1 arg2 ..."
type StringCommand struct {
	Value string
}

// ListCommand represents the list based form of the "runs" or "args" attribute.
// - runs=[ "entrypoint", "arg1", "arg2", ... ]
type ListCommand struct {
	Values []string
}

func (s *StringCommand) isCommand() {}
func (l *ListCommand) isCommand()   {}

func (s *StringCommand) Split() []string {
	return strings.Fields(s.Value)
}

func (l *ListCommand) Split() []string {
	return l.Values
}
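Note: the block below is not part of the diff; it is a minimal illustrative sketch of how the two Command forms above normalize to the same argument list (the literal values are invented for the example).

package main

import (
	"fmt"

	"github.com/actions/workflow-parser/model"
)

func main() {
	// Both forms of "runs"/"args" yield the same Split() result.
	var s model.Command = &model.StringCommand{Value: "entrypoint arg1 arg2"}
	var l model.Command = &model.ListCommand{Values: []string{"entrypoint", "arg1", "arg2"}}
	fmt.Println(s.Split()) // [entrypoint arg1 arg2]
	fmt.Println(l.Split()) // [entrypoint arg1 arg2]
}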
64
vendor/github.com/actions/workflow-parser/model/configuration.go
generated
vendored
@@ -1,64 +0,0 @@
package model

import (
	"strings"
)

// Configuration is a parsed main.workflow file
type Configuration struct {
	Actions   []*Action
	Workflows []*Workflow
}

// Action represents a single "action" stanza in a .workflow file.
type Action struct {
	Identifier string
	Uses       Uses
	Runs, Args Command
	Needs      []string
	Env        map[string]string
	Secrets    []string
}

// Workflow represents a single "workflow" stanza in a .workflow file.
type Workflow struct {
	Identifier string
	On         string
	Resolves   []string
}

// GetAction looks up action by identifier.
//
// If the action is not found, nil is returned.
func (c *Configuration) GetAction(id string) *Action {
	for _, action := range c.Actions {
		if action.Identifier == id {
			return action
		}
	}
	return nil
}

// GetWorkflow looks up a workflow by identifier.
//
// If the workflow is not found, nil is returned.
func (c *Configuration) GetWorkflow(id string) *Workflow {
	for _, workflow := range c.Workflows {
		if workflow.Identifier == id {
			return workflow
		}
	}
	return nil
}

// GetWorkflows gets all Workflow structures that match a given type of event.
// e.g., GetWorkflows("push")
func (c *Configuration) GetWorkflows(eventType string) []*Workflow {
	var ret []*Workflow
	for _, workflow := range c.Workflows {
		if strings.EqualFold(workflow.On, eventType) {
			ret = append(ret, workflow)
		}
	}
	return ret
}
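Note: not part of the diff; a short sketch of how the lookup helpers above are typically combined once a Configuration has been parsed (the identifiers and surrounding program are assumed).

// printResolved lists, for each workflow triggered by "push", the actions it resolves.
func printResolved(cfg *model.Configuration) {
	for _, wf := range cfg.GetWorkflows("push") {
		for _, id := range wf.Resolves {
			if act := cfg.GetAction(id); act != nil {
				fmt.Println(wf.Identifier, "resolves", act.Identifier)
			}
		}
	}
}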
57
vendor/github.com/actions/workflow-parser/model/uses.go
generated
vendored
@@ -1,57 +0,0 @@
package model

import (
	"fmt"
)

type Uses interface {
	fmt.Stringer
	isUses()
}

// UsesDockerImage represents `uses = "docker://<image>"`
type UsesDockerImage struct {
	Image string
}

// UsesRepository represents `uses = "<owner>/<repo>[/<path>]@<ref>"`
type UsesRepository struct {
	Repository string
	Path       string
	Ref        string
}

// UsesPath represents `uses = "./<path>"`
type UsesPath struct {
	Path string
}

// UsesInvalid represents any invalid `uses = "<raw>"` value
type UsesInvalid struct {
	Raw string
}

func (u *UsesDockerImage) isUses() {}
func (u *UsesRepository) isUses()  {}
func (u *UsesPath) isUses()        {}
func (u *UsesInvalid) isUses()     {}

func (u *UsesDockerImage) String() string {
	return fmt.Sprintf("docker://%s", u.Image)
}

func (u *UsesRepository) String() string {
	if u.Path == "" {
		return fmt.Sprintf("%s@%s", u.Repository, u.Ref)
	}

	return fmt.Sprintf("%s/%s@%s", u.Repository, u.Path, u.Ref)
}

func (u *UsesPath) String() string {
	return fmt.Sprintf("./%s", u.Path)
}

func (u *UsesInvalid) String() string {
	return u.Raw
}
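Note: not part of the diff; an illustrative sketch showing how each Uses variant renders back to the `uses = "..."` forms documented above (the example values are invented).

package main

import (
	"fmt"

	"github.com/actions/workflow-parser/model"
)

func main() {
	fmt.Println((&model.UsesDockerImage{Image: "alpine:3.9"}).String())                    // docker://alpine:3.9
	fmt.Println((&model.UsesRepository{Repository: "owner/repo", Ref: "master"}).String()) // owner/repo@master
	fmt.Println((&model.UsesPath{Path: "local/action"}).String())                          // ./local/action
}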
136
vendor/github.com/actions/workflow-parser/parser/errors.go
generated
vendored
@@ -1,136 +0,0 @@
package parser

import (
	"bytes"
	"fmt"
	"sort"
	"strconv"
	"strings"

	"github.com/actions/workflow-parser/model"
)

type Error struct {
	message   string
	Errors    []*ParseError
	Actions   []*model.Action
	Workflows []*model.Workflow
}

func (e *Error) Error() string {
	buffer := bytes.NewBuffer(nil)
	buffer.WriteString(e.message)
	for _, pe := range e.Errors {
		buffer.WriteString("\n ")
		buffer.WriteString(pe.Error())
	}
	return buffer.String()
}

// FirstError searches a Configuration for the first error at or above a
// given severity level. Checking the return value against nil is a good
// way to see if the file has any errors at or above the given severity.
// A caller intending to execute the file might check for
// `errors.FirstError(parser.WARNING)`, while a caller intending to
// display the file might check for `errors.FirstError(parser.FATAL)`.
func (e *Error) FirstError(severity Severity) error {
	for _, pe := range e.Errors {
		if pe.Severity >= severity {
			return pe
		}
	}
	return nil
}

// ParseError represents an error identified by the parser, either syntactic
// (HCL) or semantic (.workflow) in nature. There are fields for location
// (File, Line, Column), severity, and base error string. The `Error()`
// function on this type concatenates whatever bits of the location are
// available with the message. The severity is only used for filtering.
type ParseError struct {
	message  string
	Pos      ErrorPos
	Severity Severity
}

// ErrorPos represents the location of an error in a user's workflow
// file(s).
type ErrorPos struct {
	File   string
	Line   int
	Column int
}

// newFatal creates a new error at the FATAL level, indicating that the
// file is so broken it should not be displayed.
func newFatal(pos ErrorPos, format string, a ...interface{}) *ParseError {
	return &ParseError{
		message:  fmt.Sprintf(format, a...),
		Pos:      pos,
		Severity: FATAL,
	}
}

// newError creates a new error at the ERROR level, indicating that the
// file can be displayed but cannot be run.
func newError(pos ErrorPos, format string, a ...interface{}) *ParseError {
	return &ParseError{
		message:  fmt.Sprintf(format, a...),
		Pos:      pos,
		Severity: ERROR,
	}
}

// newWarning creates a new error at the WARNING level, indicating that
// the file might be runnable but might not execute as intended.
func newWarning(pos ErrorPos, format string, a ...interface{}) *ParseError {
	return &ParseError{
		message:  fmt.Sprintf(format, a...),
		Pos:      pos,
		Severity: WARNING,
	}
}

func (e *ParseError) Error() string {
	var sb strings.Builder
	if e.Pos.Line != 0 {
		sb.WriteString("Line ")                  // nolint: errcheck
		sb.WriteString(strconv.Itoa(e.Pos.Line)) // nolint: errcheck
		sb.WriteString(": ")                     // nolint: errcheck
	}
	if sb.Len() > 0 {
		sb.WriteString(e.message) // nolint: errcheck
		return sb.String()
	}
	return e.message
}

const (
	_ = iota

	// WARNING indicates a mistake that might affect correctness
	WARNING

	// ERROR indicates a mistake that prevents execution of any workflows in the file
	ERROR

	// FATAL indicates a mistake that prevents even drawing the file
	FATAL
)

// Severity represents the level of an error encountered while parsing a
// workflow file. See the comments for WARNING, ERROR, and FATAL, above.
type Severity int

type errorList []*ParseError

func (a errorList) Len() int           { return len(a) }
func (a errorList) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a errorList) Less(i, j int) bool { return a[i].Pos.Line < a[j].Pos.Line }

// sortErrors sorts the errors reported by the parser. Do this after
// parsing is complete. The sort is stable, so order is preserved within
// a single line: left to right, syntax errors before validation errors.
func (errors errorList) sort() {
	sort.Stable(errors)
}
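Note: not part of the diff; a hedged sketch of the call pattern the FirstError comment above describes (assumes err came back from parser.Parse and that the usual imports are in place).

// usable reports whether a parsed .workflow file is good enough for a given purpose.
func usable(err error, threshold parser.Severity) bool {
	if err == nil {
		return true
	}
	if pe, ok := err.(*parser.Error); ok {
		return pe.FirstError(threshold) == nil
	}
	return false
}

// usable(err, parser.WARNING) gates execution; usable(err, parser.FATAL) gates display.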
42
vendor/github.com/actions/workflow-parser/parser/events.go
generated
vendored
@@ -1,42 +0,0 @@
package parser

import (
	"strings"
)

// isAllowedEventType returns true if the event type is supported.
func isAllowedEventType(eventType string) bool {
	_, ok := eventTypeWhitelist[strings.ToLower(eventType)]
	return ok
}

// https://developer.github.com/actions/creating-workflows/workflow-configuration-options/#events-supported-in-workflow-files
var eventTypeWhitelist = map[string]struct{}{
	"check_run":                   {},
	"check_suite":                 {},
	"commit_comment":              {},
	"create":                      {},
	"delete":                      {},
	"deployment":                  {},
	"deployment_status":           {},
	"fork":                        {},
	"gollum":                      {},
	"issue_comment":               {},
	"issues":                      {},
	"label":                       {},
	"member":                      {},
	"milestone":                   {},
	"page_build":                  {},
	"project_card":                {},
	"project_column":              {},
	"project":                     {},
	"public":                      {},
	"pull_request_review_comment": {},
	"pull_request_review":         {},
	"pull_request":                {},
	"push":                        {},
	"release":                     {},
	"repository_dispatch":         {},
	"status":                      {},
	"watch":                       {},
}
15
vendor/github.com/actions/workflow-parser/parser/opts.go
generated
vendored
@@ -1,15 +0,0 @@
package parser

type OptionFunc func(*Parser)

func WithSuppressWarnings() OptionFunc {
	return func(ps *Parser) {
		ps.suppressSeverity = WARNING
	}
}

func WithSuppressErrors() OptionFunc {
	return func(ps *Parser) {
		ps.suppressSeverity = ERROR
	}
}
807
vendor/github.com/actions/workflow-parser/parser/parser.go
generated
vendored
@@ -1,807 +0,0 @@
package parser

import (
	"fmt"
	"io"
	"io/ioutil"
	"regexp"
	"strings"

	"github.com/actions/workflow-parser/model"
	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
	hclparser "github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/token"
	"github.com/soniakeys/graph"
)

const minVersion = 0
const maxVersion = 0
const maxSecrets = 100

type Parser struct {
	version   int
	actions   []*model.Action
	workflows []*model.Workflow
	errors    errorList

	posMap           map[interface{}]ast.Node
	suppressSeverity Severity
}

// Parse parses a .workflow file and return the actions and global variables found within.
func Parse(reader io.Reader, options ...OptionFunc) (*model.Configuration, error) {
	// FIXME - check context for deadline?
	b, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}

	root, err := hcl.ParseBytes(b)
	if err != nil {
		if pe, ok := err.(*hclparser.PosError); ok {
			pos := ErrorPos{File: pe.Pos.Filename, Line: pe.Pos.Line, Column: pe.Pos.Column}
			errors := errorList{newFatal(pos, pe.Err.Error())}
			return nil, &Error{
				message: "unable to parse",
				Errors:  errors,
			}
		}
		return nil, err
	}

	p := parseAndValidate(root.Node, options...)
	if len(p.errors) > 0 {
		return nil, &Error{
			message:   "unable to parse and validate",
			Errors:    p.errors,
			Actions:   p.actions,
			Workflows: p.workflows,
		}
	}

	return &model.Configuration{
		Actions:   p.actions,
		Workflows: p.workflows,
	}, nil
}

// parseAndValidate converts a HCL AST into a Parser and validates
// high-level structure.
// Parameters:
// - root - the contents of a .workflow file, as AST
// Returns:
// - a Parser structure containing actions and workflow definitions
func parseAndValidate(root ast.Node, options ...OptionFunc) *Parser {
	p := &Parser{
		posMap: make(map[interface{}]ast.Node),
	}

	for _, option := range options {
		option(p)
	}

	p.parseRoot(root)
	p.validate()
	p.errors.sort()

	return p
}

func (p *Parser) validate() {
	p.analyzeDependencies()
	p.checkCircularDependencies()
	p.checkActions()
	p.checkFlows()
}

func uniqStrings(items []string) []string {
	seen := make(map[string]bool)
	ret := make([]string, 0, len(items))
	for _, item := range items {
		if !seen[item] {
			seen[item] = true
			ret = append(ret, item)
		}
	}
	return ret
}

// checkCircularDependencies finds loops in the action graph.
// It emits a fatal error for each cycle it finds, in the order (top to
// bottom, left to right) they appear in the .workflow file.
func (p *Parser) checkCircularDependencies() {
	// make a map from action name to node ID, which is the index in the p.actions array
	// That is, p.actions[actionmap[X]].Identifier == X
	actionmap := make(map[string]graph.NI)
	for i, action := range p.actions {
		actionmap[action.Identifier] = graph.NI(i)
	}

	// make an adjacency list representation of the action dependency graph
	adjList := make(graph.AdjacencyList, len(p.actions))
	for i, action := range p.actions {
		adjList[i] = make([]graph.NI, 0, len(action.Needs))
		for _, depName := range action.Needs {
			if depIdx, ok := actionmap[depName]; ok {
				adjList[i] = append(adjList[i], depIdx)
			}
		}
	}

	// find cycles, and print a fatal error for each one
	g := graph.Directed{AdjacencyList: adjList}
	g.Cycles(func(cycle []graph.NI) bool {
		node := p.posMap[&p.actions[cycle[len(cycle)-1]].Needs]
		p.addFatal(node, "Circular dependency on `%s'", p.actions[cycle[0]].Identifier)
		return true
	})
}

// checkActions returns error if any actions are syntactically correct but
// have structural errors
func (p *Parser) checkActions() {
	secrets := make(map[string]bool)
	for _, t := range p.actions {
		// Ensure the Action has a `uses` attribute
		if t.Uses == nil {
			p.addError(p.posMap[t], "Action `%s' must have a `uses' attribute", t.Identifier)
			// continue, checking other actions
		}

		// Ensure there aren't too many secrets
		for _, str := range t.Secrets {
			if !secrets[str] {
				secrets[str] = true
				if len(secrets) == maxSecrets+1 {
					p.addError(p.posMap[&t.Secrets], "All actions combined must not have more than %d unique secrets", maxSecrets)
				}
			}
		}

		// Ensure that no environment variable or secret begins with
		// "GITHUB_", unless it's "GITHUB_TOKEN".
		// Also ensure that all environment variable names come from the legal
		// form for environment variable names.
		// Finally, ensure that the same key name isn't used more than once
		// between env and secrets, combined.
		for k := range t.Env {
			p.checkEnvironmentVariable(k, p.posMap[&t.Env])
		}
		secretVars := make(map[string]bool)
		for _, k := range t.Secrets {
			p.checkEnvironmentVariable(k, p.posMap[&t.Secrets])
			if _, found := t.Env[k]; found {
				p.addError(p.posMap[&t.Secrets], "Secret `%s' conflicts with an environment variable with the same name", k)
			}
			if secretVars[k] {
				p.addWarning(p.posMap[&t.Secrets], "Secret `%s' redefined", k)
			}
			secretVars[k] = true
		}
	}
}

var envVarChecker = regexp.MustCompile(`\A[A-Za-z_][A-Za-z_0-9]*\z`)

func (p *Parser) checkEnvironmentVariable(key string, node ast.Node) {
	if key != "GITHUB_TOKEN" && strings.HasPrefix(key, "GITHUB_") {
		p.addWarning(node, "Environment variables and secrets beginning with `GITHUB_' are reserved")
	}
	if !envVarChecker.MatchString(key) {
		p.addWarning(node, "Environment variables and secrets must contain only A-Z, a-z, 0-9, and _ characters, got `%s'", key)
	}
}

// checkFlows appends an error if any workflows are syntactically correct but
// have structural errors
func (p *Parser) checkFlows() {
	actionmap := makeActionMap(p.actions)
	for _, f := range p.workflows {
		// make sure there's an `on` attribute
		if f.On == "" {
			p.addError(p.posMap[f], "Workflow `%s' must have an `on' attribute", f.Identifier)
			// continue, checking other workflows
		} else if !isAllowedEventType(f.On) {
			p.addError(p.posMap[&f.On], "Workflow `%s' has unknown `on' value `%s'", f.Identifier, f.On)
			// continue, checking other workflows
		}

		// make sure that the actions that are resolved all exist
		for _, actionID := range f.Resolves {
			_, ok := actionmap[actionID]
			if !ok {
				p.addError(p.posMap[&f.Resolves], "Workflow `%s' resolves unknown action `%s'", f.Identifier, actionID)
				// continue, checking other workflows
			}
		}
	}
}

func makeActionMap(actions []*model.Action) map[string]*model.Action {
	actionmap := make(map[string]*model.Action)
	for _, action := range actions {
		actionmap[action.Identifier] = action
	}
	return actionmap
}

// Fill in Action dependencies for all actions based on explicit dependencies
// declarations.
//
// p.actions is an array of Action objects, as parsed. The Action objects in
// this array are mutated, by setting Action.dependencies for each.
func (p *Parser) analyzeDependencies() {
	actionmap := makeActionMap(p.actions)
	for _, action := range p.actions {
		// analyze explicit dependencies for each "needs" keyword
		p.analyzeNeeds(action, actionmap)
	}

	// uniq all the dependencies lists
	for _, action := range p.actions {
		if len(action.Needs) >= 2 {
			action.Needs = uniqStrings(action.Needs)
		}
	}
}

func (p *Parser) analyzeNeeds(action *model.Action, actionmap map[string]*model.Action) {
	for _, need := range action.Needs {
		_, ok := actionmap[need]
		if !ok {
			p.addError(p.posMap[&action.Needs], "Action `%s' needs nonexistent action `%s'", action.Identifier, need)
			// continue, checking other actions
		}
	}
}

// literalToStringMap converts a object value from the AST to a
// map[string]string. For example, the HCL `{ a="b" c="d" }` becomes the
// Go expression map[string]string{ "a": "b", "c": "d" }.
// If the value doesn't adhere to that format -- e.g.,
// if it's not an object, or it has non-assignment attributes, or if any
// of its values are anything other than a string, the function appends an
// appropriate error.
func (p *Parser) literalToStringMap(node ast.Node) map[string]string {
	obj, ok := node.(*ast.ObjectType)

	if !ok {
		p.addError(node, "Expected object, got %s", typename(node))
		return nil
	}

	p.checkAssignmentsOnly(obj.List, "")

	ret := make(map[string]string)
	for _, item := range obj.List.Items {
		if !isAssignment(item) {
			continue
		}
		str, ok := p.literalToString(item.Val)
		if ok {
			key := p.identString(item.Keys[0].Token)
			if key != "" {
				if _, found := ret[key]; found {
					p.addWarning(node, "Environment variable `%s' redefined", key)
				}
				ret[key] = str
			}
		}
	}

	return ret
}

func (p *Parser) identString(t token.Token) string {
	switch t.Type {
	case token.STRING:
		return t.Value().(string)
	case token.IDENT:
		return t.Text
	default:
		p.addErrorFromToken(t,
			"Each identifier should be a string, got %s",
			strings.ToLower(t.Type.String()))
		return ""
	}
}

// literalToStringArray converts a list value from the AST to a []string.
// For example, the HCL `[ "a", "b", "c" ]` becomes the Go expression
// []string{ "a", "b", "c" }.
// If the value doesn't adhere to that format -- it's not a list, or it
// contains anything other than strings, the function appends an
// appropriate error.
// If promoteScalars is true, then values that are scalar strings are
// promoted to a single-entry string array. E.g., "foo" becomes the Go
// expression []string{ "foo" }.
func (p *Parser) literalToStringArray(node ast.Node, promoteScalars bool) ([]string, bool) {
	literal, ok := node.(*ast.LiteralType)
	if ok {
		if promoteScalars && literal.Token.Type == token.STRING {
			return []string{literal.Token.Value().(string)}, true
		}
		p.addError(node, "Expected list, got %s", typename(node))
		return nil, false
	}

	list, ok := node.(*ast.ListType)
	if !ok {
		p.addError(node, "Expected list, got %s", typename(node))
		return nil, false
	}

	ret := make([]string, 0, len(list.List))
	for _, literal := range list.List {
		str, ok := p.literalToString(literal)
		if ok {
			ret = append(ret, str)
		}
	}

	return ret, true
}

// literalToString converts a literal value from the AST into a string.
// If the value isn't a scalar or isn't a string, the function appends an
// appropriate error and returns "", false.
func (p *Parser) literalToString(node ast.Node) (string, bool) {
	val := p.literalCast(node, token.STRING)
	if val == nil {
		return "", false
	}
	return val.(string), true
}

// literalToInt converts a literal value from the AST into an int64.
// Supported number formats are: 123, 0x123, and 0123.
// Exponents (1e6) and floats (123.456) generate errors.
// If the value isn't a scalar or isn't a number, the function appends an
// appropriate error and returns 0, false.
func (p *Parser) literalToInt(node ast.Node) (int64, bool) {
	val := p.literalCast(node, token.NUMBER)
	if val == nil {
		return 0, false
	}
	return val.(int64), true
}

func (p *Parser) literalCast(node ast.Node, t token.Type) interface{} {
	literal, ok := node.(*ast.LiteralType)
	if !ok {
		p.addError(node, "Expected %s, got %s", strings.ToLower(t.String()), typename(node))
		return nil
	}

	if literal.Token.Type != t {
		p.addError(node, "Expected %s, got %s", strings.ToLower(t.String()), typename(node))
		return nil
	}

	return literal.Token.Value()
}

// parseRoot parses the root of the AST, filling in p.version, p.actions,
// and p.workflows.
func (p *Parser) parseRoot(node ast.Node) {
	objectList, ok := node.(*ast.ObjectList)
	if !ok {
		// It should be impossible for HCL to return anything other than an
		// ObjectList as the root node. This error should never happen.
		p.addError(node, "Internal error: root node must be an ObjectList")
		return
	}

	p.actions = make([]*model.Action, 0, len(objectList.Items))
	p.workflows = make([]*model.Workflow, 0, len(objectList.Items))
	identifiers := make(map[string]bool)
	for idx, item := range objectList.Items {
		if item.Assign.IsValid() {
			p.parseVersion(idx, item)
			continue
		}
		p.parseBlock(item, identifiers)
	}
}

// parseBlock parses a single, top-level "action" or "workflow" block,
// appending it to p.actions or p.workflows as appropriate.
func (p *Parser) parseBlock(item *ast.ObjectItem, identifiers map[string]bool) {
	if len(item.Keys) != 2 {
		p.addError(item, "Invalid toplevel declaration")
		return
	}

	cmd := p.identString(item.Keys[0].Token)
	var id string

	switch cmd {
	case "action":
		action := p.actionifyItem(item)
		if action != nil {
			id = action.Identifier
			p.actions = append(p.actions, action)
		}
	case "workflow":
		workflow := p.workflowifyItem(item)
		if workflow != nil {
			id = workflow.Identifier
			p.workflows = append(p.workflows, workflow)
		}
	default:
		p.addError(item, "Invalid toplevel keyword, `%s'", cmd)
		return
	}

	if identifiers[id] {
		p.addError(item, "Identifier `%s' redefined", id)
	}

	identifiers[id] = true
}

// parseVersion parses a top-level `version=N` statement, filling in
// p.version.
func (p *Parser) parseVersion(idx int, item *ast.ObjectItem) {
	if len(item.Keys) != 1 || p.identString(item.Keys[0].Token) != "version" {
		// not a valid `version` declaration
		p.addError(item.Val, "Toplevel declarations cannot be assignments")
		return
	}
	if idx != 0 {
		p.addError(item.Val, "`version` must be the first declaration")
		return
	}
	version, ok := p.literalToInt(item.Val)
	if !ok {
		return
	}
	if version < minVersion || version > maxVersion {
		p.addError(item.Val, "`version = %d` is not supported", version)
		return
	}
	p.version = int(version)
}

// parseIdentifier parses the double-quoted identifier (name) for a
// "workflow" or "action" block.
func (p *Parser) parseIdentifier(key *ast.ObjectKey) string {
	id := key.Token.Text
	if len(id) < 3 || id[0] != '"' || id[len(id)-1] != '"' {
		p.addError(key, "Invalid format for identifier `%s'", id)
		return ""
	}
	return id[1 : len(id)-1]
}

// parseRequiredString parses a string value, setting its value into the
// out-parameter `value` and returning true if successful.
func (p *Parser) parseRequiredString(value *string, val ast.Node, nodeType, name, id string) bool {
	if *value != "" {
		p.addWarning(val, "`%s' redefined in %s `%s'", name, nodeType, id)
		// continue, allowing the redefinition
	}

	newVal, ok := p.literalToString(val)
	if !ok {
		p.addError(val, "Invalid format for `%s' in %s `%s', expected string", name, nodeType, id)
		return false
	}

	if newVal == "" {
		p.addError(val, "`%s' value in %s `%s' cannot be blank", name, nodeType, id)
		return false
	}

	*value = newVal
	return true
}

// parseBlockPreamble parses the beginning of a "workflow" or "action"
// block.
func (p *Parser) parseBlockPreamble(item *ast.ObjectItem, nodeType string) (string, *ast.ObjectType) {
	id := p.parseIdentifier(item.Keys[1])
	if id == "" {
		return "", nil
	}

	node := item.Val
	obj, ok := node.(*ast.ObjectType)
	if !ok {
		p.addError(node, "Each %s must have an { ... } block", nodeType)
		return "", nil
	}

	p.checkAssignmentsOnly(obj.List, id)

	return id, obj
}

// actionifyItem converts an AST block to an Action object.
func (p *Parser) actionifyItem(item *ast.ObjectItem) *model.Action {
	id, obj := p.parseBlockPreamble(item, "action")
	if obj == nil {
		return nil
	}

	action := &model.Action{
		Identifier: id,
	}
	p.posMap[action] = item

	for _, item := range obj.List.Items {
		p.parseActionAttribute(p.identString(item.Keys[0].Token), action, item.Val)
	}

	return action
}

// parseActionAttribute parses a single key-value pair from an "action"
// block. This function rejects any unknown keys and enforces formatting
// requirements on all values.
// It also has higher-than-normal cyclomatic complexity, so we ask the
// gocyclo linter to ignore it.
// nolint: gocyclo
func (p *Parser) parseActionAttribute(name string, action *model.Action, val ast.Node) {
	switch name {
	case "uses":
		p.parseUses(action, val)
	case "needs":
		if needs, ok := p.literalToStringArray(val, true); ok {
			action.Needs = needs
			p.posMap[&action.Needs] = val
		}
	case "runs":
		if runs := p.parseCommand(action, action.Runs, name, val, false); runs != nil {
			action.Runs = runs
		}
	case "args":
		if args := p.parseCommand(action, action.Args, name, val, true); args != nil {
			action.Args = args
		}
	case "env":
		if env := p.literalToStringMap(val); env != nil {
			action.Env = env
		}
		p.posMap[&action.Env] = val
	case "secrets":
		if secrets, ok := p.literalToStringArray(val, false); ok {
			action.Secrets = secrets
			p.posMap[&action.Secrets] = val
		}
	default:
		p.addWarning(val, "Unknown action attribute `%s'", name)
	}
}

// parseUses sets the action.Uses value based on the contents of the AST
// node. This function enforces formatting requirements on the value.
func (p *Parser) parseUses(action *model.Action, node ast.Node) {
	if action.Uses != nil {
		p.addWarning(node, "`uses' redefined in action `%s'", action.Identifier)
		// continue, allowing the redefinition
	}
	strVal, ok := p.literalToString(node)
	if !ok {
		return
	}

	if strVal == "" {
		action.Uses = &model.UsesInvalid{}
		p.addError(node, "`uses' value in action `%s' cannot be blank", action.Identifier)
		return
	}
	if strings.HasPrefix(strVal, "./") {
		action.Uses = &model.UsesPath{Path: strings.TrimPrefix(strVal, "./")}
		return
	}

	if strings.HasPrefix(strVal, "docker://") {
		action.Uses = &model.UsesDockerImage{Image: strings.TrimPrefix(strVal, "docker://")}
		return
	}

	tok := strings.Split(strVal, "@")
	if len(tok) != 2 {
		action.Uses = &model.UsesInvalid{Raw: strVal}
		p.addError(node, "The `uses' attribute must be a path, a Docker image, or owner/repo@ref")
		return
	}
	ref := tok[1]
	tok = strings.SplitN(tok[0], "/", 3)
	if len(tok) < 2 {
		action.Uses = &model.UsesInvalid{Raw: strVal}
		p.addError(node, "The `uses' attribute must be a path, a Docker image, or owner/repo@ref")
		return
	}
	usesRepo := &model.UsesRepository{Repository: tok[0] + "/" + tok[1], Ref: ref}
	action.Uses = usesRepo
	if len(tok) == 3 {
		usesRepo.Path = tok[2]
	}
}

// parseUses sets the action.Runs or action.Args value based on the
// contents of the AST node. This function enforces formatting
// requirements on the value.
func (p *Parser) parseCommand(action *model.Action, cmd model.Command, name string, node ast.Node, allowBlank bool) model.Command {
	if cmd != nil {
		p.addWarning(node, "`%s' redefined in action `%s'", name, action.Identifier)
		// continue, allowing the redefinition
	}

	// Is it a list?
	if _, ok := node.(*ast.ListType); ok {
		if parsed, ok := p.literalToStringArray(node, false); ok {
			return &model.ListCommand{Values: parsed}
		}
		return nil
	}

	// If not, parse a whitespace-separated string into a list.
	var raw string
	var ok bool
	if raw, ok = p.literalToString(node); !ok {
		p.addError(node, "The `%s' attribute must be a string or a list", name)
		return nil
	}
	if raw == "" && !allowBlank {
		p.addError(node, "`%s' value in action `%s' cannot be blank", name, action.Identifier)
		return nil
	}
	return &model.StringCommand{Value: raw}
}

func typename(val interface{}) string {
	switch cast := val.(type) {
	case *ast.ListType:
		return "list"
	case *ast.LiteralType:
		return strings.ToLower(cast.Token.Type.String())
	case *ast.ObjectType:
		return "object"
	default:
		return fmt.Sprintf("%T", val)
	}
}

// workflowifyItem converts an AST block to a Workflow object.
func (p *Parser) workflowifyItem(item *ast.ObjectItem) *model.Workflow {
	id, obj := p.parseBlockPreamble(item, "workflow")
	if obj == nil {
		return nil
	}

	var ok bool
	workflow := &model.Workflow{Identifier: id}
	for _, item := range obj.List.Items {
		name := p.identString(item.Keys[0].Token)

		switch name {
		case "on":
			ok = p.parseRequiredString(&workflow.On, item.Val, "workflow", name, id)
			if ok {
				p.posMap[&workflow.On] = item
			}
		case "resolves":
			if workflow.Resolves != nil {
				p.addWarning(item.Val, "`resolves' redefined in workflow `%s'", id)
				// continue, allowing the redefinition
			}
			workflow.Resolves, ok = p.literalToStringArray(item.Val, true)
			p.posMap[&workflow.Resolves] = item
			if !ok {
				p.addError(item.Val, "Invalid format for `resolves' in workflow `%s', expected list of strings", id)
				// continue, allowing workflow with no `resolves`
			}
		default:
			p.addWarning(item.Val, "Unknown workflow attribute `%s'", name)
			// continue, treat as no-op
		}
	}

	p.posMap[workflow] = item
	return workflow
}

func isAssignment(item *ast.ObjectItem) bool {
	return len(item.Keys) == 1 && item.Assign.IsValid()
}

// checkAssignmentsOnly ensures that all elements in the object are "key =
// value" pairs.
func (p *Parser) checkAssignmentsOnly(objectList *ast.ObjectList, actionID string) {
	for _, item := range objectList.Items {
		if !isAssignment(item) {
			var desc string
			if actionID == "" {
				desc = "the object"
			} else {
				desc = fmt.Sprintf("action `%s'", actionID)
			}
			p.addErrorFromObjectItem(item, "Each attribute of %s must be an assignment", desc)
			continue
		}

		child, ok := item.Val.(*ast.ObjectType)
		if ok {
			p.checkAssignmentsOnly(child.List, actionID)
		}
	}
}

func (p *Parser) addWarning(node ast.Node, format string, a ...interface{}) {
	if p.suppressSeverity < WARNING {
		p.errors = append(p.errors, newWarning(posFromNode(node), format, a...))
	}
}

func (p *Parser) addError(node ast.Node, format string, a ...interface{}) {
	if p.suppressSeverity < ERROR {
		p.errors = append(p.errors, newError(posFromNode(node), format, a...))
	}
}

func (p *Parser) addErrorFromToken(t token.Token, format string, a ...interface{}) {
	if p.suppressSeverity < ERROR {
		p.errors = append(p.errors, newError(posFromToken(t), format, a...))
	}
}

func (p *Parser) addErrorFromObjectItem(objectItem *ast.ObjectItem, format string, a ...interface{}) {
	if p.suppressSeverity < ERROR {
		p.errors = append(p.errors, newError(posFromObjectItem(objectItem), format, a...))
	}
}

func (p *Parser) addFatal(node ast.Node, format string, a ...interface{}) {
	if p.suppressSeverity < FATAL {
		p.errors = append(p.errors, newFatal(posFromNode(node), format, a...))
	}
}

// posFromNode returns an ErrorPos (file, line, and column) from an AST
// node, so we can report specific locations for each parse error.
func posFromNode(node ast.Node) ErrorPos {
	var pos *token.Pos
	switch cast := node.(type) {
	case *ast.ObjectList:
		if len(cast.Items) > 0 {
			if len(cast.Items[0].Keys) > 0 {
				pos = &cast.Items[0].Keys[0].Token.Pos
			}
		}
	case *ast.ObjectItem:
		return posFromNode(cast.Val)
	case *ast.ObjectType:
		pos = &cast.Lbrace
	case *ast.LiteralType:
		pos = &cast.Token.Pos
	case *ast.ListType:
		pos = &cast.Lbrack
	case *ast.ObjectKey:
		pos = &cast.Token.Pos
	}

	if pos == nil {
		return ErrorPos{}
	}
	return ErrorPos{File: pos.Filename, Line: pos.Line, Column: pos.Column}
}

// posFromObjectItem returns an ErrorPos from an ObjectItem. This is for
// cases where posFromNode(item) would fail because the item has no Val
// set.
func posFromObjectItem(item *ast.ObjectItem) ErrorPos {
	if len(item.Keys) > 0 {
		return posFromNode(item.Keys[0])
	}
	return ErrorPos{}
}

// posFromToken returns an ErrorPos from a Token. We can't use
// posFromNode here because Tokens aren't Nodes.
func posFromToken(token token.Token) ErrorPos {
	return ErrorPos{File: token.Pos.Filename, Line: token.Pos.Line, Column: token.Pos.Column}
}
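Note: not part of the diff; a minimal sketch of how this parser's public entry point was typically driven before removal (the file name and option are illustrative).

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/actions/workflow-parser/parser"
)

func main() {
	f, err := os.Open("main.workflow")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	cfg, err := parser.Parse(f, parser.WithSuppressWarnings())
	if err != nil {
		log.Fatal(err) // includes any parse/validation errors above the suppressed severity
	}
	fmt.Printf("parsed %d actions and %d workflows\n", len(cfg.Actions), len(cfg.Workflows))
}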
97
vendor/github.com/docker/docker/pkg/archive/example_changes.go
generated
vendored
@@ -1,97 +0,0 @@
// +build ignore

// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main

import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"

	"github.com/docker/docker/pkg/archive"
	"github.com/sirupsen/logrus"
)

var (
	flDebug  = flag.Bool("D", false, "debugging output")
	flNewDir = flag.String("newdir", "", "")
	flOldDir = flag.String("olddir", "", "")
	log      = logrus.New()
)

func main() {
	flag.Usage = func() {
		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()
	log.Out = os.Stderr
	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
		logrus.SetLevel(logrus.DebugLevel)
	}
	var newDir, oldDir string

	if len(*flNewDir) == 0 {
		var err error
		newDir, err = ioutil.TempDir("", "docker-test-newDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(newDir)
		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
			log.Fatal(err)
		}
	} else {
		newDir = *flNewDir
	}

	if len(*flOldDir) == 0 {
		oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(oldDir)
	} else {
		oldDir = *flOldDir
	}

	changes, err := archive.ChangesDirs(newDir, oldDir)
	if err != nil {
		log.Fatal(err)
	}

	a, err := archive.ExportChanges(newDir, changes)
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	i, err := io.Copy(os.Stdout, a)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}

func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
	fileData := []byte("fooo")
	for n := 0; n < numberOfFiles; n++ {
		fileName := fmt.Sprintf("file-%d", n)
		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
			return 0, err
		}
		if makeLinks {
			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
				return 0, err
			}
		}
	}
	totalSize := numberOfFiles * len(fileData)
	return totalSize, nil
}
9
vendor/github.com/hashicorp/hcl/.gitignore
generated
vendored
@@ -1,9 +0,0 @@
y.output

# ignore intellij files
.idea
*.iml
*.ipr
*.iws

*.test
13
vendor/github.com/hashicorp/hcl/.travis.yml
generated
vendored
@@ -1,13 +0,0 @@
sudo: false

language: go

go:
  - 1.x
  - tip

branches:
  only:
    - master

script: make test
354
vendor/github.com/hashicorp/hcl/LICENSE
generated
vendored
@@ -1,354 +0,0 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).

3.4. Notices

You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation

If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.

6. Disclaimer of Warranty

Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.

7. Limitation of Liability

Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.

8. Litigation

Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.

9. Miscellaneous

This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.

10. Versions of the License

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice

This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - “Incompatible With Secondary Licenses” Notice

This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

18
vendor/github.com/hashicorp/hcl/Makefile
generated
vendored
@@ -1,18 +0,0 @@
TEST?=./...

default: test

fmt: generate
	go fmt ./...

test: generate
	go get -t ./...
	go test $(TEST) $(TESTARGS)

generate:
	go generate ./...

updatedeps:
	go get -u golang.org/x/tools/cmd/stringer

.PHONY: default generate test updatedeps

125
vendor/github.com/hashicorp/hcl/README.md
generated
vendored
@@ -1,125 +0,0 @@
# HCL

[](https://godoc.org/github.com/hashicorp/hcl) [](https://travis-ci.org/hashicorp/hcl)

HCL (HashiCorp Configuration Language) is a configuration language built
by HashiCorp. The goal of HCL is to build a structured configuration language
that is both human and machine friendly for use with command-line tools, but
specifically targeted towards DevOps tools, servers, etc.

HCL is also fully JSON compatible. That is, JSON can be used as completely
valid input to a system expecting HCL. This helps makes systems
interoperable with other systems.

HCL is heavily inspired by
[libucl](https://github.com/vstakhov/libucl),
nginx configuration, and others similar.

## Why?

A common question when viewing HCL is to ask the question: why not
JSON, YAML, etc.?

Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
used a variety of configuration languages from full programming languages
such as Ruby to complete data structure languages such as JSON. What we
learned is that some people wanted human-friendly configuration languages
and some people wanted machine-friendly languages.

JSON fits a nice balance in this, but is fairly verbose and most
importantly doesn't support comments. With YAML, we found that beginners
had a really hard time determining what the actual structure was, and
ended up guessing more often than not whether to use a hyphen, colon, etc.
in order to represent some configuration key.

Full programming languages such as Ruby enable complex behavior
a configuration language shouldn't usually allow, and also forces
people to learn some set of Ruby.

Because of this, we decided to create our own configuration language
that is JSON-compatible. Our configuration language (HCL) is designed
to be written and modified by humans. The API for HCL allows JSON
as an input so that it is also machine-friendly (machines can generate
JSON instead of trying to generate HCL).

Our goal with HCL is not to alienate other configuration languages.
It is instead to provide HCL as a specialized language for our tools,
and JSON as the interoperability layer.

## Syntax

For a complete grammar, please see the parser itself. A high-level overview
of the syntax and grammar is listed here.

  * Single line comments start with `#` or `//`

  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
    are not allowed. A multi-line comment (also known as a block comment)
    terminates at the first `*/` found.

  * Values are assigned with the syntax `key = value` (whitespace doesn't
    matter). The value can be any primitive: a string, number, boolean,
    object, or list.

  * Strings are double-quoted and can contain any UTF-8 characters.
    Example: `"Hello, World"`

  * Multi-line strings start with `<<EOF` at the end of a line, and end
    with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
    Any text may be used in place of `EOF`. Example:

    ```
    <<FOO
    hello
    world
    FOO
    ```

  * Numbers are assumed to be base 10. If you prefix a number with 0x,
    it is treated as a hexadecimal. If it is prefixed with 0, it is
    treated as an octal. Numbers can be in scientific notation: "1e10".

  * Boolean values: `true`, `false`

  * Arrays can be made by wrapping it in `[]`. Example:
    `["foo", "bar", 42]`. Arrays can contain primitives,
    other arrays, and objects. As an alternative, lists
    of objects can be created with repeated blocks, using
    this structure:

    ```hcl
    service {
        key = "value"
    }

    service {
        key = "value"
    }
    ```

Objects and nested objects are created using the structure shown below:

```
variable "ami" {
  description = "the AMI to use"
}
```

This would be equivalent to the following json:

``` json
{
  "variable": {
    "ami": {
      "description": "the AMI to use"
    }
  }
}
```

## Thanks

Thanks to:

  * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
    and syntax that HCL was based off of.

  * [@fatih](https://github.com/fatih) - The rewritten HCL parser
    in pure Go (no goyacc) and support for a printer.
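
For reference, the block-to-JSON example above can also be decoded straight into a Go value by the vendored package this change removes. A minimal sketch, assuming a standard import of github.com/hashicorp/hcl; `hcl.Decode` and the `hcl` struct tag come from `decoder.go` further down in this diff, while the `Config` type and field layout here are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Config mirrors the README's `variable "ami" { ... }` example; the tag
// name "hcl" matches tagName in the vendored decoder.go below.
type Config struct {
	Variable map[string]struct {
		Description string `hcl:"description"`
	} `hcl:"variable"`
}

func main() {
	input := `
variable "ami" {
  description = "the AMI to use"
}
`
	var cfg Config
	// Decode parses the HCL string and fills cfg in one step.
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Variable["ami"].Description) // the AMI to use
}
```

According to the package documentation in hcl.go below, the same entry points also accept the equivalent JSON form shown in the README.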

19
vendor/github.com/hashicorp/hcl/appveyor.yml
generated
vendored
@@ -1,19 +0,0 @@
version: "build-{branch}-{build}"
image: Visual Studio 2015
clone_folder: c:\gopath\src\github.com\hashicorp\hcl
environment:
  GOPATH: c:\gopath
init:
  - git config --global core.autocrlf false
install:
  - cmd: >-
      echo %Path%

      go version

      go env

      go get -t ./...

build_script:
  - cmd: go test -v ./...

729
vendor/github.com/hashicorp/hcl/decoder.go
generated
vendored
@@ -1,729 +0,0 @@
package hcl
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/parser"
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// This is the tag to use with structures to have settings for HCL
|
||||
const tagName = "hcl"
|
||||
|
||||
var (
|
||||
// nodeType holds a reference to the type of ast.Node
|
||||
nodeType reflect.Type = findNodeType()
|
||||
)
|
||||
|
||||
// Unmarshal accepts a byte slice as input and writes the
|
||||
// data to the value pointed to by v.
|
||||
func Unmarshal(bs []byte, v interface{}) error {
|
||||
root, err := parse(bs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return DecodeObject(v, root)
|
||||
}
|
||||
|
||||
// Decode reads the given input and decodes it into the structure
|
||||
// given by `out`.
|
||||
func Decode(out interface{}, in string) error {
|
||||
obj, err := Parse(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return DecodeObject(out, obj)
|
||||
}
|
||||
|
||||
// DecodeObject is a lower-level version of Decode. It decodes a
|
||||
// raw Object into the given output.
|
||||
func DecodeObject(out interface{}, n ast.Node) error {
|
||||
val := reflect.ValueOf(out)
|
||||
if val.Kind() != reflect.Ptr {
|
||||
return errors.New("result must be a pointer")
|
||||
}
|
||||
|
||||
// If we have the file, we really decode the root node
|
||||
if f, ok := n.(*ast.File); ok {
|
||||
n = f.Node
|
||||
}
|
||||
|
||||
var d decoder
|
||||
return d.decode("root", n, val.Elem())
|
||||
}
|
||||
|
||||
type decoder struct {
|
||||
stack []reflect.Kind
|
||||
}
|
||||
|
||||
func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
|
||||
k := result
|
||||
|
||||
// If we have an interface with a valid value, we use that
|
||||
// for the check.
|
||||
if result.Kind() == reflect.Interface {
|
||||
elem := result.Elem()
|
||||
if elem.IsValid() {
|
||||
k = elem
|
||||
}
|
||||
}
|
||||
|
||||
// Push current onto stack unless it is an interface.
|
||||
if k.Kind() != reflect.Interface {
|
||||
d.stack = append(d.stack, k.Kind())
|
||||
|
||||
// Schedule a pop
|
||||
defer func() {
|
||||
d.stack = d.stack[:len(d.stack)-1]
|
||||
}()
|
||||
}
|
||||
|
||||
switch k.Kind() {
|
||||
case reflect.Bool:
|
||||
return d.decodeBool(name, node, result)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return d.decodeFloat(name, node, result)
|
||||
case reflect.Int, reflect.Int32, reflect.Int64:
|
||||
return d.decodeInt(name, node, result)
|
||||
case reflect.Interface:
|
||||
// When we see an interface, we make our own thing
|
||||
return d.decodeInterface(name, node, result)
|
||||
case reflect.Map:
|
||||
return d.decodeMap(name, node, result)
|
||||
case reflect.Ptr:
|
||||
return d.decodePtr(name, node, result)
|
||||
case reflect.Slice:
|
||||
return d.decodeSlice(name, node, result)
|
||||
case reflect.String:
|
||||
return d.decodeString(name, node, result)
|
||||
case reflect.Struct:
|
||||
return d.decodeStruct(name, node, result)
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
if n.Token.Type == token.BOOL {
|
||||
v, err := strconv.ParseBool(n.Token.Text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(reflect.ValueOf(v))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
|
||||
v, err := strconv.ParseFloat(n.Token.Text, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(reflect.ValueOf(v).Convert(result.Type()))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.NUMBER:
|
||||
v, err := strconv.ParseInt(n.Token.Text, 0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if result.Kind() == reflect.Interface {
|
||||
result.Set(reflect.ValueOf(int(v)))
|
||||
} else {
|
||||
result.SetInt(v)
|
||||
}
|
||||
return nil
|
||||
case token.STRING:
|
||||
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if result.Kind() == reflect.Interface {
|
||||
result.Set(reflect.ValueOf(int(v)))
|
||||
} else {
|
||||
result.SetInt(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
|
||||
// When we see an ast.Node, we retain the value to enable deferred decoding.
|
||||
// Very useful in situations where we want to preserve ast.Node information
|
||||
// like Pos
|
||||
if result.Type() == nodeType && result.CanSet() {
|
||||
result.Set(reflect.ValueOf(node))
|
||||
return nil
|
||||
}
|
||||
|
||||
var set reflect.Value
|
||||
redecode := true
|
||||
|
||||
// For testing types, ObjectType should just be treated as a list. We
|
||||
// set this to a temporary var because we want to pass in the real node.
|
||||
testNode := node
|
||||
if ot, ok := node.(*ast.ObjectType); ok {
|
||||
testNode = ot.List
|
||||
}
|
||||
|
||||
switch n := testNode.(type) {
|
||||
case *ast.ObjectList:
|
||||
// If we're at the root or we're directly within a slice, then we
|
||||
// decode objects into map[string]interface{}, otherwise we decode
|
||||
// them into lists.
|
||||
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
|
||||
var temp map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeMap(
|
||||
reflect.MapOf(
|
||||
reflect.TypeOf(""),
|
||||
tempVal.Type().Elem()))
|
||||
|
||||
set = result
|
||||
} else {
|
||||
var temp []map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
|
||||
set = result
|
||||
}
|
||||
case *ast.ObjectType:
|
||||
// If we're at the root or we're directly within a slice, then we
|
||||
// decode objects into map[string]interface{}, otherwise we decode
|
||||
// them into lists.
|
||||
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
|
||||
var temp map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeMap(
|
||||
reflect.MapOf(
|
||||
reflect.TypeOf(""),
|
||||
tempVal.Type().Elem()))
|
||||
|
||||
set = result
|
||||
} else {
|
||||
var temp []map[string]interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
|
||||
set = result
|
||||
}
|
||||
case *ast.ListType:
|
||||
var temp []interface{}
|
||||
tempVal := reflect.ValueOf(temp)
|
||||
result := reflect.MakeSlice(
|
||||
reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
|
||||
set = result
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.BOOL:
|
||||
var result bool
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.FLOAT:
|
||||
var result float64
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.NUMBER:
|
||||
var result int
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
|
||||
case token.STRING, token.HEREDOC:
|
||||
set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"%s: cannot decode into interface: %T",
|
||||
name, node)
|
||||
}
|
||||
|
||||
// Set the result to what its supposed to be, then reset
|
||||
// result so we don't reflect into this method anymore.
|
||||
result.Set(set)
|
||||
|
||||
if redecode {
|
||||
// Revisit the node so that we can use the newly instantiated
|
||||
// thing and populate it.
|
||||
if err := d.decode(name, node, result); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
|
||||
if item, ok := node.(*ast.ObjectItem); ok {
|
||||
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
|
||||
}
|
||||
|
||||
if ot, ok := node.(*ast.ObjectType); ok {
|
||||
node = ot.List
|
||||
}
|
||||
|
||||
n, ok := node.(*ast.ObjectList)
|
||||
if !ok {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
// If we have an interface, then we can address the interface,
|
||||
// but not the slice itself, so get the element but set the interface
|
||||
set := result
|
||||
if result.Kind() == reflect.Interface {
|
||||
result = result.Elem()
|
||||
}
|
||||
|
||||
resultType := result.Type()
|
||||
resultElemType := resultType.Elem()
|
||||
resultKeyType := resultType.Key()
|
||||
if resultKeyType.Kind() != reflect.String {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: map must have string keys", name),
|
||||
}
|
||||
}
|
||||
|
||||
// Make a map if it is nil
|
||||
resultMap := result
|
||||
if result.IsNil() {
|
||||
resultMap = reflect.MakeMap(
|
||||
reflect.MapOf(resultKeyType, resultElemType))
|
||||
}
|
||||
|
||||
// Go through each element and decode it.
|
||||
done := make(map[string]struct{})
|
||||
for _, item := range n.Items {
|
||||
if item.Val == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// github.com/hashicorp/terraform/issue/5740
|
||||
if len(item.Keys) == 0 {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: map must have string keys", name),
|
||||
}
|
||||
}
|
||||
|
||||
// Get the key we're dealing with, which is the first item
|
||||
keyStr := item.Keys[0].Token.Value().(string)
|
||||
|
||||
// If we've already processed this key, then ignore it
|
||||
if _, ok := done[keyStr]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Determine the value. If we have more than one key, then we
|
||||
// get the objectlist of only these keys.
|
||||
itemVal := item.Val
|
||||
if len(item.Keys) > 1 {
|
||||
itemVal = n.Filter(keyStr)
|
||||
done[keyStr] = struct{}{}
|
||||
}
|
||||
|
||||
// Make the field name
|
||||
fieldName := fmt.Sprintf("%s.%s", name, keyStr)
|
||||
|
||||
// Get the key/value as reflection values
|
||||
key := reflect.ValueOf(keyStr)
|
||||
val := reflect.Indirect(reflect.New(resultElemType))
|
||||
|
||||
// If we have a pre-existing value in the map, use that
|
||||
oldVal := resultMap.MapIndex(key)
|
||||
if oldVal.IsValid() {
|
||||
val.Set(oldVal)
|
||||
}
|
||||
|
||||
// Decode!
|
||||
if err := d.decode(fieldName, itemVal, val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set the value on the map
|
||||
resultMap.SetMapIndex(key, val)
|
||||
}
|
||||
|
||||
// Set the final map if we can
|
||||
set.Set(resultMap)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
|
||||
// Create an element of the concrete (non pointer) type and decode
|
||||
// into that. Then set the value of the pointer to this type.
|
||||
resultType := result.Type()
|
||||
resultElemType := resultType.Elem()
|
||||
val := reflect.New(resultElemType)
|
||||
if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
result.Set(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
|
||||
// If we have an interface, then we can address the interface,
|
||||
// but not the slice itself, so get the element but set the interface
|
||||
set := result
|
||||
if result.Kind() == reflect.Interface {
|
||||
result = result.Elem()
|
||||
}
|
||||
// Create the slice if it isn't nil
|
||||
resultType := result.Type()
|
||||
resultElemType := resultType.Elem()
|
||||
if result.IsNil() {
|
||||
resultSliceType := reflect.SliceOf(resultElemType)
|
||||
result = reflect.MakeSlice(
|
||||
resultSliceType, 0, 0)
|
||||
}
|
||||
|
||||
// Figure out the items we'll be copying into the slice
|
||||
var items []ast.Node
|
||||
switch n := node.(type) {
|
||||
case *ast.ObjectList:
|
||||
items = make([]ast.Node, len(n.Items))
|
||||
for i, item := range n.Items {
|
||||
items[i] = item
|
||||
}
|
||||
case *ast.ObjectType:
|
||||
items = []ast.Node{n}
|
||||
case *ast.ListType:
|
||||
items = n.List
|
||||
default:
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("unknown slice type: %T", node),
|
||||
}
|
||||
}
|
||||
|
||||
for i, item := range items {
|
||||
fieldName := fmt.Sprintf("%s[%d]", name, i)
|
||||
|
||||
// Decode
|
||||
val := reflect.Indirect(reflect.New(resultElemType))
|
||||
|
||||
// if item is an object that was decoded from ambiguous JSON and
|
||||
// flattened, make sure it's expanded if it needs to decode into a
|
||||
// defined structure.
|
||||
item := expandObject(item, val)
|
||||
|
||||
if err := d.decode(fieldName, item, val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Append it onto the slice
|
||||
result = reflect.Append(result, val)
|
||||
}
|
||||
|
||||
set.Set(result)
|
||||
return nil
|
||||
}
|
||||
|
||||
// expandObject detects if an ambiguous JSON object was flattened to a List which
|
||||
// should be decoded into a struct, and expands the ast to properly deocode.
|
||||
func expandObject(node ast.Node, result reflect.Value) ast.Node {
|
||||
item, ok := node.(*ast.ObjectItem)
|
||||
if !ok {
|
||||
return node
|
||||
}
|
||||
|
||||
elemType := result.Type()
|
||||
|
||||
// our target type must be a struct
|
||||
switch elemType.Kind() {
|
||||
case reflect.Ptr:
|
||||
switch elemType.Elem().Kind() {
|
||||
case reflect.Struct:
|
||||
//OK
|
||||
default:
|
||||
return node
|
||||
}
|
||||
case reflect.Struct:
|
||||
//OK
|
||||
default:
|
||||
return node
|
||||
}
|
||||
|
||||
// A list value will have a key and field name. If it had more fields,
|
||||
// it wouldn't have been flattened.
|
||||
if len(item.Keys) != 2 {
|
||||
return node
|
||||
}
|
||||
|
||||
keyToken := item.Keys[0].Token
|
||||
item.Keys = item.Keys[1:]
|
||||
|
||||
// we need to un-flatten the ast enough to decode
|
||||
newNode := &ast.ObjectItem{
|
||||
Keys: []*ast.ObjectKey{
|
||||
&ast.ObjectKey{
|
||||
Token: keyToken,
|
||||
},
|
||||
},
|
||||
Val: &ast.ObjectType{
|
||||
List: &ast.ObjectList{
|
||||
Items: []*ast.ObjectItem{item},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return newNode
|
||||
}
|
||||
|
||||
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
|
||||
switch n := node.(type) {
|
||||
case *ast.LiteralType:
|
||||
switch n.Token.Type {
|
||||
case token.NUMBER:
|
||||
result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
|
||||
return nil
|
||||
case token.STRING, token.HEREDOC:
|
||||
result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unknown type for string %T", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
|
||||
var item *ast.ObjectItem
|
||||
if it, ok := node.(*ast.ObjectItem); ok {
|
||||
item = it
|
||||
node = it.Val
|
||||
}
|
||||
|
||||
if ot, ok := node.(*ast.ObjectType); ok {
|
||||
node = ot.List
|
||||
}
|
||||
|
||||
// Handle the special case where the object itself is a literal. Previously
|
||||
// the yacc parser would always ensure top-level elements were arrays. The new
|
||||
// parser does not make the same guarantees, thus we need to convert any
|
||||
// top-level literal elements into a list.
|
||||
if _, ok := node.(*ast.LiteralType); ok && item != nil {
|
||||
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
|
||||
}
|
||||
|
||||
list, ok := node.(*ast.ObjectList)
|
||||
if !ok {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
|
||||
}
|
||||
}
|
||||
|
||||
// This slice will keep track of all the structs we'll be decoding.
|
||||
// There can be more than one struct if there are embedded structs
|
||||
// that are squashed.
|
||||
structs := make([]reflect.Value, 1, 5)
|
||||
structs[0] = result
|
||||
|
||||
// Compile the list of all the fields that we're going to be decoding
|
||||
// from all the structs.
|
||||
type field struct {
|
||||
field reflect.StructField
|
||||
val reflect.Value
|
||||
}
|
||||
fields := []field{}
|
||||
for len(structs) > 0 {
|
||||
structVal := structs[0]
|
||||
structs = structs[1:]
|
||||
|
||||
structType := structVal.Type()
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
fieldType := structType.Field(i)
|
||||
tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
|
||||
|
||||
// Ignore fields with tag name "-"
|
||||
if tagParts[0] == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
if fieldType.Anonymous {
|
||||
fieldKind := fieldType.Type.Kind()
|
||||
if fieldKind != reflect.Struct {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: unsupported type to struct: %s",
|
||||
fieldType.Name, fieldKind),
|
||||
}
|
||||
}
|
||||
|
||||
// We have an embedded field. We "squash" the fields down
|
||||
// if specified in the tag.
|
||||
squash := false
|
||||
for _, tag := range tagParts[1:] {
|
||||
if tag == "squash" {
|
||||
squash = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if squash {
|
||||
structs = append(
|
||||
structs, result.FieldByName(fieldType.Name))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Normal struct field, store it away
|
||||
fields = append(fields, field{fieldType, structVal.Field(i)})
|
||||
}
|
||||
}
|
||||
|
||||
usedKeys := make(map[string]struct{})
|
||||
decodedFields := make([]string, 0, len(fields))
|
||||
decodedFieldsVal := make([]reflect.Value, 0)
|
||||
unusedKeysVal := make([]reflect.Value, 0)
|
||||
for _, f := range fields {
|
||||
field, fieldValue := f.field, f.val
|
||||
if !fieldValue.IsValid() {
|
||||
// This should never happen
|
||||
panic("field is not valid")
|
||||
}
|
||||
|
||||
// If we can't set the field, then it is unexported or something,
|
||||
// and we just continue onwards.
|
||||
if !fieldValue.CanSet() {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldName := field.Name
|
||||
|
||||
tagValue := field.Tag.Get(tagName)
|
||||
tagParts := strings.SplitN(tagValue, ",", 2)
|
||||
if len(tagParts) >= 2 {
|
||||
switch tagParts[1] {
|
||||
case "decodedFields":
|
||||
decodedFieldsVal = append(decodedFieldsVal, fieldValue)
|
||||
continue
|
||||
case "key":
|
||||
if item == nil {
|
||||
return &parser.PosError{
|
||||
Pos: node.Pos(),
|
||||
Err: fmt.Errorf("%s: %s asked for 'key', impossible",
|
||||
name, fieldName),
|
||||
}
|
||||
}
|
||||
|
||||
fieldValue.SetString(item.Keys[0].Token.Value().(string))
|
||||
continue
|
||||
case "unusedKeys":
|
||||
unusedKeysVal = append(unusedKeysVal, fieldValue)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if tagParts[0] != "" {
|
||||
fieldName = tagParts[0]
|
||||
}
|
||||
|
||||
// Determine the element we'll use to decode. If it is a single
|
||||
// match (only object with the field), then we decode it exactly.
|
||||
// If it is a prefix match, then we decode the matches.
|
||||
filter := list.Filter(fieldName)
|
||||
|
||||
prefixMatches := filter.Children()
|
||||
matches := filter.Elem()
|
||||
if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Track the used key
|
||||
usedKeys[fieldName] = struct{}{}
|
||||
|
||||
// Create the field name and decode. We range over the elements
|
||||
// because we actually want the value.
|
||||
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
|
||||
if len(prefixMatches.Items) > 0 {
|
||||
if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, match := range matches.Items {
|
||||
var decodeNode ast.Node = match.Val
|
||||
if ot, ok := decodeNode.(*ast.ObjectType); ok {
|
||||
decodeNode = &ast.ObjectList{Items: ot.List.Items}
|
||||
}
|
||||
|
||||
if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
decodedFields = append(decodedFields, field.Name)
|
||||
}
|
||||
|
||||
if len(decodedFieldsVal) > 0 {
|
||||
// Sort it so that it is deterministic
|
||||
sort.Strings(decodedFields)
|
||||
|
||||
for _, v := range decodedFieldsVal {
|
||||
v.Set(reflect.ValueOf(decodedFields))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// findNodeType returns the type of ast.Node
|
||||
func findNodeType() reflect.Type {
|
||||
var nodeContainer struct {
|
||||
Node ast.Node
|
||||
}
|
||||
value := reflect.ValueOf(nodeContainer).FieldByName("Node")
|
||||
return value.Type()
|
||||
}
|
||||

3
vendor/github.com/hashicorp/hcl/go.mod
generated
vendored
@@ -1,3 +0,0 @@
module github.com/hashicorp/hcl

require github.com/davecgh/go-spew v1.1.1

2
vendor/github.com/hashicorp/hcl/go.sum
generated
vendored
@@ -1,2 +0,0 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

11
vendor/github.com/hashicorp/hcl/hcl.go
generated
vendored
@@ -1,11 +0,0 @@
// Package hcl decodes HCL into usable Go structures.
//
// hcl input can come in either pure HCL format or JSON format.
// It can be parsed into an AST, and then decoded into a structure,
// or it can be decoded directly from a string into a structure.
//
// If you choose to parse HCL into a raw AST, the benefit is that you
// can write custom visitor implementations to implement custom
// semantic checks. By default, HCL does not perform any semantic
// checks.
package hcl

219
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
generated
vendored
@@ -1,219 +0,0 @@
// Package ast declares the types used to represent syntax trees for HCL
|
||||
// (HashiCorp Configuration Language)
|
||||
package ast
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// Node is an element in the abstract syntax tree.
|
||||
type Node interface {
|
||||
node()
|
||||
Pos() token.Pos
|
||||
}
|
||||
|
||||
func (File) node() {}
|
||||
func (ObjectList) node() {}
|
||||
func (ObjectKey) node() {}
|
||||
func (ObjectItem) node() {}
|
||||
func (Comment) node() {}
|
||||
func (CommentGroup) node() {}
|
||||
func (ObjectType) node() {}
|
||||
func (LiteralType) node() {}
|
||||
func (ListType) node() {}
|
||||
|
||||
// File represents a single HCL file
|
||||
type File struct {
|
||||
Node Node // usually a *ObjectList
|
||||
Comments []*CommentGroup // list of all comments in the source
|
||||
}
|
||||
|
||||
func (f *File) Pos() token.Pos {
|
||||
return f.Node.Pos()
|
||||
}
|
||||
|
||||
// ObjectList represents a list of ObjectItems. An HCL file itself is an
|
||||
// ObjectList.
|
||||
type ObjectList struct {
|
||||
Items []*ObjectItem
|
||||
}
|
||||
|
||||
func (o *ObjectList) Add(item *ObjectItem) {
|
||||
o.Items = append(o.Items, item)
|
||||
}
|
||||
|
||||
// Filter filters out the objects with the given key list as a prefix.
|
||||
//
|
||||
// The returned list of objects contain ObjectItems where the keys have
|
||||
// this prefix already stripped off. This might result in objects with
|
||||
// zero-length key lists if they have no children.
|
||||
//
|
||||
// If no matches are found, an empty ObjectList (non-nil) is returned.
|
||||
func (o *ObjectList) Filter(keys ...string) *ObjectList {
|
||||
var result ObjectList
|
||||
for _, item := range o.Items {
|
||||
// If there aren't enough keys, then ignore this
|
||||
if len(item.Keys) < len(keys) {
|
||||
continue
|
||||
}
|
||||
|
||||
match := true
|
||||
for i, key := range item.Keys[:len(keys)] {
|
||||
key := key.Token.Value().(string)
|
||||
if key != keys[i] && !strings.EqualFold(key, keys[i]) {
|
||||
match = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if !match {
|
||||
continue
|
||||
}
|
||||
|
||||
// Strip off the prefix from the children
|
||||
newItem := *item
|
||||
newItem.Keys = newItem.Keys[len(keys):]
|
||||
result.Add(&newItem)
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
|
||||
// Children returns further nested objects (key length > 0) within this
|
||||
// ObjectList. This should be used with Filter to get at child items.
|
||||
func (o *ObjectList) Children() *ObjectList {
|
||||
var result ObjectList
|
||||
for _, item := range o.Items {
|
||||
if len(item.Keys) > 0 {
|
||||
result.Add(item)
|
||||
}
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
|
||||
// Elem returns items in the list that are direct element assignments
|
||||
// (key length == 0). This should be used with Filter to get at elements.
|
||||
func (o *ObjectList) Elem() *ObjectList {
|
||||
var result ObjectList
|
||||
for _, item := range o.Items {
|
||||
if len(item.Keys) == 0 {
|
||||
result.Add(item)
|
||||
}
|
||||
}
|
||||
|
||||
return &result
|
||||
}
|
||||
|
||||
func (o *ObjectList) Pos() token.Pos {
|
||||
// always returns the uninitiliazed position
|
||||
return o.Items[0].Pos()
|
||||
}
|
||||
|
||||
// ObjectItem represents a HCL Object Item. An item is represented with a key
|
||||
// (or keys). It can be an assignment or an object (both normal and nested)
|
||||
type ObjectItem struct {
|
||||
// keys is only one length long if it's of type assignment. If it's a
|
||||
// nested object it can be larger than one. In that case "assign" is
|
||||
// invalid as there is no assignments for a nested object.
|
||||
Keys []*ObjectKey
|
||||
|
||||
// assign contains the position of "=", if any
|
||||
Assign token.Pos
|
||||
|
||||
// val is the item itself. It can be an object,list, number, bool or a
|
||||
// string. If key length is larger than one, val can be only of type
|
||||
// Object.
|
||||
Val Node
|
||||
|
||||
LeadComment *CommentGroup // associated lead comment
|
||||
LineComment *CommentGroup // associated line comment
|
||||
}
|
||||
|
||||
func (o *ObjectItem) Pos() token.Pos {
|
||||
// I'm not entirely sure what causes this, but removing this causes
|
||||
// a test failure. We should investigate at some point.
|
||||
if len(o.Keys) == 0 {
|
||||
return token.Pos{}
|
||||
}
|
||||
|
||||
return o.Keys[0].Pos()
|
||||
}
|
||||
|
||||
// ObjectKeys are either an identifier or of type string.
|
||||
type ObjectKey struct {
|
||||
Token token.Token
|
||||
}
|
||||
|
||||
func (o *ObjectKey) Pos() token.Pos {
|
||||
return o.Token.Pos
|
||||
}
|
||||
|
||||
// LiteralType represents a literal of basic type. Valid types are:
|
||||
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
|
||||
type LiteralType struct {
|
||||
Token token.Token
|
||||
|
||||
// comment types, only used when in a list
|
||||
LeadComment *CommentGroup
|
||||
LineComment *CommentGroup
|
||||
}
|
||||
|
||||
func (l *LiteralType) Pos() token.Pos {
|
||||
return l.Token.Pos
|
||||
}
|
||||
|
||||
// ListStatement represents a HCL List type
|
||||
type ListType struct {
|
||||
Lbrack token.Pos // position of "["
|
||||
Rbrack token.Pos // position of "]"
|
||||
List []Node // the elements in lexical order
|
||||
}
|
||||
|
||||
func (l *ListType) Pos() token.Pos {
|
||||
return l.Lbrack
|
||||
}
|
||||
|
||||
func (l *ListType) Add(node Node) {
|
||||
l.List = append(l.List, node)
|
||||
}
|
||||
|
||||
// ObjectType represents a HCL Object Type
|
||||
type ObjectType struct {
|
||||
Lbrace token.Pos // position of "{"
|
||||
Rbrace token.Pos // position of "}"
|
||||
List *ObjectList // the nodes in lexical order
|
||||
}
|
||||
|
||||
func (o *ObjectType) Pos() token.Pos {
|
||||
return o.Lbrace
|
||||
}
|
||||
|
||||
// Comment node represents a single //, # style or /*- style commment
|
||||
type Comment struct {
|
||||
Start token.Pos // position of / or #
|
||||
Text string
|
||||
}
|
||||
|
||||
func (c *Comment) Pos() token.Pos {
|
||||
return c.Start
|
||||
}
|
||||
|
||||
// CommentGroup node represents a sequence of comments with no other tokens and
|
||||
// no empty lines between.
|
||||
type CommentGroup struct {
|
||||
List []*Comment // len(List) > 0
|
||||
}
|
||||
|
||||
func (c *CommentGroup) Pos() token.Pos {
|
||||
return c.List[0].Pos()
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
// GoStringer
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
|
||||
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
|
||||

52
vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
generated
vendored
@@ -1,52 +0,0 @@
package ast

import "fmt"

// WalkFunc describes a function to be called for each node during a Walk. The
// returned node can be used to rewrite the AST. Walking stops the returned
// bool is false.
type WalkFunc func(Node) (Node, bool)

// Walk traverses an AST in depth-first order: It starts by calling fn(node);
// node must not be nil. If fn returns true, Walk invokes fn recursively for
// each of the non-nil children of node, followed by a call of fn(nil). The
// returned node of fn can be used to rewrite the passed node to fn.
func Walk(node Node, fn WalkFunc) Node {
	rewritten, ok := fn(node)
	if !ok {
		return rewritten
	}

	switch n := node.(type) {
	case *File:
		n.Node = Walk(n.Node, fn)
	case *ObjectList:
		for i, item := range n.Items {
			n.Items[i] = Walk(item, fn).(*ObjectItem)
		}
	case *ObjectKey:
		// nothing to do
	case *ObjectItem:
		for i, k := range n.Keys {
			n.Keys[i] = Walk(k, fn).(*ObjectKey)
		}

		if n.Val != nil {
			n.Val = Walk(n.Val, fn)
		}
	case *LiteralType:
		// nothing to do
	case *ListType:
		for i, l := range n.List {
			n.List[i] = Walk(l, fn)
		}
	case *ObjectType:
		n.List = Walk(n.List, fn).(*ObjectList)
	default:
		// should we panic here?
		fmt.Printf("unknown type: %T\n", n)
	}

	fn(nil)
	return rewritten
}
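
The Walk contract above (call fn on each node, keep recursing while the returned bool is true, and use the returned node to rewrite the tree) is easiest to see with a small driver. A hypothetical sketch that only relies on identifiers visible in this diff (`parser.Parse` from parser.go below, and the ast types above); the input string is made up:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := []byte(`region = "us-east-1"`)

	// parser.Parse returns the *ast.File root node (see parser.go below).
	file, err := parser.Parse(src)
	if err != nil {
		panic(err)
	}

	// Print every literal token in the tree. Walk also calls fn(nil) after
	// visiting a node's children, so the callback must tolerate nil.
	// Returning (n, true) keeps walking without rewriting anything.
	ast.Walk(file, func(n ast.Node) (ast.Node, bool) {
		if lit, ok := n.(*ast.LiteralType); ok {
			fmt.Println("literal:", lit.Token.Text)
		}
		return n, true
	})
}
```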

17
vendor/github.com/hashicorp/hcl/hcl/parser/error.go
generated
vendored
@@ -1,17 +0,0 @@
package parser

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

// PosError is a parse error that contains a position.
type PosError struct {
	Pos token.Pos
	Err error
}

func (e *PosError) Error() string {
	return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
}

532
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
generated
vendored
@@ -1,532 +0,0 @@
// Package parser implements a parser for HCL (HashiCorp Configuration
|
||||
// Language)
|
||||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
"github.com/hashicorp/hcl/hcl/scanner"
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
sc *scanner.Scanner
|
||||
|
||||
// Last read token
|
||||
tok token.Token
|
||||
commaPrev token.Token
|
||||
|
||||
comments []*ast.CommentGroup
|
||||
leadComment *ast.CommentGroup // last lead comment
|
||||
lineComment *ast.CommentGroup // last line comment
|
||||
|
||||
enableTrace bool
|
||||
indent int
|
||||
n int // buffer size (max = 1)
|
||||
}
|
||||
|
||||
func newParser(src []byte) *Parser {
|
||||
return &Parser{
|
||||
sc: scanner.New(src),
|
||||
}
|
||||
}
|
||||
|
||||
// Parse returns the fully parsed source and returns the abstract syntax tree.
|
||||
func Parse(src []byte) (*ast.File, error) {
|
||||
// normalize all line endings
|
||||
// since the scanner and output only work with "\n" line endings, we may
|
||||
// end up with dangling "\r" characters in the parsed data.
|
||||
src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
|
||||
|
||||
p := newParser(src)
|
||||
return p.Parse()
|
||||
}
|
||||
|
||||
var errEofToken = errors.New("EOF token found")
|
||||
|
||||
// Parse returns the fully parsed source and returns the abstract syntax tree.
|
||||
func (p *Parser) Parse() (*ast.File, error) {
|
||||
f := &ast.File{}
|
||||
var err, scerr error
|
||||
p.sc.Error = func(pos token.Pos, msg string) {
|
||||
scerr = &PosError{Pos: pos, Err: errors.New(msg)}
|
||||
}
|
||||
|
||||
f.Node, err = p.objectList(false)
|
||||
if scerr != nil {
|
||||
return nil, scerr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f.Comments = p.comments
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// objectList parses a list of items within an object (generally k/v pairs).
|
||||
// The parameter" obj" tells this whether to we are within an object (braces:
|
||||
// '{', '}') or just at the top level. If we're within an object, we end
|
||||
// at an RBRACE.
|
||||
func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
|
||||
defer un(trace(p, "ParseObjectList"))
|
||||
node := &ast.ObjectList{}
|
||||
|
||||
for {
|
||||
if obj {
|
||||
tok := p.scan()
|
||||
p.unscan()
|
||||
if tok.Type == token.RBRACE {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
n, err := p.objectItem()
|
||||
if err == errEofToken {
|
||||
break // we are finished
|
||||
}
|
||||
|
||||
// we don't return a nil node, because might want to use already
|
||||
// collected items.
|
||||
if err != nil {
|
||||
return node, err
|
||||
}
|
||||
|
||||
node.Add(n)
|
||||
|
||||
// object lists can be optionally comma-delimited e.g. when a list of maps
|
||||
// is being expressed, so a comma is allowed here - it's simply consumed
|
||||
tok := p.scan()
|
||||
if tok.Type != token.COMMA {
|
||||
p.unscan()
|
||||
}
|
||||
}
|
||||
return node, nil
|
||||
}
|
||||
|
||||
func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
|
||||
endline = p.tok.Pos.Line
|
||||
|
||||
// count the endline if it's multiline comment, ie starting with /*
|
||||
if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
|
||||
// don't use range here - no need to decode Unicode code points
|
||||
for i := 0; i < len(p.tok.Text); i++ {
|
||||
if p.tok.Text[i] == '\n' {
|
||||
endline++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
|
||||
p.tok = p.sc.Scan()
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
|
||||
var list []*ast.Comment
|
||||
endline = p.tok.Pos.Line
|
||||
|
||||
for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
|
||||
var comment *ast.Comment
|
||||
comment, endline = p.consumeComment()
|
||||
list = append(list, comment)
|
||||
}
|
||||
|
||||
// add comment group to the comments list
|
||||
comments = &ast.CommentGroup{List: list}
|
||||
p.comments = append(p.comments, comments)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// objectItem parses a single object item
|
||||
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
|
||||
defer un(trace(p, "ParseObjectItem"))
|
||||
|
||||
keys, err := p.objectKey()
|
||||
if len(keys) > 0 && err == errEofToken {
|
||||
// We ignore eof token here since it is an error if we didn't
|
||||
// receive a value (but we did receive a key) for the item.
|
||||
err = nil
|
||||
}
|
||||
if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
|
||||
// This is a strange boolean statement, but what it means is:
|
||||
// We have keys with no value, and we're likely in an object
|
||||
// (since RBrace ends an object). For this, we set err to nil so
|
||||
// we continue and get the error below of having the wrong value
|
||||
// type.
|
||||
err = nil
|
||||
|
||||
// Reset the token type so we don't think it completed fine. See
|
||||
// objectType which uses p.tok.Type to check if we're done with
|
||||
// the object.
|
||||
p.tok.Type = token.EOF
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o := &ast.ObjectItem{
|
||||
Keys: keys,
|
||||
}
|
||||
|
||||
if p.leadComment != nil {
|
||||
o.LeadComment = p.leadComment
|
||||
p.leadComment = nil
|
||||
}
|
||||
|
||||
switch p.tok.Type {
|
||||
case token.ASSIGN:
|
||||
o.Assign = p.tok.Pos
|
||||
o.Val, err = p.object()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case token.LBRACE:
|
||||
o.Val, err = p.objectType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
keyStr := make([]string, 0, len(keys))
|
||||
for _, k := range keys {
|
||||
keyStr = append(keyStr, k.Token.Text)
|
||||
}
|
||||
|
||||
return nil, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf(
|
||||
"key '%s' expected start of object ('{') or assignment ('=')",
|
||||
strings.Join(keyStr, " ")),
|
||||
}
|
||||
}
|
||||
|
||||
// key=#comment
|
||||
// val
|
||||
if p.lineComment != nil {
|
||||
o.LineComment, p.lineComment = p.lineComment, nil
|
||||
}
|
||||
|
||||
// do a look-ahead for line comment
|
||||
p.scan()
|
||||
if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
|
||||
o.LineComment = p.lineComment
|
||||
p.lineComment = nil
|
||||
}
|
||||
p.unscan()
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// objectKey parses an object key and returns a ObjectKey AST
|
||||
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
|
||||
keyCount := 0
|
||||
keys := make([]*ast.ObjectKey, 0)
|
||||
|
||||
for {
|
||||
tok := p.scan()
|
||||
switch tok.Type {
|
||||
case token.EOF:
|
||||
// It is very important to also return the keys here as well as
|
||||
// the error. This is because we need to be able to tell if we
|
||||
// did parse keys prior to finding the EOF, or if we just found
|
||||
// a bare EOF.
|
||||
return keys, errEofToken
|
||||
case token.ASSIGN:
|
||||
// assignment or object only, but not nested objects. this is not
|
||||
// allowed: `foo bar = {}`
|
||||
if keyCount > 1 {
|
||||
return nil, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
|
||||
}
|
||||
}
|
||||
|
||||
if keyCount == 0 {
|
||||
return nil, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: errors.New("no object keys found!"),
|
||||
}
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
case token.LBRACE:
|
||||
var err error
|
||||
|
||||
// If we have no keys, then it is a syntax error. i.e. {{}} is not
|
||||
// allowed.
|
||||
if len(keys) == 0 {
|
||||
err = &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
|
||||
}
|
||||
}
|
||||
|
||||
// object
|
||||
return keys, err
|
||||
case token.IDENT, token.STRING:
|
||||
keyCount++
|
||||
keys = append(keys, &ast.ObjectKey{Token: p.tok})
|
||||
case token.ILLEGAL:
|
||||
return keys, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("illegal character"),
|
||||
}
|
||||
default:
|
||||
return keys, &PosError{
|
||||
Pos: p.tok.Pos,
|
||||
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// object parses any type of object, such as number, bool, string, object or
|
||||
// list.
|
||||
func (p *Parser) object() (ast.Node, error) {
|
||||
defer un(trace(p, "ParseType"))
|
||||
tok := p.scan()
|
||||
|
||||
switch tok.Type {
|
||||
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
|
||||
return p.literalType()
|
||||
case token.LBRACE:
|
||||
return p.objectType()
|
||||
case token.LBRACK:
|
||||
return p.listType()
|
||||
case token.COMMENT:
|
||||
// implement comment
|
||||
case token.EOF:
|
||||
return nil, errEofToken
|
||||
}
|
||||
|
||||
return nil, &PosError{
|
||||
Pos: tok.Pos,
|
||||
Err: fmt.Errorf("Unknown token: %+v", tok),
|
||||
}
|
||||
}
|
||||
|
||||
// objectType parses an object type and returns an ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
	defer un(trace(p, "ParseObjectType"))

	// we assume that the currently scanned token is an LBRACE
	o := &ast.ObjectType{
		Lbrace: p.tok.Pos,
	}

	l, err := p.objectList(true)

	// if we hit RBRACE, we are good to go (means we parsed all Items); if it's
	// not an RBRACE, it's a syntax error and we just return it.
	if err != nil && p.tok.Type != token.RBRACE {
		return nil, err
	}

	// No error, scan and expect the ending to be a brace
	if tok := p.scan(); tok.Type != token.RBRACE {
		return nil, &PosError{
			Pos: tok.Pos,
			Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
		}
	}

	o.List = l
	o.Rbrace = p.tok.Pos // advanced via parseObjectList
	return o, nil
}

// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
	defer un(trace(p, "ParseListType"))

	// we assume that the currently scanned token is an LBRACK
	l := &ast.ListType{
		Lbrack: p.tok.Pos,
	}

	needComma := false
	for {
		tok := p.scan()
		if needComma {
			switch tok.Type {
			case token.COMMA, token.RBRACK:
			default:
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error parsing list, expected comma or list end, got: %s",
						tok.Type),
				}
			}
		}
		switch tok.Type {
		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
			node, err := p.literalType()
			if err != nil {
				return nil, err
			}

			// If there is a lead comment, apply it
			if p.leadComment != nil {
				node.LeadComment = p.leadComment
				p.leadComment = nil
			}

			l.Add(node)
			needComma = true
		case token.COMMA:
			// get next list item or we are at the end
			// do a look-ahead for line comment
			p.scan()
			if p.lineComment != nil && len(l.List) > 0 {
				lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
				if ok {
					lit.LineComment = p.lineComment
					l.List[len(l.List)-1] = lit
					p.lineComment = nil
				}
			}
			p.unscan()

			needComma = false
			continue
		case token.LBRACE:
			// Looks like a nested object, so parse it out
			node, err := p.objectType()
			if err != nil {
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error while trying to parse object within list: %s", err),
				}
			}
			l.Add(node)
			needComma = true
		case token.LBRACK:
			node, err := p.listType()
			if err != nil {
				return nil, &PosError{
					Pos: tok.Pos,
					Err: fmt.Errorf(
						"error while trying to parse list within list: %s", err),
				}
			}
			l.Add(node)
		case token.RBRACK:
			// finished
			l.Rbrack = p.tok.Pos
			return l, nil
		default:
			return nil, &PosError{
				Pos: tok.Pos,
				Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
			}
		}
	}
}

// literalType parses a literal type and returns a LiteralType AST
func (p *Parser) literalType() (*ast.LiteralType, error) {
	defer un(trace(p, "ParseLiteral"))

	return &ast.LiteralType{
		Token: p.tok,
	}, nil
}

// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead. In the process, it collects any
// comment groups encountered, and remembers the last lead and line comments.
func (p *Parser) scan() token.Token {
	// If we have a token on the buffer, then return it.
	if p.n != 0 {
		p.n = 0
		return p.tok
	}

	// Otherwise read the next token from the scanner and save it to the buffer
	// in case we unscan later.
	prev := p.tok
	p.tok = p.sc.Scan()

	if p.tok.Type == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
		// p.tok.Pos.Line, prev.Pos.Line, endline)
		if p.tok.Pos.Line == prev.Pos.Line {
			// The comment is on the same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.tok.Pos.Line != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok.Type == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
			switch p.tok.Type {
			case token.RBRACE, token.RBRACK:
				// Do not count for these cases
			default:
				// The next token is following on the line immediately after the
				// comment group, thus the last comment group is a lead comment.
				p.leadComment = comment
			}
		}

	}

	return p.tok
}

// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() {
	p.n = 1
}

// ----------------------------------------------------------------------------
// Parsing support

func (p *Parser) printTrace(a ...interface{}) {
	if !p.enableTrace {
		return
	}

	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
	const n = len(dots)
	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)

	i := 2 * p.indent
	for i > n {
		fmt.Print(dots)
		i -= n
	}
	// i <= n
	fmt.Print(dots[0:i])
	fmt.Println(a...)
}

func trace(p *Parser, msg string) *Parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}

// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
	p.indent--
	p.printTrace(")")
}
652
vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
generated
vendored
@@ -1,652 +0,0 @@
|
||||
// Package scanner implements a scanner for HCL (HashiCorp Configuration
|
||||
// Language) source text.
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// eof represents a marker rune for the end of the reader.
|
||||
const eof = rune(0)
|
||||
|
||||
// Scanner defines a lexical scanner
|
||||
type Scanner struct {
|
||||
buf *bytes.Buffer // Source buffer for advancing and scanning
|
||||
src []byte // Source buffer for immutable access
|
||||
|
||||
// Source Position
|
||||
srcPos token.Pos // current position
|
||||
prevPos token.Pos // previous position, used for peek() method
|
||||
|
||||
lastCharLen int // length of last character in bytes
|
||||
lastLineLen int // length of last line in characters (for correct column reporting)
|
||||
|
||||
tokStart int // token text start position
|
||||
tokEnd int // token text end position
|
||||
|
||||
// Error is called for each error encountered. If no Error
|
||||
// function is set, the error is reported to os.Stderr.
|
||||
Error func(pos token.Pos, msg string)
|
||||
|
||||
// ErrorCount is incremented by one for each error encountered.
|
||||
ErrorCount int
|
||||
|
||||
// tokPos is the start position of most recently scanned token; set by
|
||||
// Scan. The Filename field is always left untouched by the Scanner. If
|
||||
// an error is reported (via Error) and Position is invalid, the scanner is
|
||||
// not inside a token.
|
||||
tokPos token.Pos
|
||||
}
|
||||
|
||||
// New creates and initializes a new instance of Scanner using src as
|
||||
// its source content.
|
||||
func New(src []byte) *Scanner {
|
||||
// even though we accept a src, we read from an io.Reader compatible type
|
||||
// (*bytes.Buffer). So in the future we might easily change it to streaming
|
||||
// read.
|
||||
b := bytes.NewBuffer(src)
|
||||
s := &Scanner{
|
||||
buf: b,
|
||||
src: src,
|
||||
}
|
||||
|
||||
// srcPosition always starts with 1
|
||||
s.srcPos.Line = 1
|
||||
return s
|
||||
}
|
||||
|
||||
// next reads the next rune from the buffered reader. Returns the rune(0) if
|
||||
// an error occurs (or io.EOF is returned).
|
||||
func (s *Scanner) next() rune {
|
||||
ch, size, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
// advance for error reporting
|
||||
s.srcPos.Column++
|
||||
s.srcPos.Offset += size
|
||||
s.lastCharLen = size
|
||||
return eof
|
||||
}
|
||||
|
||||
// remember last position
|
||||
s.prevPos = s.srcPos
|
||||
|
||||
s.srcPos.Column++
|
||||
s.lastCharLen = size
|
||||
s.srcPos.Offset += size
|
||||
|
||||
if ch == utf8.RuneError && size == 1 {
|
||||
s.err("illegal UTF-8 encoding")
|
||||
return ch
|
||||
}
|
||||
|
||||
if ch == '\n' {
|
||||
s.srcPos.Line++
|
||||
s.lastLineLen = s.srcPos.Column
|
||||
s.srcPos.Column = 0
|
||||
}
|
||||
|
||||
if ch == '\x00' {
|
||||
s.err("unexpected null character (0x00)")
|
||||
return eof
|
||||
}
|
||||
|
||||
if ch == '\uE123' {
|
||||
s.err("unicode code point U+E123 reserved for internal use")
|
||||
return utf8.RuneError
|
||||
}
|
||||
|
||||
// debug
|
||||
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
|
||||
return ch
|
||||
}
|
||||
|
||||
// unread unreads the previous read Rune and updates the source position
|
||||
func (s *Scanner) unread() {
|
||||
if err := s.buf.UnreadRune(); err != nil {
|
||||
panic(err) // this is user fault, we should catch it
|
||||
}
|
||||
s.srcPos = s.prevPos // put back last position
|
||||
}
|
||||
|
||||
// peek returns the next rune without advancing the reader.
|
||||
func (s *Scanner) peek() rune {
|
||||
peek, _, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
return eof
|
||||
}
|
||||
|
||||
s.buf.UnreadRune()
|
||||
return peek
|
||||
}
|
||||
|
||||
// Scan scans the next token and returns the token.
|
||||
func (s *Scanner) Scan() token.Token {
|
||||
ch := s.next()
|
||||
|
||||
// skip white space
|
||||
for isWhitespace(ch) {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
var tok token.Type
|
||||
|
||||
// token text markings
|
||||
s.tokStart = s.srcPos.Offset - s.lastCharLen
|
||||
|
||||
// token position, initial next() is moving the offset by one(size of rune
|
||||
// actually), though we are interested with the starting point
|
||||
s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
|
||||
if s.srcPos.Column > 0 {
|
||||
// common case: last character was not a '\n'
|
||||
s.tokPos.Line = s.srcPos.Line
|
||||
s.tokPos.Column = s.srcPos.Column
|
||||
} else {
|
||||
// last character was a '\n'
|
||||
// (we cannot be at the beginning of the source
|
||||
// since we have called next() at least once)
|
||||
s.tokPos.Line = s.srcPos.Line - 1
|
||||
s.tokPos.Column = s.lastLineLen
|
||||
}
|
||||
|
||||
switch {
|
||||
case isLetter(ch):
|
||||
tok = token.IDENT
|
||||
lit := s.scanIdentifier()
|
||||
if lit == "true" || lit == "false" {
|
||||
tok = token.BOOL
|
||||
}
|
||||
case isDecimal(ch):
|
||||
tok = s.scanNumber(ch)
|
||||
default:
|
||||
switch ch {
|
||||
case eof:
|
||||
tok = token.EOF
|
||||
case '"':
|
||||
tok = token.STRING
|
||||
s.scanString()
|
||||
case '#', '/':
|
||||
tok = token.COMMENT
|
||||
s.scanComment(ch)
|
||||
case '.':
|
||||
tok = token.PERIOD
|
||||
ch = s.peek()
|
||||
if isDecimal(ch) {
|
||||
tok = token.FLOAT
|
||||
ch = s.scanMantissa(ch)
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
case '<':
|
||||
tok = token.HEREDOC
|
||||
s.scanHeredoc()
|
||||
case '[':
|
||||
tok = token.LBRACK
|
||||
case ']':
|
||||
tok = token.RBRACK
|
||||
case '{':
|
||||
tok = token.LBRACE
|
||||
case '}':
|
||||
tok = token.RBRACE
|
||||
case ',':
|
||||
tok = token.COMMA
|
||||
case '=':
|
||||
tok = token.ASSIGN
|
||||
case '+':
|
||||
tok = token.ADD
|
||||
case '-':
|
||||
if isDecimal(s.peek()) {
|
||||
ch := s.next()
|
||||
tok = s.scanNumber(ch)
|
||||
} else {
|
||||
tok = token.SUB
|
||||
}
|
||||
default:
|
||||
s.err("illegal char")
|
||||
}
|
||||
}
|
||||
|
||||
// finish token ending
|
||||
s.tokEnd = s.srcPos.Offset
|
||||
|
||||
// create token literal
|
||||
var tokenText string
|
||||
if s.tokStart >= 0 {
|
||||
tokenText = string(s.src[s.tokStart:s.tokEnd])
|
||||
}
|
||||
s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
|
||||
|
||||
return token.Token{
|
||||
Type: tok,
|
||||
Pos: s.tokPos,
|
||||
Text: tokenText,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scanner) scanComment(ch rune) {
|
||||
// single line comments
|
||||
if ch == '#' || (ch == '/' && s.peek() != '*') {
|
||||
if ch == '/' && s.peek() != '/' {
|
||||
s.err("expected '/' for comment")
|
||||
return
|
||||
}
|
||||
|
||||
ch = s.next()
|
||||
for ch != '\n' && ch >= 0 && ch != eof {
|
||||
ch = s.next()
|
||||
}
|
||||
if ch != eof && ch >= 0 {
|
||||
s.unread()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// be sure we get the character after /*. This allows us to find comments
// that are not terminated
|
||||
if ch == '/' {
|
||||
s.next()
|
||||
ch = s.next() // read character after "/*"
|
||||
}
|
||||
|
||||
// look for /* - style comments
|
||||
for {
|
||||
if ch < 0 || ch == eof {
|
||||
s.err("comment not terminated")
|
||||
break
|
||||
}
|
||||
|
||||
ch0 := ch
|
||||
ch = s.next()
|
||||
if ch0 == '*' && ch == '/' {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scanNumber scans a HCL number definition starting with the given rune
|
||||
func (s *Scanner) scanNumber(ch rune) token.Type {
|
||||
if ch == '0' {
|
||||
// check for hexadecimal, octal or float
|
||||
ch = s.next()
|
||||
if ch == 'x' || ch == 'X' {
|
||||
// hexadecimal
|
||||
ch = s.next()
|
||||
found := false
|
||||
for isHexadecimal(ch) {
|
||||
ch = s.next()
|
||||
found = true
|
||||
}
|
||||
|
||||
if !found {
|
||||
s.err("illegal hexadecimal number")
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
// now it's either something like: 0421(octal) or 0.1231(float)
|
||||
illegalOctal := false
|
||||
for isDecimal(ch) {
|
||||
ch = s.next()
|
||||
if ch == '8' || ch == '9' {
|
||||
// this is just a possibility. For example 0159 is illegal, but
|
||||
// 0159.23 is valid. So we mark a possible illegal octal. If
|
||||
// the next character is not a period, we'll print the error.
|
||||
illegalOctal = true
|
||||
}
|
||||
}
|
||||
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.scanExponent(ch)
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch == '.' {
|
||||
ch = s.scanFraction(ch)
|
||||
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if illegalOctal {
|
||||
s.err("illegal octal number")
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
s.scanMantissa(ch)
|
||||
ch = s.next() // seek forward
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.scanExponent(ch)
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch == '.' {
|
||||
ch = s.scanFraction(ch)
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
// scanMantissa scans the mantissa beginning from the rune. It returns the next
|
||||
// non-decimal rune. It's used to determine whether it's a fraction or exponent.
|
||||
func (s *Scanner) scanMantissa(ch rune) rune {
|
||||
scanned := false
|
||||
for isDecimal(ch) {
|
||||
ch = s.next()
|
||||
scanned = true
|
||||
}
|
||||
|
||||
if scanned && ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanFraction scans the fraction after the '.' rune
|
||||
func (s *Scanner) scanFraction(ch rune) rune {
|
||||
if ch == '.' {
|
||||
ch = s.peek() // we peek just to see if we can move forward
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
|
||||
// rune.
|
||||
func (s *Scanner) scanExponent(ch rune) rune {
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
if ch == '-' || ch == '+' {
|
||||
ch = s.next()
|
||||
}
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanHeredoc scans a heredoc string
|
||||
func (s *Scanner) scanHeredoc() {
|
||||
// Scan the second '<' in example: '<<EOF'
|
||||
if s.next() != '<' {
|
||||
s.err("heredoc expected second '<', didn't see it")
|
||||
return
|
||||
}
|
||||
|
||||
// Get the original offset so we can read just the heredoc ident
|
||||
offs := s.srcPos.Offset
|
||||
|
||||
// Scan the identifier
|
||||
ch := s.next()
|
||||
|
||||
// Indented heredoc syntax
|
||||
if ch == '-' {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
for isLetter(ch) || isDigit(ch) {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
// If we reached an EOF then that is not good
|
||||
if ch == eof {
|
||||
s.err("heredoc not terminated")
|
||||
return
|
||||
}
|
||||
|
||||
// Ignore the '\r' in Windows line endings
|
||||
if ch == '\r' {
|
||||
if s.peek() == '\n' {
|
||||
ch = s.next()
|
||||
}
|
||||
}
|
||||
|
||||
// If we didn't reach a newline then that is also not good
|
||||
if ch != '\n' {
|
||||
s.err("invalid characters in heredoc anchor")
|
||||
return
|
||||
}
|
||||
|
||||
// Read the identifier
|
||||
identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
|
||||
if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') {
|
||||
s.err("zero-length heredoc anchor")
|
||||
return
|
||||
}
|
||||
|
||||
var identRegexp *regexp.Regexp
|
||||
if identBytes[0] == '-' {
|
||||
identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
|
||||
} else {
|
||||
identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
|
||||
}
|
||||
|
||||
// Read the actual string value
|
||||
lineStart := s.srcPos.Offset
|
||||
for {
|
||||
ch := s.next()
|
||||
|
||||
// Special newline handling.
|
||||
if ch == '\n' {
|
||||
// Math is fast, so we first compare the byte counts to see if we have a chance
|
||||
// of seeing the same identifier - if the length is less than the number of bytes
|
||||
// in the identifier, this cannot be a valid terminator.
|
||||
lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
|
||||
if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
|
||||
break
|
||||
}
|
||||
|
||||
// Not an anchor match, record the start of a new line
|
||||
lineStart = s.srcPos.Offset
|
||||
}
|
||||
|
||||
if ch == eof {
|
||||
s.err("heredoc not terminated")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// scanString scans a quoted string
|
||||
func (s *Scanner) scanString() {
|
||||
braces := 0
|
||||
for {
|
||||
// '"' opening already consumed
|
||||
// read character after quote
|
||||
ch := s.next()
|
||||
|
||||
if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
|
||||
s.err("literal not terminated")
|
||||
return
|
||||
}
|
||||
|
||||
if ch == '"' && braces == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// If we're going into a ${} then we can ignore quotes for awhile
|
||||
if braces == 0 && ch == '$' && s.peek() == '{' {
|
||||
braces++
|
||||
s.next()
|
||||
} else if braces > 0 && ch == '{' {
|
||||
braces++
|
||||
}
|
||||
if braces > 0 && ch == '}' {
|
||||
braces--
|
||||
}
|
||||
|
||||
if ch == '\\' {
|
||||
s.scanEscape()
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// scanEscape scans an escape sequence
|
||||
func (s *Scanner) scanEscape() rune {
|
||||
// http://en.cppreference.com/w/cpp/language/escape
|
||||
ch := s.next() // read character after '/'
|
||||
switch ch {
|
||||
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
|
||||
// nothing to do
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
// octal notation
|
||||
ch = s.scanDigits(ch, 8, 3)
|
||||
case 'x':
|
||||
// hexadecimal notation
|
||||
ch = s.scanDigits(s.next(), 16, 2)
|
||||
case 'u':
|
||||
// universal character name
|
||||
ch = s.scanDigits(s.next(), 16, 4)
|
||||
case 'U':
|
||||
// universal character name
|
||||
ch = s.scanDigits(s.next(), 16, 8)
|
||||
default:
|
||||
s.err("illegal char escape")
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanDigits scans a rune with the given base up to n times. For example, an
// octal escape \184 would result in scanDigits(ch, 8, 3)
|
||||
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
|
||||
start := n
|
||||
for n > 0 && digitVal(ch) < base {
|
||||
ch = s.next()
|
||||
if ch == eof {
|
||||
// If we see an EOF, we halt any more scanning of digits
|
||||
// immediately.
|
||||
break
|
||||
}
|
||||
|
||||
n--
|
||||
}
|
||||
if n > 0 {
|
||||
s.err("illegal char escape")
|
||||
}
|
||||
|
||||
if n != start && ch != eof {
|
||||
// we scanned all digits, put the last non digit char back,
|
||||
// only if we read anything at all
|
||||
s.unread()
|
||||
}
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanIdentifier scans an identifier and returns the literal string
|
||||
func (s *Scanner) scanIdentifier() string {
|
||||
offs := s.srcPos.Offset - s.lastCharLen
|
||||
ch := s.next()
|
||||
for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread() // we got identifier, put back latest char
|
||||
}
|
||||
|
||||
return string(s.src[offs:s.srcPos.Offset])
|
||||
}
|
||||
|
||||
// recentPosition returns the position of the character immediately after the
|
||||
// character or token returned by the last call to Scan.
|
||||
func (s *Scanner) recentPosition() (pos token.Pos) {
|
||||
pos.Offset = s.srcPos.Offset - s.lastCharLen
|
||||
switch {
|
||||
case s.srcPos.Column > 0:
|
||||
// common case: last character was not a '\n'
|
||||
pos.Line = s.srcPos.Line
|
||||
pos.Column = s.srcPos.Column
|
||||
case s.lastLineLen > 0:
|
||||
// last character was a '\n'
|
||||
// (we cannot be at the beginning of the source
|
||||
// since we have called next() at least once)
|
||||
pos.Line = s.srcPos.Line - 1
|
||||
pos.Column = s.lastLineLen
|
||||
default:
|
||||
// at the beginning of the source
|
||||
pos.Line = 1
|
||||
pos.Column = 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// err prints the error of any scanning to s.Error function. If the function is
|
||||
// not defined, by default it prints them to os.Stderr
|
||||
func (s *Scanner) err(msg string) {
|
||||
s.ErrorCount++
|
||||
pos := s.recentPosition()
|
||||
|
||||
if s.Error != nil {
|
||||
s.Error(pos, msg)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
|
||||
}
|
||||
|
||||
// isLetter returns true if the given rune is a letter
|
||||
func isLetter(ch rune) bool {
|
||||
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
|
||||
}
|
||||
|
||||
// isDigit returns true if the given rune is a decimal digit
|
||||
func isDigit(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
|
||||
}
|
||||
|
||||
// isDecimal returns true if the given rune is a decimal number
|
||||
func isDecimal(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9'
|
||||
}
|
||||
|
||||
// isHexadecimal returns true if the given rune is a hexadecimal digit
|
||||
func isHexadecimal(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
|
||||
}
|
||||
|
||||
// isWhitespace returns true if the rune is a space, tab, newline or carriage return
|
||||
func isWhitespace(ch rune) bool {
|
||||
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
|
||||
}
|
||||
|
||||
// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
|
||||
func digitVal(ch rune) int {
|
||||
switch {
|
||||
case '0' <= ch && ch <= '9':
|
||||
return int(ch - '0')
|
||||
case 'a' <= ch && ch <= 'f':
|
||||
return int(ch - 'a' + 10)
|
||||
case 'A' <= ch && ch <= 'F':
|
||||
return int(ch - 'A' + 10)
|
||||
}
|
||||
return 16 // larger than any legal digit val
|
||||
}
|
||||
241
vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
generated
vendored
@@ -1,241 +0,0 @@
|
||||
package strconv
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ErrSyntax indicates that a value does not have the right syntax for the target type.
|
||||
var ErrSyntax = errors.New("invalid syntax")
|
||||
|
||||
// Unquote interprets s as a single-quoted, double-quoted,
|
||||
// or backquoted Go string literal, returning the string value
|
||||
// that s quotes. (If s is single-quoted, it would be a Go
|
||||
// character literal; Unquote returns the corresponding
|
||||
// one-character string.)
|
||||
func Unquote(s string) (t string, err error) {
|
||||
n := len(s)
|
||||
if n < 2 {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
quote := s[0]
|
||||
if quote != s[n-1] {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
s = s[1 : n-1]
|
||||
|
||||
if quote != '"' {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
|
||||
// Is it trivial? Avoid allocation.
|
||||
if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
|
||||
switch quote {
|
||||
case '"':
|
||||
return s, nil
|
||||
case '\'':
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
if size == len(s) && (r != utf8.RuneError || size != 1) {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var runeTmp [utf8.UTFMax]byte
|
||||
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
|
||||
for len(s) > 0 {
|
||||
// If we're starting a '${}' then let it through un-unquoted.
|
||||
// Specifically: we don't unquote any characters within the `${}`
|
||||
// section.
|
||||
if s[0] == '$' && len(s) > 1 && s[1] == '{' {
|
||||
buf = append(buf, '$', '{')
|
||||
s = s[2:]
|
||||
|
||||
// Continue reading until we find the closing brace, copying as-is
|
||||
braces := 1
|
||||
for len(s) > 0 && braces > 0 {
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
|
||||
s = s[size:]
|
||||
|
||||
n := utf8.EncodeRune(runeTmp[:], r)
|
||||
buf = append(buf, runeTmp[:n]...)
|
||||
|
||||
switch r {
|
||||
case '{':
|
||||
braces++
|
||||
case '}':
|
||||
braces--
|
||||
}
|
||||
}
|
||||
if braces != 0 {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
if len(s) == 0 {
|
||||
// If there's no string left, we're done!
|
||||
break
|
||||
} else {
|
||||
// If there's more left, we need to pop back up to the top of the loop
|
||||
// in case there's another interpolation in this string.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if s[0] == '\n' {
|
||||
return "", ErrSyntax
|
||||
}
|
||||
|
||||
c, multibyte, ss, err := unquoteChar(s, quote)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
s = ss
|
||||
if c < utf8.RuneSelf || !multibyte {
|
||||
buf = append(buf, byte(c))
|
||||
} else {
|
||||
n := utf8.EncodeRune(runeTmp[:], c)
|
||||
buf = append(buf, runeTmp[:n]...)
|
||||
}
|
||||
if quote == '\'' && len(s) != 0 {
|
||||
// single-quoted must be single character
|
||||
return "", ErrSyntax
|
||||
}
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// contains reports whether the string contains the byte c.
|
||||
func contains(s string, c byte) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] == c {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func unhex(b byte) (v rune, ok bool) {
|
||||
c := rune(b)
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
return c - '0', true
|
||||
case 'a' <= c && c <= 'f':
|
||||
return c - 'a' + 10, true
|
||||
case 'A' <= c && c <= 'F':
|
||||
return c - 'A' + 10, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
|
||||
// easy cases
|
||||
switch c := s[0]; {
|
||||
case c == quote && (quote == '\'' || quote == '"'):
|
||||
err = ErrSyntax
|
||||
return
|
||||
case c >= utf8.RuneSelf:
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
return r, true, s[size:], nil
|
||||
case c != '\\':
|
||||
return rune(s[0]), false, s[1:], nil
|
||||
}
|
||||
|
||||
// hard case: c is backslash
|
||||
if len(s) <= 1 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
c := s[1]
|
||||
s = s[2:]
|
||||
|
||||
switch c {
|
||||
case 'a':
|
||||
value = '\a'
|
||||
case 'b':
|
||||
value = '\b'
|
||||
case 'f':
|
||||
value = '\f'
|
||||
case 'n':
|
||||
value = '\n'
|
||||
case 'r':
|
||||
value = '\r'
|
||||
case 't':
|
||||
value = '\t'
|
||||
case 'v':
|
||||
value = '\v'
|
||||
case 'x', 'u', 'U':
|
||||
n := 0
|
||||
switch c {
|
||||
case 'x':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
var v rune
|
||||
if len(s) < n {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
for j := 0; j < n; j++ {
|
||||
x, ok := unhex(s[j])
|
||||
if !ok {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
v = v<<4 | x
|
||||
}
|
||||
s = s[n:]
|
||||
if c == 'x' {
|
||||
// single-byte string, possibly not UTF-8
|
||||
value = v
|
||||
break
|
||||
}
|
||||
if v > utf8.MaxRune {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
value = v
|
||||
multibyte = true
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
v := rune(c) - '0'
|
||||
if len(s) < 2 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
for j := 0; j < 2; j++ { // one digit already; two more
|
||||
x := rune(s[j]) - '0'
|
||||
if x < 0 || x > 7 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
v = (v << 3) | x
|
||||
}
|
||||
s = s[2:]
|
||||
if v > 255 {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
value = v
|
||||
case '\\':
|
||||
value = '\\'
|
||||
case '\'', '"':
|
||||
if c != quote {
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
value = rune(c)
|
||||
default:
|
||||
err = ErrSyntax
|
||||
return
|
||||
}
|
||||
tail = s
|
||||
return
|
||||
}
|
||||
46
vendor/github.com/hashicorp/hcl/hcl/token/position.go
generated
vendored
@@ -1,46 +0,0 @@
package token

import "fmt"

// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
type Pos struct {
	Filename string // filename, if any
	Offset   int    // offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count)
}

// IsValid returns true if the position is valid.
func (p *Pos) IsValid() bool { return p.Line > 0 }

// String returns a string in one of several forms:
//
//	file:line:column    valid position with file name
//	line:column         valid position without file name
//	file                invalid position with file name
//	-                   invalid position without file name
func (p Pos) String() string {
	s := p.Filename
	if p.IsValid() {
		if s != "" {
			s += ":"
		}
		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
	}
	if s == "" {
		s = "-"
	}
	return s
}

// Before reports whether the position p is before u.
func (p Pos) Before(u Pos) bool {
	return u.Offset > p.Offset || u.Line > p.Line
}

// After reports whether the position p is after u.
func (p Pos) After(u Pos) bool {
	return u.Offset < p.Offset || u.Line < p.Line
}
219
vendor/github.com/hashicorp/hcl/hcl/token/token.go
generated
vendored
@@ -1,219 +0,0 @@
|
||||
// Package token defines constants representing the lexical tokens for HCL
|
||||
// (HashiCorp Configuration Language)
|
||||
package token
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
|
||||
)
|
||||
|
||||
// Token defines a single HCL token which can be obtained via the Scanner
|
||||
type Token struct {
|
||||
Type Type
|
||||
Pos Pos
|
||||
Text string
|
||||
JSON bool
|
||||
}
|
||||
|
||||
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
|
||||
type Type int
|
||||
|
||||
const (
|
||||
// Special tokens
|
||||
ILLEGAL Type = iota
|
||||
EOF
|
||||
COMMENT
|
||||
|
||||
identifier_beg
|
||||
IDENT // literals
|
||||
literal_beg
|
||||
NUMBER // 12345
|
||||
FLOAT // 123.45
|
||||
BOOL // true,false
|
||||
STRING // "abc"
|
||||
HEREDOC // <<FOO\nbar\nFOO
|
||||
literal_end
|
||||
identifier_end
|
||||
|
||||
operator_beg
|
||||
LBRACK // [
|
||||
LBRACE // {
|
||||
COMMA // ,
|
||||
PERIOD // .
|
||||
|
||||
RBRACK // ]
|
||||
RBRACE // }
|
||||
|
||||
ASSIGN // =
|
||||
ADD // +
|
||||
SUB // -
|
||||
operator_end
|
||||
)
|
||||
|
||||
var tokens = [...]string{
|
||||
ILLEGAL: "ILLEGAL",
|
||||
|
||||
EOF: "EOF",
|
||||
COMMENT: "COMMENT",
|
||||
|
||||
IDENT: "IDENT",
|
||||
NUMBER: "NUMBER",
|
||||
FLOAT: "FLOAT",
|
||||
BOOL: "BOOL",
|
||||
STRING: "STRING",
|
||||
|
||||
LBRACK: "LBRACK",
|
||||
LBRACE: "LBRACE",
|
||||
COMMA: "COMMA",
|
||||
PERIOD: "PERIOD",
|
||||
HEREDOC: "HEREDOC",
|
||||
|
||||
RBRACK: "RBRACK",
|
||||
RBRACE: "RBRACE",
|
||||
|
||||
ASSIGN: "ASSIGN",
|
||||
ADD: "ADD",
|
||||
SUB: "SUB",
|
||||
}
|
||||
|
||||
// String returns the string corresponding to the token tok.
|
||||
func (t Type) String() string {
|
||||
s := ""
|
||||
if 0 <= t && t < Type(len(tokens)) {
|
||||
s = tokens[t]
|
||||
}
|
||||
if s == "" {
|
||||
s = "token(" + strconv.Itoa(int(t)) + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// IsIdentifier returns true for tokens corresponding to identifiers and basic
|
||||
// type literals; it returns false otherwise.
|
||||
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
|
||||
|
||||
// IsLiteral returns true for tokens corresponding to basic type literals; it
|
||||
// returns false otherwise.
|
||||
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
|
||||
|
||||
// IsOperator returns true for tokens corresponding to operators and
|
||||
// delimiters; it returns false otherwise.
|
||||
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
|
||||
|
||||
// String returns the token's literal text. Note that this is only
|
||||
// applicable for certain token types, such as token.IDENT,
|
||||
// token.STRING, etc..
|
||||
func (t Token) String() string {
|
||||
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
|
||||
}
|
||||
|
||||
// Value returns the properly typed value for this token. The type of
|
||||
// the returned interface{} is guaranteed based on the Type field.
|
||||
//
|
||||
// This can only be called for literal types. If it is called for any other
|
||||
// type, this will panic.
|
||||
func (t Token) Value() interface{} {
|
||||
switch t.Type {
|
||||
case BOOL:
|
||||
if t.Text == "true" {
|
||||
return true
|
||||
} else if t.Text == "false" {
|
||||
return false
|
||||
}
|
||||
|
||||
panic("unknown bool value: " + t.Text)
|
||||
case FLOAT:
|
||||
v, err := strconv.ParseFloat(t.Text, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return float64(v)
|
||||
case NUMBER:
|
||||
v, err := strconv.ParseInt(t.Text, 0, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return int64(v)
|
||||
case IDENT:
|
||||
return t.Text
|
||||
case HEREDOC:
|
||||
return unindentHeredoc(t.Text)
|
||||
case STRING:
|
||||
// Determine the Unquote method to use. If it came from JSON,
|
||||
// then we need to use the built-in unquote since we have to
|
||||
// escape interpolations there.
|
||||
f := hclstrconv.Unquote
|
||||
if t.JSON {
|
||||
f = strconv.Unquote
|
||||
}
|
||||
|
||||
// This case occurs if json null is used
|
||||
if t.Text == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
v, err := f(t.Text)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
|
||||
}
|
||||
|
||||
return v
|
||||
default:
|
||||
panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
|
||||
}
|
||||
}
|
||||
|
||||
// unindentHeredoc returns the string content of a HEREDOC if it is started with <<
|
||||
// and the content of a HEREDOC with the hanging indent removed if it is started with
|
||||
// a <<-, and the terminating line is at least as indented as the least indented line.
|
||||
func unindentHeredoc(heredoc string) string {
|
||||
// We need to find the end of the marker
|
||||
idx := strings.IndexByte(heredoc, '\n')
|
||||
if idx == -1 {
|
||||
panic("heredoc doesn't contain newline")
|
||||
}
|
||||
|
||||
unindent := heredoc[2] == '-'
|
||||
|
||||
// We can optimize if the heredoc isn't marked for indentation
|
||||
if !unindent {
|
||||
return string(heredoc[idx+1 : len(heredoc)-idx+1])
|
||||
}
|
||||
|
||||
// We need to unindent each line based on the indentation level of the marker
|
||||
lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
|
||||
whitespacePrefix := lines[len(lines)-1]
|
||||
|
||||
isIndented := true
|
||||
for _, v := range lines {
|
||||
if strings.HasPrefix(v, whitespacePrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
isIndented = false
|
||||
break
|
||||
}
|
||||
|
||||
// If all lines are not at least as indented as the terminating mark, return the
|
||||
// heredoc as is, but trim the leading space from the marker on the final line.
|
||||
if !isIndented {
|
||||
return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
|
||||
}
|
||||
|
||||
unindentedLines := make([]string, len(lines))
|
||||
for k, v := range lines {
|
||||
if k == len(lines)-1 {
|
||||
unindentedLines[k] = ""
|
||||
break
|
||||
}
|
||||
|
||||
unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
|
||||
}
|
||||
|
||||
return strings.Join(unindentedLines, "\n")
|
||||
}
|
||||
117
vendor/github.com/hashicorp/hcl/json/parser/flatten.go
generated
vendored
@@ -1,117 +0,0 @@
|
||||
package parser
|
||||
|
||||
import "github.com/hashicorp/hcl/hcl/ast"
|
||||
|
||||
// flattenObjects takes an AST node, walks it, and flattens it
|
||||
func flattenObjects(node ast.Node) {
|
||||
ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
|
||||
// We only care about lists, because this is what we modify
|
||||
list, ok := n.(*ast.ObjectList)
|
||||
if !ok {
|
||||
return n, true
|
||||
}
|
||||
|
||||
// Rebuild the item list
|
||||
items := make([]*ast.ObjectItem, 0, len(list.Items))
|
||||
frontier := make([]*ast.ObjectItem, len(list.Items))
|
||||
copy(frontier, list.Items)
|
||||
for len(frontier) > 0 {
|
||||
// Pop the current item
|
||||
n := len(frontier)
|
||||
item := frontier[n-1]
|
||||
frontier = frontier[:n-1]
|
||||
|
||||
switch v := item.Val.(type) {
|
||||
case *ast.ObjectType:
|
||||
items, frontier = flattenObjectType(v, item, items, frontier)
|
||||
case *ast.ListType:
|
||||
items, frontier = flattenListType(v, item, items, frontier)
|
||||
default:
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
|
||||
// Reverse the list since the frontier model runs things backwards
|
||||
for i := len(items)/2 - 1; i >= 0; i-- {
|
||||
opp := len(items) - 1 - i
|
||||
items[i], items[opp] = items[opp], items[i]
|
||||
}
|
||||
|
||||
// Done! Set the original items
|
||||
list.Items = items
|
||||
return n, true
|
||||
})
|
||||
}
|
||||
|
||||
func flattenListType(
|
||||
ot *ast.ListType,
|
||||
item *ast.ObjectItem,
|
||||
items []*ast.ObjectItem,
|
||||
frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
|
||||
// If the list is empty, keep the original list
|
||||
if len(ot.List) == 0 {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
|
||||
// All the elements of this object must also be objects!
|
||||
for _, subitem := range ot.List {
|
||||
if _, ok := subitem.(*ast.ObjectType); !ok {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
}
|
||||
|
||||
// Great! We have a match. Go through all the items and flatten
|
||||
for _, elem := range ot.List {
|
||||
// Add it to the frontier so that we can recurse
|
||||
frontier = append(frontier, &ast.ObjectItem{
|
||||
Keys: item.Keys,
|
||||
Assign: item.Assign,
|
||||
Val: elem,
|
||||
LeadComment: item.LeadComment,
|
||||
LineComment: item.LineComment,
|
||||
})
|
||||
}
|
||||
|
||||
return items, frontier
|
||||
}
|
||||
|
||||
func flattenObjectType(
|
||||
ot *ast.ObjectType,
|
||||
item *ast.ObjectItem,
|
||||
items []*ast.ObjectItem,
|
||||
frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
|
||||
// If the list has no items we do not have to flatten anything
|
||||
if ot.List.Items == nil {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
|
||||
// All the elements of this object must also be objects!
|
||||
for _, subitem := range ot.List.Items {
|
||||
if _, ok := subitem.Val.(*ast.ObjectType); !ok {
|
||||
items = append(items, item)
|
||||
return items, frontier
|
||||
}
|
||||
}
|
||||
|
||||
// Great! We have a match. Go through all the items and flatten
|
||||
for _, subitem := range ot.List.Items {
|
||||
// Copy the new key
|
||||
keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
|
||||
copy(keys, item.Keys)
|
||||
copy(keys[len(item.Keys):], subitem.Keys)
|
||||
|
||||
// Add it to the frontier so that we can recurse
|
||||
frontier = append(frontier, &ast.ObjectItem{
|
||||
Keys: keys,
|
||||
Assign: item.Assign,
|
||||
Val: subitem.Val,
|
||||
LeadComment: item.LeadComment,
|
||||
LineComment: item.LineComment,
|
||||
})
|
||||
}
|
||||
|
||||
return items, frontier
|
||||
}
|
||||
313
vendor/github.com/hashicorp/hcl/json/parser/parser.go
generated
vendored
@@ -1,313 +0,0 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
hcltoken "github.com/hashicorp/hcl/hcl/token"
|
||||
"github.com/hashicorp/hcl/json/scanner"
|
||||
"github.com/hashicorp/hcl/json/token"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
sc *scanner.Scanner
|
||||
|
||||
// Last read token
|
||||
tok token.Token
|
||||
commaPrev token.Token
|
||||
|
||||
enableTrace bool
|
||||
indent int
|
||||
n int // buffer size (max = 1)
|
||||
}
|
||||
|
||||
func newParser(src []byte) *Parser {
|
||||
return &Parser{
|
||||
sc: scanner.New(src),
|
||||
}
|
||||
}
|
||||
|
||||
// Parse parses the given source and returns the abstract syntax tree.
|
||||
func Parse(src []byte) (*ast.File, error) {
|
||||
p := newParser(src)
|
||||
return p.Parse()
|
||||
}
|
||||
|
||||
var errEofToken = errors.New("EOF token found")
|
||||
|
||||
// Parse parses the given source and returns the abstract syntax tree.
|
||||
func (p *Parser) Parse() (*ast.File, error) {
|
||||
f := &ast.File{}
|
||||
var err, scerr error
|
||||
p.sc.Error = func(pos token.Pos, msg string) {
|
||||
scerr = fmt.Errorf("%s: %s", pos, msg)
|
||||
}
|
||||
|
||||
// The root must be an object in JSON
|
||||
object, err := p.object()
|
||||
if scerr != nil {
|
||||
return nil, scerr
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We make our final node an object list so it is more HCL compatible
|
||||
f.Node = object.List
|
||||
|
||||
// Flatten it, which finds patterns and turns them into more HCL-like
|
||||
// AST trees.
|
||||
flattenObjects(f.Node)
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (p *Parser) objectList() (*ast.ObjectList, error) {
|
||||
defer un(trace(p, "ParseObjectList"))
|
||||
node := &ast.ObjectList{}
|
||||
|
||||
for {
|
||||
n, err := p.objectItem()
|
||||
if err == errEofToken {
|
||||
break // we are finished
|
||||
}
|
||||
|
||||
// we don't return a nil node, because we might want to use already
|
||||
// collected items.
|
||||
if err != nil {
|
||||
return node, err
|
||||
}
|
||||
|
||||
node.Add(n)
|
||||
|
||||
// Check for a followup comma. If it isn't a comma, then we're done
|
||||
if tok := p.scan(); tok.Type != token.COMMA {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// objectItem parses a single object item
|
||||
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
|
||||
defer un(trace(p, "ParseObjectItem"))
|
||||
|
||||
keys, err := p.objectKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o := &ast.ObjectItem{
|
||||
Keys: keys,
|
||||
}
|
||||
|
||||
switch p.tok.Type {
|
||||
case token.COLON:
|
||||
pos := p.tok.Pos
|
||||
o.Assign = hcltoken.Pos{
|
||||
Filename: pos.Filename,
|
||||
Offset: pos.Offset,
|
||||
Line: pos.Line,
|
||||
Column: pos.Column,
|
||||
}
|
||||
|
||||
o.Val, err = p.objectValue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// objectKey parses an object key and returns an ObjectKey AST
|
||||
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
|
||||
keyCount := 0
|
||||
keys := make([]*ast.ObjectKey, 0)
|
||||
|
||||
for {
|
||||
tok := p.scan()
|
||||
switch tok.Type {
|
||||
case token.EOF:
|
||||
return nil, errEofToken
|
||||
case token.STRING:
|
||||
keyCount++
|
||||
keys = append(keys, &ast.ObjectKey{
|
||||
Token: p.tok.HCLToken(),
|
||||
})
|
||||
case token.COLON:
|
||||
// If we have a zero keycount it means that we never got
|
||||
// an object key, i.e. `{ :`. This is a syntax error.
|
||||
if keyCount == 0 {
|
||||
return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
|
||||
}
|
||||
|
||||
// Done
|
||||
return keys, nil
|
||||
case token.ILLEGAL:
|
||||
return nil, errors.New("illegal")
|
||||
default:
|
||||
return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// objectValue parses any type of value, such as number, bool, string, object or
|
||||
// list.
|
||||
func (p *Parser) objectValue() (ast.Node, error) {
|
||||
defer un(trace(p, "ParseObjectValue"))
|
||||
tok := p.scan()
|
||||
|
||||
switch tok.Type {
|
||||
case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
|
||||
return p.literalType()
|
||||
case token.LBRACE:
|
||||
return p.objectType()
|
||||
case token.LBRACK:
|
||||
return p.listType()
|
||||
case token.EOF:
|
||||
return nil, errEofToken
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
|
||||
}
|
||||
|
||||
// object parses any type of object, such as number, bool, string, object or
|
||||
// list.
|
||||
func (p *Parser) object() (*ast.ObjectType, error) {
|
||||
defer un(trace(p, "ParseType"))
|
||||
tok := p.scan()
|
||||
|
||||
switch tok.Type {
|
||||
case token.LBRACE:
|
||||
return p.objectType()
|
||||
case token.EOF:
|
||||
return nil, errEofToken
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
|
||||
}
|
||||
|
||||
// objectType parses an object type and returns an ObjectType AST
|
||||
func (p *Parser) objectType() (*ast.ObjectType, error) {
|
||||
defer un(trace(p, "ParseObjectType"))
|
||||
|
||||
// we assume that the currently scanned token is a LBRACE
|
||||
o := &ast.ObjectType{}
|
||||
|
||||
l, err := p.objectList()
|
||||
|
||||
// if we hit RBRACE, we are good to go (means we parsed all Items); if it's
// not an RBRACE, it's a syntax error and we just return it.
|
||||
if err != nil && p.tok.Type != token.RBRACE {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o.List = l
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// listType parses a list type and returns a ListType AST
|
||||
func (p *Parser) listType() (*ast.ListType, error) {
|
||||
defer un(trace(p, "ParseListType"))
|
||||
|
||||
// we assume that the currently scanned token is a LBRACK
|
||||
l := &ast.ListType{}
|
||||
|
||||
for {
|
||||
tok := p.scan()
|
||||
switch tok.Type {
|
||||
case token.NUMBER, token.FLOAT, token.STRING:
|
||||
node, err := p.literalType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l.Add(node)
|
||||
case token.COMMA:
|
||||
continue
|
||||
case token.LBRACE:
|
||||
node, err := p.objectType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l.Add(node)
|
||||
case token.BOOL:
|
||||
// TODO(arslan) should we support? not supported by HCL yet
|
||||
case token.LBRACK:
|
||||
// TODO(arslan) should we support nested lists? Even though it's
|
||||
// written in README of HCL, it's not a part of the grammar
|
||||
// (not defined in parse.y)
|
||||
case token.RBRACK:
|
||||
// finished
|
||||
return l, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// literalType parses a literal type and returns a LiteralType AST
|
||||
func (p *Parser) literalType() (*ast.LiteralType, error) {
|
||||
defer un(trace(p, "ParseLiteral"))
|
||||
|
||||
return &ast.LiteralType{
|
||||
Token: p.tok.HCLToken(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// scan returns the next token from the underlying scanner. If a token has
|
||||
// been unscanned then read that instead.
|
||||
func (p *Parser) scan() token.Token {
|
||||
// If we have a token on the buffer, then return it.
|
||||
if p.n != 0 {
|
||||
p.n = 0
|
||||
return p.tok
|
||||
}
|
||||
|
||||
p.tok = p.sc.Scan()
|
||||
return p.tok
|
||||
}
|
||||
|
||||
// unscan pushes the previously read token back onto the buffer.
|
||||
func (p *Parser) unscan() {
|
||||
p.n = 1
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Parsing support
|
||||
|
||||
func (p *Parser) printTrace(a ...interface{}) {
|
||||
if !p.enableTrace {
|
||||
return
|
||||
}
|
||||
|
||||
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
|
||||
const n = len(dots)
|
||||
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
|
||||
|
||||
i := 2 * p.indent
|
||||
for i > n {
|
||||
fmt.Print(dots)
|
||||
i -= n
|
||||
}
|
||||
// i <= n
|
||||
fmt.Print(dots[0:i])
|
||||
fmt.Println(a...)
|
||||
}
|
||||
|
||||
func trace(p *Parser, msg string) *Parser {
|
||||
p.printTrace(msg, "(")
|
||||
p.indent++
|
||||
return p
|
||||
}
|
||||
|
||||
// Usage pattern: defer un(trace(p, "..."))
|
||||
func un(p *Parser) {
|
||||
p.indent--
|
||||
p.printTrace(")")
|
||||
}
|
||||
451
vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
generated
vendored
@@ -1,451 +0,0 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/hashicorp/hcl/json/token"
|
||||
)
|
||||
|
||||
// eof represents a marker rune for the end of the reader.
|
||||
const eof = rune(0)
|
||||
|
||||
// Scanner defines a lexical scanner
|
||||
type Scanner struct {
|
||||
buf *bytes.Buffer // Source buffer for advancing and scanning
|
||||
src []byte // Source buffer for immutable access
|
||||
|
||||
// Source Position
|
||||
srcPos token.Pos // current position
|
||||
prevPos token.Pos // previous position, used for peek() method
|
||||
|
||||
lastCharLen int // length of last character in bytes
|
||||
lastLineLen int // length of last line in characters (for correct column reporting)
|
||||
|
||||
tokStart int // token text start position
|
||||
tokEnd int // token text end position
|
||||
|
||||
// Error is called for each error encountered. If no Error
|
||||
// function is set, the error is reported to os.Stderr.
|
||||
Error func(pos token.Pos, msg string)
|
||||
|
||||
// ErrorCount is incremented by one for each error encountered.
|
||||
ErrorCount int
|
||||
|
||||
// tokPos is the start position of most recently scanned token; set by
|
||||
// Scan. The Filename field is always left untouched by the Scanner. If
|
||||
// an error is reported (via Error) and Position is invalid, the scanner is
|
||||
// not inside a token.
|
||||
tokPos token.Pos
|
||||
}
|
||||
|
||||
// New creates and initializes a new instance of Scanner using src as
|
||||
// its source content.
|
||||
func New(src []byte) *Scanner {
|
||||
// even though we accept a src, we read from an io.Reader compatible type
|
||||
// (*bytes.Buffer). So in the future we might easily change it to streaming
|
||||
// read.
|
||||
b := bytes.NewBuffer(src)
|
||||
s := &Scanner{
|
||||
buf: b,
|
||||
src: src,
|
||||
}
|
||||
|
||||
// srcPosition always starts with 1
|
||||
s.srcPos.Line = 1
|
||||
return s
|
||||
}
|
||||
|
||||
// next reads the next rune from the buffered reader. Returns the rune(0) if
|
||||
// an error occurs (or io.EOF is returned).
|
||||
func (s *Scanner) next() rune {
|
||||
ch, size, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
// advance for error reporting
|
||||
s.srcPos.Column++
|
||||
s.srcPos.Offset += size
|
||||
s.lastCharLen = size
|
||||
return eof
|
||||
}
|
||||
|
||||
if ch == utf8.RuneError && size == 1 {
|
||||
s.srcPos.Column++
|
||||
s.srcPos.Offset += size
|
||||
s.lastCharLen = size
|
||||
s.err("illegal UTF-8 encoding")
|
||||
return ch
|
||||
}
|
||||
|
||||
// remember last position
|
||||
s.prevPos = s.srcPos
|
||||
|
||||
s.srcPos.Column++
|
||||
s.lastCharLen = size
|
||||
s.srcPos.Offset += size
|
||||
|
||||
if ch == '\n' {
|
||||
s.srcPos.Line++
|
||||
s.lastLineLen = s.srcPos.Column
|
||||
s.srcPos.Column = 0
|
||||
}
|
||||
|
||||
// debug
|
||||
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
|
||||
return ch
|
||||
}
|
||||
|
||||
// unread unreads the previous read Rune and updates the source position
|
||||
func (s *Scanner) unread() {
|
||||
if err := s.buf.UnreadRune(); err != nil {
|
||||
panic(err) // this is user fault, we should catch it
|
||||
}
|
||||
s.srcPos = s.prevPos // put back last position
|
||||
}
|
||||
|
||||
// peek returns the next rune without advancing the reader.
|
||||
func (s *Scanner) peek() rune {
|
||||
peek, _, err := s.buf.ReadRune()
|
||||
if err != nil {
|
||||
return eof
|
||||
}
|
||||
|
||||
s.buf.UnreadRune()
|
||||
return peek
|
||||
}
|
||||
|
||||
// Scan scans the next token and returns the token.
|
||||
func (s *Scanner) Scan() token.Token {
|
||||
ch := s.next()
|
||||
|
||||
// skip white space
|
||||
for isWhitespace(ch) {
|
||||
ch = s.next()
|
||||
}
|
||||
|
||||
var tok token.Type
|
||||
|
||||
// token text markings
|
||||
s.tokStart = s.srcPos.Offset - s.lastCharLen
|
||||
|
||||
// token position, initial next() is moving the offset by one(size of rune
|
||||
// actually), though we are interested with the starting point
|
||||
s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
|
||||
if s.srcPos.Column > 0 {
|
||||
// common case: last character was not a '\n'
|
||||
s.tokPos.Line = s.srcPos.Line
|
||||
s.tokPos.Column = s.srcPos.Column
|
||||
} else {
|
||||
// last character was a '\n'
|
||||
// (we cannot be at the beginning of the source
|
||||
// since we have called next() at least once)
|
||||
s.tokPos.Line = s.srcPos.Line - 1
|
||||
s.tokPos.Column = s.lastLineLen
|
||||
}
|
||||
|
||||
switch {
|
||||
case isLetter(ch):
|
||||
lit := s.scanIdentifier()
|
||||
if lit == "true" || lit == "false" {
|
||||
tok = token.BOOL
|
||||
} else if lit == "null" {
|
||||
tok = token.NULL
|
||||
} else {
|
||||
s.err("illegal char")
|
||||
}
|
||||
case isDecimal(ch):
|
||||
tok = s.scanNumber(ch)
|
||||
default:
|
||||
switch ch {
|
||||
case eof:
|
||||
tok = token.EOF
|
||||
case '"':
|
||||
tok = token.STRING
|
||||
s.scanString()
|
||||
case '.':
|
||||
tok = token.PERIOD
|
||||
ch = s.peek()
|
||||
if isDecimal(ch) {
|
||||
tok = token.FLOAT
|
||||
ch = s.scanMantissa(ch)
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
case '[':
|
||||
tok = token.LBRACK
|
||||
case ']':
|
||||
tok = token.RBRACK
|
||||
case '{':
|
||||
tok = token.LBRACE
|
||||
case '}':
|
||||
tok = token.RBRACE
|
||||
case ',':
|
||||
tok = token.COMMA
|
||||
case ':':
|
||||
tok = token.COLON
|
||||
case '-':
|
||||
if isDecimal(s.peek()) {
|
||||
ch := s.next()
|
||||
tok = s.scanNumber(ch)
|
||||
} else {
|
||||
s.err("illegal char")
|
||||
}
|
||||
default:
|
||||
s.err("illegal char: " + string(ch))
|
||||
}
|
||||
}
|
||||
|
||||
// finish token ending
|
||||
s.tokEnd = s.srcPos.Offset
|
||||
|
||||
// create token literal
|
||||
var tokenText string
|
||||
if s.tokStart >= 0 {
|
||||
tokenText = string(s.src[s.tokStart:s.tokEnd])
|
||||
}
|
||||
s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
|
||||
|
||||
return token.Token{
|
||||
Type: tok,
|
||||
Pos: s.tokPos,
|
||||
Text: tokenText,
|
||||
}
|
||||
}
|
||||
|
||||
// scanNumber scans a HCL number definition starting with the given rune
|
||||
func (s *Scanner) scanNumber(ch rune) token.Type {
|
||||
zero := ch == '0'
|
||||
pos := s.srcPos
|
||||
|
||||
s.scanMantissa(ch)
|
||||
ch = s.next() // seek forward
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.scanExponent(ch)
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch == '.' {
|
||||
ch = s.scanFraction(ch)
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
ch = s.scanExponent(ch)
|
||||
}
|
||||
return token.FLOAT
|
||||
}
|
||||
|
||||
if ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
|
||||
// If we have a larger number and this is zero, error
|
||||
if zero && pos != s.srcPos {
|
||||
s.err("numbers cannot start with 0")
|
||||
}
|
||||
|
||||
return token.NUMBER
|
||||
}
|
||||
|
||||
// scanMantissa scans the mantissa beginning from the rune. It returns the next
|
||||
// non-decimal rune. It's used to determine whether it's a fraction or exponent.
|
||||
func (s *Scanner) scanMantissa(ch rune) rune {
|
||||
scanned := false
|
||||
for isDecimal(ch) {
|
||||
ch = s.next()
|
||||
scanned = true
|
||||
}
|
||||
|
||||
if scanned && ch != eof {
|
||||
s.unread()
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanFraction scans the fraction after the '.' rune
|
||||
func (s *Scanner) scanFraction(ch rune) rune {
|
||||
if ch == '.' {
|
||||
ch = s.peek() // we peek just to see if we can move forward
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
|
||||
// rune.
|
||||
func (s *Scanner) scanExponent(ch rune) rune {
|
||||
if ch == 'e' || ch == 'E' {
|
||||
ch = s.next()
|
||||
if ch == '-' || ch == '+' {
|
||||
ch = s.next()
|
||||
}
|
||||
ch = s.scanMantissa(ch)
|
||||
}
|
||||
return ch
|
||||
}

// scanString scans a quoted string
func (s *Scanner) scanString() {
braces := 0
for {
// '"' opening already consumed
// read character after quote
ch := s.next()

if ch == '\n' || ch < 0 || ch == eof {
s.err("literal not terminated")
return
}

if ch == '"' {
break
}

// If we're going into a ${} then we can ignore quotes for a while
if braces == 0 && ch == '$' && s.peek() == '{' {
braces++
s.next()
} else if braces > 0 && ch == '{' {
braces++
}
if braces > 0 && ch == '}' {
braces--
}

if ch == '\\' {
s.scanEscape()
}
}

return
}

// scanEscape scans an escape sequence
func (s *Scanner) scanEscape() rune {
// http://en.cppreference.com/w/cpp/language/escape
ch := s.next() // read character after '\\'
switch ch {
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
// nothing to do
case '0', '1', '2', '3', '4', '5', '6', '7':
// octal notation
ch = s.scanDigits(ch, 8, 3)
case 'x':
// hexadecimal notation
ch = s.scanDigits(s.next(), 16, 2)
case 'u':
// universal character name
ch = s.scanDigits(s.next(), 16, 4)
case 'U':
// universal character name
ch = s.scanDigits(s.next(), 16, 8)
default:
s.err("illegal char escape")
}
return ch
}

// scanDigits scans a rune with the given base for n times. For example an
// octal notation \184 would result in scanDigits(ch, 8, 3)
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
for n > 0 && digitVal(ch) < base {
ch = s.next()
n--
}
if n > 0 {
s.err("illegal char escape")
}

// we scanned all digits, put the last non digit char back
s.unread()
return ch
}

// scanIdentifier scans an identifier and returns the literal string
func (s *Scanner) scanIdentifier() string {
offs := s.srcPos.Offset - s.lastCharLen
ch := s.next()
for isLetter(ch) || isDigit(ch) || ch == '-' {
ch = s.next()
}

if ch != eof {
s.unread() // we got identifier, put back latest char
}

return string(s.src[offs:s.srcPos.Offset])
}

// recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos token.Pos) {
pos.Offset = s.srcPos.Offset - s.lastCharLen
switch {
case s.srcPos.Column > 0:
// common case: last character was not a '\n'
pos.Line = s.srcPos.Line
pos.Column = s.srcPos.Column
case s.lastLineLen > 0:
// last character was a '\n'
// (we cannot be at the beginning of the source
// since we have called next() at least once)
pos.Line = s.srcPos.Line - 1
pos.Column = s.lastLineLen
default:
// at the beginning of the source
pos.Line = 1
pos.Column = 1
}
return
}

// err reports a scanning error to the s.Error function. If the function is
// not defined, it prints the error to os.Stderr by default.
func (s *Scanner) err(msg string) {
s.ErrorCount++
pos := s.recentPosition()

if s.Error != nil {
s.Error(pos, msg)
return
}

fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}

// isLetter returns true if the given rune is a letter
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}

// isDigit returns true if the given rune is a decimal digit
func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}

// isDecimal returns true if the given rune is a decimal digit
func isDecimal(ch rune) bool {
return '0' <= ch && ch <= '9'
}

// isHexadecimal returns true if the given rune is a hexadecimal digit
func isHexadecimal(ch rune) bool {
return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
}

// isWhitespace returns true if the rune is a space, tab, newline or carriage return
func isWhitespace(ch rune) bool {
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
}

// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
func digitVal(ch rune) int {
switch {
case '0' <= ch && ch <= '9':
return int(ch - '0')
case 'a' <= ch && ch <= 'f':
return int(ch - 'a' + 10)
case 'A' <= ch && ch <= 'F':
return int(ch - 'A' + 10)
}
return 16 // larger than any legal digit val
}
46
vendor/github.com/hashicorp/hcl/json/token/position.go
generated
vendored
46
vendor/github.com/hashicorp/hcl/json/token/position.go
generated
vendored
@@ -1,46 +0,0 @@
|
||||
package token
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Pos describes an arbitrary source position
|
||||
// including the file, line, and column location.
|
||||
// A Position is valid if the line number is > 0.
|
||||
type Pos struct {
|
||||
Filename string // filename, if any
|
||||
Offset int // offset, starting at 0
|
||||
Line int // line number, starting at 1
|
||||
Column int // column number, starting at 1 (character count)
|
||||
}
|
||||
|
||||
// IsValid returns true if the position is valid.
|
||||
func (p *Pos) IsValid() bool { return p.Line > 0 }
|
||||
|
||||
// String returns a string in one of several forms:
|
||||
//
|
||||
// file:line:column valid position with file name
|
||||
// line:column valid position without file name
|
||||
// file invalid position with file name
|
||||
// - invalid position without file name
|
||||
func (p Pos) String() string {
|
||||
s := p.Filename
|
||||
if p.IsValid() {
|
||||
if s != "" {
|
||||
s += ":"
|
||||
}
|
||||
s += fmt.Sprintf("%d:%d", p.Line, p.Column)
|
||||
}
|
||||
if s == "" {
|
||||
s = "-"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Before reports whether the position p is before u.
|
||||
func (p Pos) Before(u Pos) bool {
|
||||
return u.Offset > p.Offset || u.Line > p.Line
|
||||
}
|
||||
|
||||
// After reports whether the position p is after u.
|
||||
func (p Pos) After(u Pos) bool {
|
||||
return u.Offset < p.Offset || u.Line < p.Line
|
||||
}
|
||||
118
vendor/github.com/hashicorp/hcl/json/token/token.go
generated
vendored
118
vendor/github.com/hashicorp/hcl/json/token/token.go
generated
vendored
@@ -1,118 +0,0 @@
|
||||
package token
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
hcltoken "github.com/hashicorp/hcl/hcl/token"
|
||||
)
|
||||
|
||||
// Token defines a single HCL token which can be obtained via the Scanner
|
||||
type Token struct {
|
||||
Type Type
|
||||
Pos Pos
|
||||
Text string
|
||||
}
|
||||
|
||||
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
|
||||
type Type int
|
||||
|
||||
const (
|
||||
// Special tokens
|
||||
ILLEGAL Type = iota
|
||||
EOF
|
||||
|
||||
identifier_beg
|
||||
literal_beg
|
||||
NUMBER // 12345
|
||||
FLOAT // 123.45
|
||||
BOOL // true,false
|
||||
STRING // "abc"
|
||||
NULL // null
|
||||
literal_end
|
||||
identifier_end
|
||||
|
||||
operator_beg
|
||||
LBRACK // [
|
||||
LBRACE // {
|
||||
COMMA // ,
|
||||
PERIOD // .
|
||||
COLON // :
|
||||
|
||||
RBRACK // ]
|
||||
RBRACE // }
|
||||
|
||||
operator_end
|
||||
)
|
||||
|
||||
var tokens = [...]string{
|
||||
ILLEGAL: "ILLEGAL",
|
||||
|
||||
EOF: "EOF",
|
||||
|
||||
NUMBER: "NUMBER",
|
||||
FLOAT: "FLOAT",
|
||||
BOOL: "BOOL",
|
||||
STRING: "STRING",
|
||||
NULL: "NULL",
|
||||
|
||||
LBRACK: "LBRACK",
|
||||
LBRACE: "LBRACE",
|
||||
COMMA: "COMMA",
|
||||
PERIOD: "PERIOD",
|
||||
COLON: "COLON",
|
||||
|
||||
RBRACK: "RBRACK",
|
||||
RBRACE: "RBRACE",
|
||||
}
|
||||
|
||||
// String returns the string corresponding to the token tok.
|
||||
func (t Type) String() string {
|
||||
s := ""
|
||||
if 0 <= t && t < Type(len(tokens)) {
|
||||
s = tokens[t]
|
||||
}
|
||||
if s == "" {
|
||||
s = "token(" + strconv.Itoa(int(t)) + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// IsIdentifier returns true for tokens corresponding to identifiers and basic
|
||||
// type literals; it returns false otherwise.
|
||||
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
|
||||
|
||||
// IsLiteral returns true for tokens corresponding to basic type literals; it
|
||||
// returns false otherwise.
|
||||
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
|
||||
|
||||
// IsOperator returns true for tokens corresponding to operators and
|
||||
// delimiters; it returns false otherwise.
|
||||
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
|
||||
|
||||
// String returns the token's literal text. Note that this is only
|
||||
// applicable for certain token types, such as token.IDENT,
|
||||
// token.STRING, etc..
|
||||
func (t Token) String() string {
|
||||
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
|
||||
}
|
||||
|
||||
// HCLToken converts this token to an HCL token.
|
||||
//
|
||||
// The token type must be a literal type or this will panic.
|
||||
func (t Token) HCLToken() hcltoken.Token {
|
||||
switch t.Type {
|
||||
case BOOL:
|
||||
return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
|
||||
case FLOAT:
|
||||
return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
|
||||
case NULL:
|
||||
return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
|
||||
case NUMBER:
|
||||
return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
|
||||
case STRING:
|
||||
return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
|
||||
default:
|
||||
panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
|
||||
}
|
||||
}
|
||||
38
vendor/github.com/hashicorp/hcl/lex.go
generated
vendored
38
vendor/github.com/hashicorp/hcl/lex.go
generated
vendored
@@ -1,38 +0,0 @@
|
||||
package hcl
|
||||
|
||||
import (
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type lexModeValue byte
|
||||
|
||||
const (
|
||||
lexModeUnknown lexModeValue = iota
|
||||
lexModeHcl
|
||||
lexModeJson
|
||||
)
|
||||
|
||||
// lexMode returns whether we're going to be parsing in JSON
|
||||
// mode or HCL mode.
|
||||
func lexMode(v []byte) lexModeValue {
|
||||
var (
|
||||
r rune
|
||||
w int
|
||||
offset int
|
||||
)
|
||||
|
||||
for {
|
||||
r, w = utf8.DecodeRune(v[offset:])
|
||||
offset += w
|
||||
if unicode.IsSpace(r) {
|
||||
continue
|
||||
}
|
||||
if r == '{' {
|
||||
return lexModeJson
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
return lexModeHcl
|
||||
}
|
||||
39
vendor/github.com/hashicorp/hcl/parse.go
generated
vendored
39
vendor/github.com/hashicorp/hcl/parse.go
generated
vendored
@@ -1,39 +0,0 @@
|
||||
package hcl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/hcl/ast"
|
||||
hclParser "github.com/hashicorp/hcl/hcl/parser"
|
||||
jsonParser "github.com/hashicorp/hcl/json/parser"
|
||||
)
|
||||
|
||||
// ParseBytes accepts a byte slice as input and returns the AST tree.
|
||||
//
|
||||
// Input can be either JSON or HCL
|
||||
func ParseBytes(in []byte) (*ast.File, error) {
|
||||
return parse(in)
|
||||
}
|
||||
|
||||
// ParseString accepts input as a string and returns the AST tree.
|
||||
func ParseString(input string) (*ast.File, error) {
|
||||
return parse([]byte(input))
|
||||
}
|
||||
|
||||
func parse(in []byte) (*ast.File, error) {
|
||||
switch lexMode(in) {
|
||||
case lexModeHcl:
|
||||
return hclParser.Parse(in)
|
||||
case lexModeJson:
|
||||
return jsonParser.Parse(in)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown config format")
|
||||
}
|
||||
|
||||
// Parse parses the given input and returns the root object.
|
||||
//
|
||||
// The input format can be either HCL or JSON.
|
||||
func Parse(input string) (*ast.File, error) {
|
||||
return parse([]byte(input))
|
||||
}
|
||||
9
vendor/github.com/soniakeys/bits/.travis.yml
generated
vendored
9
vendor/github.com/soniakeys/bits/.travis.yml
generated
vendored
@@ -1,9 +0,0 @@
|
||||
sudo: false
|
||||
language: go
|
||||
go: master
|
||||
before_script:
|
||||
- go vet
|
||||
- go get github.com/client9/misspell/cmd/misspell
|
||||
- misspell -error *
|
||||
- go get github.com/soniakeys/vetc
|
||||
- vetc
|
||||
463
vendor/github.com/soniakeys/bits/bits.go
generated
vendored
463
vendor/github.com/soniakeys/bits/bits.go
generated
vendored
@@ -1,463 +0,0 @@
|
||||
// Copyright 2017 Sonia Keys
|
||||
// License MIT: http://opensource.org/licenses/MIT
|
||||
|
||||
// Bits implements methods on a bit array type.
|
||||
//
|
||||
// The Bits type holds a fixed size array of bits, numbered consecutively
|
||||
// from zero. Some set-like operations are possible, but the API is more
|
||||
// array-like or register-like.
|
||||
package bits
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
mb "math/bits"
|
||||
)
|
||||
|
||||
// Bits holds a fixed number of bits.
|
||||
//
|
||||
// Bit number 0 is stored in the LSB, or bit 0, of the word indexed at 0.
|
||||
//
|
||||
// When Num is not a multiple of 64, the last element of Bits will hold some
|
||||
// bits beyond Num. These bits are undefined. They are not required to be
|
||||
// zero but do not have any meaning. Bits methods are not required to leave
|
||||
// them undisturbed.
|
||||
type Bits struct {
|
||||
Num int // number of bits
|
||||
Bits []uint64
|
||||
}
|
||||
|
||||
// New constructs a Bits value with the given number of bits.
|
||||
//
|
||||
// It panics if num is negative.
|
||||
func New(num int) Bits {
|
||||
if num < 0 {
|
||||
panic("negative number of bits")
|
||||
}
|
||||
return Bits{num, make([]uint64, (num+63)>>6)}
|
||||
}
|
||||
|
||||
// NewGivens constructs a Bits value with the given bit numbers set to 1.
|
||||
//
|
||||
// The number of bits will be just enough to hold the largest bit value
|
||||
// listed. That is, the number of bits will be the max bit number plus one.
|
||||
//
|
||||
// It panics if any bit number is negative.
|
||||
func NewGivens(nums ...int) Bits {
|
||||
max := -1
|
||||
for _, p := range nums {
|
||||
if p > max {
|
||||
max = p
|
||||
}
|
||||
}
|
||||
b := New(max + 1)
|
||||
for _, p := range nums {
|
||||
b.SetBit(p, 1)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// AllOnes returns true if all Num bits are 1.
|
||||
func (b Bits) AllOnes() bool {
|
||||
last := len(b.Bits) - 1
|
||||
for _, w := range b.Bits[:last] {
|
||||
if w != ^uint64(0) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return ^b.Bits[last]<<uint(64*len(b.Bits)-b.Num) == 0
|
||||
}
|
||||
|
||||
// AllZeros returns true if all Num bits are 0.
|
||||
func (b Bits) AllZeros() bool {
|
||||
last := len(b.Bits) - 1
|
||||
for _, w := range b.Bits[:last] {
|
||||
if w != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return b.Bits[last]<<uint(64*len(b.Bits)-b.Num) == 0
|
||||
}
|
||||
|
||||
// And sets z = x & y.
|
||||
//
|
||||
// It panics if x and y do not have the same Num.
|
||||
func (z *Bits) And(x, y Bits) {
|
||||
if x.Num != y.Num {
|
||||
panic("arguments have different number of bits")
|
||||
}
|
||||
if z.Num != x.Num {
|
||||
*z = New(x.Num)
|
||||
}
|
||||
for i, w := range y.Bits {
|
||||
z.Bits[i] = x.Bits[i] & w
|
||||
}
|
||||
}
|
||||
|
||||
// AndNot sets z = x &^ y.
|
||||
//
|
||||
// It panics if x and y do not have the same Num.
|
||||
func (z *Bits) AndNot(x, y Bits) {
|
||||
if x.Num != y.Num {
|
||||
panic("arguments have different number of bits")
|
||||
}
|
||||
if z.Num != x.Num {
|
||||
*z = New(x.Num)
|
||||
}
|
||||
for i, w := range y.Bits {
|
||||
z.Bits[i] = x.Bits[i] &^ w
|
||||
}
|
||||
}
|
||||
|
||||
// Bit returns the value of the n'th bit of receiver b.
|
||||
func (b Bits) Bit(n int) int {
|
||||
if n < 0 || n >= b.Num {
|
||||
panic("bit number out of range")
|
||||
}
|
||||
return int(b.Bits[n>>6] >> uint(n&63) & 1)
|
||||
}
|
||||
|
||||
// ClearAll sets all bits to 0.
|
||||
func (b Bits) ClearAll() {
|
||||
for i := range b.Bits {
|
||||
b.Bits[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// ClearBits sets the given bits to 0 in receiver b.
|
||||
//
|
||||
// Other bits of b are left unchanged.
|
||||
//
|
||||
// It panics if any bit number is out of range.
|
||||
// That is, negative or >= the number of bits.
|
||||
func (b Bits) ClearBits(nums ...int) {
|
||||
for _, p := range nums {
|
||||
b.SetBit(p, 0)
|
||||
}
|
||||
}
|
||||
|
||||
// Equal returns true if all Num bits of a and b are equal.
|
||||
//
|
||||
// It panics if a and b have different Num.
|
||||
func (a Bits) Equal(b Bits) bool {
|
||||
if a.Num != b.Num {
|
||||
panic("receiver and argument have different number of bits")
|
||||
}
|
||||
if a.Num == 0 {
|
||||
return true
|
||||
}
|
||||
last := len(a.Bits) - 1
|
||||
for i, w := range a.Bits[:last] {
|
||||
if w != b.Bits[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return (a.Bits[last]^b.Bits[last])<<uint(len(a.Bits)*64-a.Num) == 0
|
||||
}
|
||||
|
||||
// IterateOnes calls visitor function v for each bit with a value of 1, in order
|
||||
// from lowest bit to highest bit.
|
||||
//
|
||||
// Iteration continues to the highest bit as long as v returns true.
|
||||
// It stops if v returns false.
|
||||
//
|
||||
// IterateOnes returns true normally. It returns false if v returns false.
|
||||
//
|
||||
// IterateOnes may not be sensitive to changes if bits are changed during
|
||||
// iteration, by the visitor function for example.
|
||||
// See OneFrom for an iteration method sensitive to changes during iteration.
|
||||
func (b Bits) IterateOnes(v func(int) bool) bool {
|
||||
for x, w := range b.Bits {
|
||||
if w != 0 {
|
||||
t := mb.TrailingZeros64(w)
|
||||
i := t // index in w of next 1 bit
|
||||
for {
|
||||
n := x<<6 | i
|
||||
if n >= b.Num {
|
||||
return true
|
||||
}
|
||||
if !v(x<<6 | i) {
|
||||
return false
|
||||
}
|
||||
w >>= uint(t + 1)
|
||||
if w == 0 {
|
||||
break
|
||||
}
|
||||
t = mb.TrailingZeros64(w)
|
||||
i += 1 + t
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IterateZeros calls visitor function v for each bit with a value of 0,
|
||||
// in order from lowest bit to highest bit.
|
||||
//
|
||||
// Iteration continues to the highest bit as long as v returns true.
|
||||
// It stops if v returns false.
|
||||
//
|
||||
// IterateZeros returns true normally. It returns false if v returns false.
|
||||
//
|
||||
// IterateZeros may not be sensitive to changes if bits are changed during
|
||||
// iteration, by the visitor function for example.
|
||||
// See ZeroFrom for an iteration method sensitive to changes during iteration.
|
||||
func (b Bits) IterateZeros(v func(int) bool) bool {
|
||||
for x, w := range b.Bits {
|
||||
w = ^w
|
||||
if w != 0 {
|
||||
t := mb.TrailingZeros64(w)
|
||||
i := t // index in w of next 1 bit
|
||||
for {
|
||||
n := x<<6 | i
|
||||
if n >= b.Num {
|
||||
return true
|
||||
}
|
||||
if !v(x<<6 | i) {
|
||||
return false
|
||||
}
|
||||
w >>= uint(t + 1)
|
||||
if w == 0 {
|
||||
break
|
||||
}
|
||||
t = mb.TrailingZeros64(w)
|
||||
i += 1 + t
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Not sets receiver z to the complement of b.
|
||||
func (z *Bits) Not(b Bits) {
|
||||
if z.Num != b.Num {
|
||||
*z = New(b.Num)
|
||||
}
|
||||
for i, w := range b.Bits {
|
||||
z.Bits[i] = ^w
|
||||
}
|
||||
}
|
||||
|
||||
// OneFrom returns the number of the first 1 bit at or after (from) bit num.
|
||||
//
|
||||
// It returns -1 if there is no one bit at or after num.
|
||||
//
|
||||
// This provides one way to iterate over one bits.
|
||||
// To iterate over the one bits, call OneFrom with n = 0 to get the first
|
||||
// one bit, then call with the result + 1 to get successive one bits.
|
||||
// Unlike the Iterate method, this technique is stateless and so allows
|
||||
// bits to be changed between successive calls.
|
||||
//
|
||||
// There is no panic for calling OneFrom with an argument >= b.Num.
|
||||
// In this case OneFrom simply returns -1.
|
||||
//
|
||||
// See also Iterate.
|
||||
func (b Bits) OneFrom(num int) int {
|
||||
if num >= b.Num {
|
||||
return -1
|
||||
}
|
||||
x := num >> 6
|
||||
// test for 1 in this word at or after n
|
||||
if wx := b.Bits[x] >> uint(num&63); wx != 0 {
|
||||
num += mb.TrailingZeros64(wx)
|
||||
if num >= b.Num {
|
||||
return -1
|
||||
}
|
||||
return num
|
||||
}
|
||||
x++
|
||||
for y, wy := range b.Bits[x:] {
|
||||
if wy != 0 {
|
||||
num = (x+y)<<6 | mb.TrailingZeros64(wy)
|
||||
if num >= b.Num {
|
||||
return -1
|
||||
}
|
||||
return num
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// Or sets z = x | y.
|
||||
//
|
||||
// It panics if x and y do not have the same Num.
|
||||
func (z *Bits) Or(x, y Bits) {
|
||||
if x.Num != y.Num {
|
||||
panic("arguments have different number of bits")
|
||||
}
|
||||
if z.Num != x.Num {
|
||||
*z = New(x.Num)
|
||||
}
|
||||
for i, w := range y.Bits {
|
||||
z.Bits[i] = x.Bits[i] | w
|
||||
}
|
||||
}
|
||||
|
||||
// OnesCount returns the number of 1 bits.
|
||||
func (b Bits) OnesCount() (c int) {
|
||||
if b.Num == 0 {
|
||||
return 0
|
||||
}
|
||||
last := len(b.Bits) - 1
|
||||
for _, w := range b.Bits[:last] {
|
||||
c += mb.OnesCount64(w)
|
||||
}
|
||||
c += mb.OnesCount64(b.Bits[last] << uint(len(b.Bits)*64-b.Num))
|
||||
return
|
||||
}
|
||||
|
||||
// Set sets the bits of z to the bits of x.
|
||||
func (z *Bits) Set(b Bits) {
|
||||
if z.Num != b.Num {
|
||||
*z = New(b.Num)
|
||||
}
|
||||
copy(z.Bits, b.Bits)
|
||||
}
|
||||
|
||||
// SetAll sets z to have all 1 bits.
|
||||
func (b Bits) SetAll() {
|
||||
for i := range b.Bits {
|
||||
b.Bits[i] = ^uint64(0)
|
||||
}
|
||||
}
|
||||
|
||||
// SetBit sets the n'th bit to x, where x is a 0 or 1.
|
||||
//
|
||||
// It panics if n is out of range
|
||||
func (b Bits) SetBit(n, x int) {
|
||||
if n < 0 || n >= b.Num {
|
||||
panic("bit number out of range")
|
||||
}
|
||||
if x == 0 {
|
||||
b.Bits[n>>6] &^= 1 << uint(n&63)
|
||||
} else {
|
||||
b.Bits[n>>6] |= 1 << uint(n&63)
|
||||
}
|
||||
}
|
||||
|
||||
// SetBits sets the given bits to 1 in receiver b.
|
||||
//
|
||||
// Other bits of b are left unchanged.
|
||||
//
|
||||
// It panics if any bit number is out of range, negative or >= the number
|
||||
// of bits.
|
||||
func (b Bits) SetBits(nums ...int) {
|
||||
for _, p := range nums {
|
||||
b.SetBit(p, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// Single returns true if b has exactly one 1 bit.
|
||||
func (b Bits) Single() bool {
|
||||
// like OnesCount, but stop as soon as two are found
|
||||
if b.Num == 0 {
|
||||
return false
|
||||
}
|
||||
c := 0
|
||||
last := len(b.Bits) - 1
|
||||
for _, w := range b.Bits[:last] {
|
||||
c += mb.OnesCount64(w)
|
||||
if c > 1 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
c += mb.OnesCount64(b.Bits[last] << uint(len(b.Bits)*64-b.Num))
|
||||
return c == 1
|
||||
}
|
||||
|
||||
// Slice returns a slice with the bit numbers of each 1 bit.
|
||||
func (b Bits) Slice() (s []int) {
|
||||
for x, w := range b.Bits {
|
||||
if w == 0 {
|
||||
continue
|
||||
}
|
||||
t := mb.TrailingZeros64(w)
|
||||
i := t // index in w of next 1 bit
|
||||
for {
|
||||
n := x<<6 | i
|
||||
if n >= b.Num {
|
||||
break
|
||||
}
|
||||
s = append(s, n)
|
||||
w >>= uint(t + 1)
|
||||
if w == 0 {
|
||||
break
|
||||
}
|
||||
t = mb.TrailingZeros64(w)
|
||||
i += 1 + t
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// String returns a readable representation.
|
||||
//
|
||||
// The returned string is big-endian, with the highest number bit first.
|
||||
//
|
||||
// If Num is 0, an empty string is returned.
|
||||
func (b Bits) String() (s string) {
|
||||
if b.Num == 0 {
|
||||
return ""
|
||||
}
|
||||
last := len(b.Bits) - 1
|
||||
for _, w := range b.Bits[:last] {
|
||||
s = fmt.Sprintf("%064b", w) + s
|
||||
}
|
||||
lb := b.Num - 64*last
|
||||
return fmt.Sprintf("%0*b", lb,
|
||||
b.Bits[last]&(^uint64(0)>>uint(64-lb))) + s
|
||||
}
|
||||
|
||||
// Xor sets z = x ^ y.
|
||||
func (z *Bits) Xor(x, y Bits) {
|
||||
if x.Num != y.Num {
|
||||
panic("arguments have different number of bits")
|
||||
}
|
||||
if z.Num != x.Num {
|
||||
*z = New(x.Num)
|
||||
}
|
||||
for i, w := range y.Bits {
|
||||
z.Bits[i] = x.Bits[i] ^ w
|
||||
}
|
||||
}
|
||||
|
||||
// ZeroFrom returns the number of the first 0 bit at or after (from) bit num.
|
||||
//
|
||||
// It returns -1 if there is no zero bit at or after num.
|
||||
//
|
||||
// This provides one way to iterate over zero bits.
|
||||
// To iterate over the zero bits, call ZeroFrom with n = 0 to get the first
|
||||
// zero bit, then call with the result + 1 to get successive zero bits.
|
||||
// Unlike the IterateZeros method, this technique is stateless and so allows
|
||||
// bits to be changed between successive calls.
|
||||
//
|
||||
// There is no panic for calling ZeroFrom with an argument >= b.Num.
|
||||
// In this case ZeroFrom simply returns -1.
|
||||
//
|
||||
// See also IterateZeros.
|
||||
func (b Bits) ZeroFrom(num int) int {
|
||||
// code much like OneFrom except words are negated before testing
|
||||
if num >= b.Num {
|
||||
return -1
|
||||
}
|
||||
x := num >> 6
|
||||
// negate word to test for 0 at or after n
|
||||
if wx := ^b.Bits[x] >> uint(num&63); wx != 0 {
|
||||
num += mb.TrailingZeros64(wx)
|
||||
if num >= b.Num {
|
||||
return -1
|
||||
}
|
||||
return num
|
||||
}
|
||||
x++
|
||||
for y, wy := range b.Bits[x:] {
|
||||
wy = ^wy
|
||||
if wy != 0 {
|
||||
num = (x+y)<<6 | mb.TrailingZeros64(wy)
|
||||
if num >= b.Num {
|
||||
return -1
|
||||
}
|
||||
return num
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
1
vendor/github.com/soniakeys/bits/go.mod
generated
vendored
1
vendor/github.com/soniakeys/bits/go.mod
generated
vendored
@@ -1 +0,0 @@
|
||||
module "github.com/soniakeys/bits"
|
||||
38
vendor/github.com/soniakeys/bits/readme.adoc
generated
vendored
38
vendor/github.com/soniakeys/bits/readme.adoc
generated
vendored
@@ -1,38 +0,0 @@
|
||||
= Bits
|
||||
|
||||
Bits provides methods on a bit array type.
|
||||
|
||||
The Bits type holds a fixed size array of bits, numbered consecutively
|
||||
from zero. Some set-like operations are possible, but the API is more
|
||||
array-like or register-like.
|
||||
|
||||
image:https://godoc.org/github.com/soniakeys/bits?status.svg[link=https://godoc.org/github.com/soniakeys/bits] image:https://travis-ci.org/soniakeys/bits.svg[link=https://travis-ci.org/soniakeys/bits]
|
||||
|
||||
== Motivation and history
|
||||
|
||||
This package evolved from needs of my library of
|
||||
https://github.com/soniakeys/graph[graph algorithms]. For graph algorithms
|
||||
a common need is to store a single bit of information per node in a way that
|
||||
is both fast and memory efficient. I began by using `big.Int` from the standard
|
||||
library, then wrapped big.Int in a type. From time to time I considered
|
||||
other publicly available bit array or bit set packages, such as Will
|
||||
Fitzgerald's popular https://github.com/willf/bitset[bitset], but there were
|
||||
always little reasons I preferred my own type and methods. My type that
|
||||
wrapped `big.Int` met my needs until some simple benchmarks indicated it
|
||||
might be causing performance problems. Some further experiments supported
|
||||
this hypothesis so I ran further tests with a prototype bit array written
|
||||
from scratch. Then satisfied that my custom bit array was solving the graph
|
||||
performance problems, I decided to move it to a separate package with the
|
||||
idea it might have more general utility. For the initial version of this
|
||||
package I did the following:
|
||||
|
||||
- implemented a few tests to demonstrate fundamental correctness
|
||||
- brought over most methods of my type that wrapped big.Int
|
||||
- changed the index type from the graph-specific node index to a general `int`
|
||||
- replaced some custom bit-twiddling with use of the new `math/bits` package
|
||||
in the standard library
|
||||
- renamed a few methods for clarity
|
||||
- added a few methods for symmetry
|
||||
- added a few new methods I had seen a need for in my graph library
|
||||
- added doc, examples, tests, and more tests for 100% coverage
|
||||
- added this readme
|
||||
2
vendor/github.com/soniakeys/graph/.gitignore
generated
vendored
2
vendor/github.com/soniakeys/graph/.gitignore
generated
vendored
@@ -1,2 +0,0 @@
|
||||
*.dot
|
||||
anecdote/anecdote
|
||||
11
vendor/github.com/soniakeys/graph/.travis.yml
generated
vendored
11
vendor/github.com/soniakeys/graph/.travis.yml
generated
vendored
@@ -1,11 +0,0 @@
|
||||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- "1.9.x"
|
||||
- master
|
||||
before_script:
|
||||
- go tool vet -composites=false -printf=false -shift=false .
|
||||
- go get github.com/client9/misspell/cmd/misspell
|
||||
- go get github.com/soniakeys/vetc
|
||||
- misspell -error * */* */*/*
|
||||
- vetc
|
||||
406
vendor/github.com/soniakeys/graph/adj.go
generated
vendored
406
vendor/github.com/soniakeys/graph/adj.go
generated
vendored
@@ -1,406 +0,0 @@
|
||||
// Copyright 2014 Sonia Keys
|
||||
// License MIT: https://opensource.org/licenses/MIT
|
||||
|
||||
package graph
|
||||
|
||||
// adj.go contains methods on AdjacencyList and LabeledAdjacencyList.
|
||||
//
|
||||
// AdjacencyList methods are placed first and are alphabetized.
|
||||
// LabeledAdjacencyList methods follow, also alphabetized.
|
||||
// Only exported methods need be alphabetized; non-exported methods can
|
||||
// be left near their use.
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/soniakeys/bits"
|
||||
)
|
||||
|
||||
// NI is a "node int"
|
||||
//
|
||||
// It is a node number or node ID. NIs are used extensively as slice indexes.
|
||||
// NIs typically account for a significant fraction of the memory footprint of
|
||||
// a graph.
|
||||
type NI int32
|
||||
|
||||
// AnyParallel identifies if a graph contains parallel arcs, multiple arcs
|
||||
// that lead from a node to the same node.
|
||||
//
|
||||
// If the graph has parallel arcs, the results fr and to represent an example
|
||||
// where there are parallel arcs from node `fr` to node `to`.
|
||||
//
|
||||
// If there are no parallel arcs, the method returns false -1 -1.
|
||||
//
|
||||
// Multiple loops on a node count as parallel arcs.
|
||||
//
|
||||
// See also alt.AnyParallelMap, which can perform better for some large
|
||||
// or dense graphs.
|
||||
func (g AdjacencyList) AnyParallel() (has bool, fr, to NI) {
|
||||
var t []NI
|
||||
for n, to := range g {
|
||||
if len(to) == 0 {
|
||||
continue
|
||||
}
|
||||
// different code in the labeled version, so no code gen.
|
||||
t = append(t[:0], to...)
|
||||
sort.Slice(t, func(i, j int) bool { return t[i] < t[j] })
|
||||
t0 := t[0]
|
||||
for _, to := range t[1:] {
|
||||
if to == t0 {
|
||||
return true, NI(n), t0
|
||||
}
|
||||
t0 = to
|
||||
}
|
||||
}
|
||||
return false, -1, -1
|
||||
}
|
||||
|
||||
// Complement returns the arc-complement of a simple graph.
|
||||
//
|
||||
// The result will have an arc for every pair of distinct nodes where there
|
||||
// is not an arc in g. The complement is valid for both directed and
|
||||
// undirected graphs. If g is undirected, the complement will be undirected.
|
||||
// The result will always be a simple graph, having no loops or parallel arcs.
|
||||
func (g AdjacencyList) Complement() AdjacencyList {
|
||||
c := make(AdjacencyList, len(g))
|
||||
b := bits.New(len(g))
|
||||
for n, to := range g {
|
||||
b.ClearAll()
|
||||
for _, to := range to {
|
||||
b.SetBit(int(to), 1)
|
||||
}
|
||||
b.SetBit(n, 1)
|
||||
ct := make([]NI, len(g)-b.OnesCount())
|
||||
i := 0
|
||||
b.IterateZeros(func(to int) bool {
|
||||
ct[i] = NI(to)
|
||||
i++
|
||||
return true
|
||||
})
|
||||
c[n] = ct
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// IsUndirected returns true if g represents an undirected graph.
|
||||
//
|
||||
// Returns true when all non-loop arcs are paired in reciprocal pairs.
|
||||
// Otherwise returns false and an example unpaired arc.
|
||||
func (g AdjacencyList) IsUndirected() (u bool, from, to NI) {
|
||||
// similar code in dot/writeUndirected
|
||||
unpaired := make(AdjacencyList, len(g))
|
||||
for fr, to := range g {
|
||||
arc: // for each arc in g
|
||||
for _, to := range to {
|
||||
if to == NI(fr) {
|
||||
continue // loop
|
||||
}
|
||||
// search unpaired arcs
|
||||
ut := unpaired[to]
|
||||
for i, u := range ut {
|
||||
if u == NI(fr) { // found reciprocal
|
||||
last := len(ut) - 1
|
||||
ut[i] = ut[last]
|
||||
unpaired[to] = ut[:last]
|
||||
continue arc
|
||||
}
|
||||
}
|
||||
// reciprocal not found
|
||||
unpaired[fr] = append(unpaired[fr], to)
|
||||
}
|
||||
}
|
||||
for fr, to := range unpaired {
|
||||
if len(to) > 0 {
|
||||
return false, NI(fr), to[0]
|
||||
}
|
||||
}
|
||||
return true, -1, -1
|
||||
}
|
||||
|
||||
// SortArcLists sorts the arc lists of each node of receiver g.
|
||||
//
|
||||
// Nodes are not relabeled and the graph remains equivalent.
|
||||
func (g AdjacencyList) SortArcLists() {
|
||||
for _, to := range g {
|
||||
sort.Slice(to, func(i, j int) bool { return to[i] < to[j] })
|
||||
}
|
||||
}
|
||||
|
||||
// ------- Labeled methods below -------
|
||||
|
||||
// ArcsAsEdges constructs an edge list with an edge for each arc, including
|
||||
// reciprocals.
|
||||
//
|
||||
// This is a simple way to construct an edge list for algorithms that allow
|
||||
// the duplication represented by the reciprocal arcs. (e.g. Kruskal)
|
||||
//
|
||||
// See also LabeledUndirected.Edges for the edge list without this duplication.
|
||||
func (g LabeledAdjacencyList) ArcsAsEdges() (el []LabeledEdge) {
|
||||
for fr, to := range g {
|
||||
for _, to := range to {
|
||||
el = append(el, LabeledEdge{Edge{NI(fr), to.To}, to.Label})
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DistanceMatrix constructs a distance matrix corresponding to the arcs
|
||||
// of graph g and weight function w.
|
||||
//
|
||||
// An arc from f to t with WeightFunc return w is represented by d[f][t] == w.
|
||||
// In case of parallel arcs, the lowest weight is stored. The distance from
|
||||
// any node to itself d[n][n] is 0, unless the node has a loop with a negative
|
||||
// weight. If g has no arc from f to distinct t, +Inf is stored for d[f][t].
|
||||
//
|
||||
// The returned DistanceMatrix is suitable for DistanceMatrix.FloydWarshall.
|
||||
func (g LabeledAdjacencyList) DistanceMatrix(w WeightFunc) (d DistanceMatrix) {
|
||||
d = newDM(len(g))
|
||||
for fr, to := range g {
|
||||
for _, to := range to {
|
||||
// < to pick min of parallel arcs (also nicely ignores NaN)
|
||||
if wt := w(to.Label); wt < d[fr][to.To] {
|
||||
d[fr][to.To] = wt
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HasArcLabel returns true if g has any arc from node `fr` to node `to`
|
||||
// with label `l`.
|
||||
//
|
||||
// Also returned is the index within the slice of arcs from node `fr`.
|
||||
// If no arc from `fr` to `to` with label `l` is present, HasArcLabel returns
|
||||
// false, -1.
|
||||
func (g LabeledAdjacencyList) HasArcLabel(fr, to NI, l LI) (bool, int) {
|
||||
t := Half{to, l}
|
||||
for x, h := range g[fr] {
|
||||
if h == t {
|
||||
return true, x
|
||||
}
|
||||
}
|
||||
return false, -1
|
||||
}
|
||||
|
||||
// AnyParallel identifies if a graph contains parallel arcs, multiple arcs
|
||||
// that lead from a node to the same node.
|
||||
//
|
||||
// If the graph has parallel arcs, the results fr and to represent an example
|
||||
// where there are parallel arcs from node `fr` to node `to`.
|
||||
//
|
||||
// If there are no parallel arcs, the method returns false -1 -1.
|
||||
//
|
||||
// Multiple loops on a node count as parallel arcs.
|
||||
//
|
||||
// See also alt.AnyParallelMap, which can perform better for some large
|
||||
// or dense graphs.
|
||||
func (g LabeledAdjacencyList) AnyParallel() (has bool, fr, to NI) {
|
||||
var t []NI
|
||||
for n, to := range g {
|
||||
if len(to) == 0 {
|
||||
continue
|
||||
}
|
||||
// slightly different code needed here compared to AdjacencyList
|
||||
t = t[:0]
|
||||
for _, to := range to {
|
||||
t = append(t, to.To)
|
||||
}
|
||||
sort.Slice(t, func(i, j int) bool { return t[i] < t[j] })
|
||||
t0 := t[0]
|
||||
for _, to := range t[1:] {
|
||||
if to == t0 {
|
||||
return true, NI(n), t0
|
||||
}
|
||||
t0 = to
|
||||
}
|
||||
}
|
||||
return false, -1, -1
|
||||
}
|
||||
|
||||
// AnyParallelLabel identifies if a graph contains parallel arcs with the same
|
||||
// label.
|
||||
//
|
||||
// If the graph has parallel arcs with the same label, the results fr and to
|
||||
// represent an example where there are parallel arcs from node `fr`
|
||||
// to node `to`.
|
||||
//
|
||||
// If there are no parallel arcs, the method returns false -1 Half{}.
|
||||
//
|
||||
// Multiple loops on a node count as parallel arcs.
|
||||
func (g LabeledAdjacencyList) AnyParallelLabel() (has bool, fr NI, to Half) {
|
||||
var t []Half
|
||||
for n, to := range g {
|
||||
if len(to) == 0 {
|
||||
continue
|
||||
}
|
||||
// slightly different code needed here compared to AdjacencyList
|
||||
t = t[:0]
|
||||
for _, to := range to {
|
||||
t = append(t, to)
|
||||
}
|
||||
sort.Slice(t, func(i, j int) bool {
|
||||
return t[i].To < t[j].To ||
|
||||
t[i].To == t[j].To && t[i].Label < t[j].Label
|
||||
})
|
||||
t0 := t[0]
|
||||
for _, to := range t[1:] {
|
||||
if to == t0 {
|
||||
return true, NI(n), t0
|
||||
}
|
||||
t0 = to
|
||||
}
|
||||
}
|
||||
return false, -1, Half{}
|
||||
}
|
||||
|
||||
// IsUndirected returns true if g represents an undirected graph.
|
||||
//
|
||||
// Returns true when all non-loop arcs are paired in reciprocal pairs with
|
||||
// matching labels. Otherwise returns false and an example unpaired arc.
|
||||
//
|
||||
// Note the requirement that reciprocal pairs have matching labels is
|
||||
// an additional test not present in the otherwise equivalent unlabeled version
|
||||
// of IsUndirected.
|
||||
func (g LabeledAdjacencyList) IsUndirected() (u bool, from NI, to Half) {
|
||||
// similar code in LabeledAdjacencyList.Edges
|
||||
unpaired := make(LabeledAdjacencyList, len(g))
|
||||
for fr, to := range g {
|
||||
arc: // for each arc in g
|
||||
for _, to := range to {
|
||||
if to.To == NI(fr) {
|
||||
continue // loop
|
||||
}
|
||||
// search unpaired arcs
|
||||
ut := unpaired[to.To]
|
||||
for i, u := range ut {
|
||||
if u.To == NI(fr) && u.Label == to.Label { // found reciprocal
|
||||
last := len(ut) - 1
|
||||
ut[i] = ut[last]
|
||||
unpaired[to.To] = ut[:last]
|
||||
continue arc
|
||||
}
|
||||
}
|
||||
// reciprocal not found
|
||||
unpaired[fr] = append(unpaired[fr], to)
|
||||
}
|
||||
}
|
||||
for fr, to := range unpaired {
|
||||
if len(to) > 0 {
|
||||
return false, NI(fr), to[0]
|
||||
}
|
||||
}
|
||||
return true, -1, to
|
||||
}
|
||||
|
||||
// ArcLabels constructs the multiset of LIs present in g.
|
||||
func (g LabeledAdjacencyList) ArcLabels() map[LI]int {
|
||||
s := map[LI]int{}
|
||||
for _, to := range g {
|
||||
for _, to := range to {
|
||||
s[to.Label]++
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// NegativeArc returns true if the receiver graph contains a negative arc.
|
||||
func (g LabeledAdjacencyList) NegativeArc(w WeightFunc) bool {
|
||||
for _, nbs := range g {
|
||||
for _, nb := range nbs {
|
||||
if w(nb.Label) < 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ParallelArcsLabel identifies all arcs from node `fr` to node `to` with label `l`.
|
||||
//
|
||||
// The returned slice contains an element for each arc from node `fr` to node `to`
|
||||
// with label `l`. The element value is the index within the slice of arcs from node
|
||||
// `fr`.
|
||||
//
|
||||
// See also the method HasArcLabel, which stops after finding a single arc.
|
||||
func (g LabeledAdjacencyList) ParallelArcsLabel(fr, to NI, l LI) (p []int) {
|
||||
t := Half{to, l}
|
||||
for x, h := range g[fr] {
|
||||
if h == t {
|
||||
p = append(p, x)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Unlabeled constructs the unlabeled graph corresponding to g.
|
||||
func (g LabeledAdjacencyList) Unlabeled() AdjacencyList {
|
||||
a := make(AdjacencyList, len(g))
|
||||
for n, nbs := range g {
|
||||
to := make([]NI, len(nbs))
|
||||
for i, nb := range nbs {
|
||||
to[i] = nb.To
|
||||
}
|
||||
a[n] = to
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// WeightedArcsAsEdges constructs a WeightedEdgeList object from the receiver.
|
||||
//
|
||||
// Internally it calls g.ArcsAsEdges() to obtain the Edges member.
|
||||
// See LabeledAdjacencyList.ArcsAsEdges().
|
||||
func (g LabeledAdjacencyList) WeightedArcsAsEdges(w WeightFunc) *WeightedEdgeList {
|
||||
return &WeightedEdgeList{
|
||||
Order: g.Order(),
|
||||
WeightFunc: w,
|
||||
Edges: g.ArcsAsEdges(),
|
||||
}
|
||||
}
|
||||
|
||||
// WeightedInDegree computes the weighted in-degree of each node in g
|
||||
// for a given weight function w.
|
||||
//
|
||||
// The weighted in-degree of a node is the sum of weights of arcs going to
|
||||
// the node.
|
||||
//
|
||||
// A weighted degree of a node is often termed the "strength" of a node.
|
||||
//
|
||||
// See note for undirected graphs at LabeledAdjacencyList.WeightedOutDegree.
|
||||
func (g LabeledAdjacencyList) WeightedInDegree(w WeightFunc) []float64 {
|
||||
ind := make([]float64, len(g))
|
||||
for _, to := range g {
|
||||
for _, to := range to {
|
||||
ind[to.To] += w(to.Label)
|
||||
}
|
||||
}
|
||||
return ind
|
||||
}
|
||||
|
||||
// WeightedOutDegree computes the weighted out-degree of the specified node
|
||||
// for a given weight function w.
|
||||
//
|
||||
// The weighted out-degree of a node is the sum of weights of arcs going from
|
||||
// the node.
|
||||
//
|
||||
// A weighted degree of a node is often termed the "strength" of a node.
|
||||
//
|
||||
// Note for undirected graphs, the WeightedOutDegree result for a node will
|
||||
// equal the WeightedInDegree for the node. You can use WeightedInDegree if
|
||||
// you have need for the weighted degrees of all nodes or use WeightedOutDegree
|
||||
// to compute the weighted degrees of individual nodes. In either case loops
|
||||
// are counted just once, unlike the (unweighted) UndirectedDegree methods.
|
||||
func (g LabeledAdjacencyList) WeightedOutDegree(n NI, w WeightFunc) (d float64) {
|
||||
for _, to := range g[n] {
|
||||
d += w(to.Label)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// More about loops and strength: I didn't see consensus on this especially
|
||||
// in the case of undirected graphs. Some sources said to add in-degree and
|
||||
// out-degree, which would seemingly double both loops and non-loops.
|
||||
// Some said to double loops. Some said sum the edge weights and had no
|
||||
// comment on loops. R of course makes everything an option. The meaning
|
||||
// of "strength" where loops exist is unclear. So while I could write an
|
||||
// UndirectedWeighted degree function that doubles loops but not edges,
|
||||
// I'm going to just leave this for now.
|
||||
417
vendor/github.com/soniakeys/graph/adj_RO.go
generated
vendored
417
vendor/github.com/soniakeys/graph/adj_RO.go
generated
vendored
@@ -1,417 +0,0 @@
|
||||
// Copyright 2014 Sonia Keys
|
||||
// License MIT: http://opensource.org/licenses/MIT
|
||||
|
||||
package graph
|
||||
|
||||
// adj_RO.go is code generated from adj_cg.go by directives in graph.go.
|
||||
// Editing adj_cg.go is okay.
|
||||
// DO NOT EDIT adj_RO.go. The RO is for Read Only.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"github.com/soniakeys/bits"
|
||||
)
|
||||
|
||||
// ArcDensity returns density for a simple directed graph.
|
||||
//
|
||||
// See also ArcDensity function.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g AdjacencyList) ArcDensity() float64 {
|
||||
return ArcDensity(len(g), g.ArcSize())
|
||||
}
|
||||
|
||||
// ArcSize returns the number of arcs in g.
|
||||
//
|
||||
// Note that for an undirected graph without loops, the number of undirected
|
||||
// edges -- the traditional meaning of graph size -- will be ArcSize()/2.
|
||||
// On the other hand, if g is an undirected graph that has or may have loops,
|
||||
// g.ArcSize()/2 is not a meaningful quantity.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g AdjacencyList) ArcSize() int {
|
||||
m := 0
|
||||
for _, to := range g {
|
||||
m += len(to)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// BoundsOk validates that all arcs in g stay within the slice bounds of g.
|
||||
//
|
||||
// BoundsOk returns true when no arcs point outside the bounds of g.
|
||||
// Otherwise it returns false and an example arc that points outside of g.
|
||||
//
|
||||
// Most methods of this package assume the BoundsOk condition and may
|
||||
// panic when they encounter an arc pointing outside of the graph. This
|
||||
// function can be used to validate a graph when the BoundsOk condition
|
||||
// is unknown.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g AdjacencyList) BoundsOk() (ok bool, fr NI, to NI) {
|
||||
for fr, to := range g {
|
||||
for _, to := range to {
|
||||
if to < 0 || to >= NI(len(g)) {
|
||||
return false, NI(fr), to
|
||||
}
|
||||
}
|
||||
}
|
||||
return true, -1, to
|
||||
}
|
||||
|
||||
// BreadthFirst traverses a directed or undirected graph in breadth
|
||||
// first order.
|
||||
//
|
||||
// Traversal starts at node start and visits the nodes reachable from
|
||||
// start. The function visit is called for each node visited. Nodes
|
||||
// not reachable from start are not visited.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
//
|
||||
// See also alt.BreadthFirst, a variant with more options, and
|
||||
// alt.BreadthFirst2, a direction optimizing variant.
|
||||
func (g AdjacencyList) BreadthFirst(start NI, visit func(NI)) {
|
||||
v := bits.New(len(g))
|
||||
v.SetBit(int(start), 1)
|
||||
visit(start)
|
||||
var next []NI
|
||||
for frontier := []NI{start}; len(frontier) > 0; {
|
||||
for _, n := range frontier {
|
||||
for _, nb := range g[n] {
|
||||
if v.Bit(int(nb)) == 0 {
|
||||
v.SetBit(int(nb), 1)
|
||||
visit(nb)
|
||||
next = append(next, nb)
|
||||
}
|
||||
}
|
||||
}
|
||||
frontier, next = next, frontier[:0]
|
||||
}
|
||||
}
|
||||
|
||||
// Copy makes a deep copy of g.
|
||||
// Copy also computes the arc size ma, the number of arcs.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g AdjacencyList) Copy() (c AdjacencyList, ma int) {
|
||||
c = make(AdjacencyList, len(g))
|
||||
for n, to := range g {
|
||||
c[n] = append([]NI{}, to...)
|
||||
ma += len(to)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DepthFirst traverses a directed or undirected graph in depth
|
||||
// first order.
|
||||
//
|
||||
// Traversal starts at node start and visits the nodes reachable from
|
||||
// start. The function visit is called for each node visited. Nodes
|
||||
// not reachable from start are not visited.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
//
|
||||
// See also alt.DepthFirst, a variant with more options.
|
||||
func (g AdjacencyList) DepthFirst(start NI, visit func(NI)) {
|
||||
v := bits.New(len(g))
|
||||
var f func(NI)
|
||||
f = func(n NI) {
|
||||
visit(n)
|
||||
v.SetBit(int(n), 1)
|
||||
for _, to := range g[n] {
|
||||
if v.Bit(int(to)) == 0 {
|
||||
f(to)
|
||||
}
|
||||
}
|
||||
}
|
||||
f(start)
|
||||
}
|
||||
|
||||
// HasArc returns true if g has any arc from node `fr` to node `to`.
|
||||
//
|
||||
// Also returned is the index within the slice of arcs from node `fr`.
|
||||
// If no arc from `fr` to `to` is present, HasArc returns false, -1.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
//
|
||||
// See also the method ParallelArcs, which finds all parallel arcs from
|
||||
// `fr` to `to`.
|
||||
func (g AdjacencyList) HasArc(fr, to NI) (bool, int) {
|
||||
for x, h := range g[fr] {
|
||||
if h == to {
|
||||
return true, x
|
||||
}
|
||||
}
|
||||
return false, -1
|
||||
}
|
||||
|
||||
// AnyLoop identifies if a graph contains a loop, an arc that leads from a
|
||||
// node back to the same node.
|
||||
//
|
||||
// If g contains a loop, the method returns true and an example of a node
|
||||
// with a loop. If there are no loops in g, the method returns false, -1.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g AdjacencyList) AnyLoop() (bool, NI) {
|
||||
for fr, to := range g {
|
||||
for _, to := range to {
|
||||
if NI(fr) == to {
|
||||
return true, to
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, -1
|
||||
}
|
||||
|
||||
// AddNode maps a node in a supergraph to a subgraph node.
|
||||
//
|
||||
// Argument p must be an NI in supergraph s.Super. AddNode panics if
|
||||
// p is not a valid node index of s.Super.
|
||||
//
|
||||
// AddNode is idempotent in that it does not add a new node to the subgraph if
|
||||
// a subgraph node already exists mapped to supergraph node p.
|
||||
//
|
||||
// The mapped subgraph NI is returned.
|
||||
func (s *Subgraph) AddNode(p NI) (b NI) {
|
||||
if int(p) < 0 || int(p) >= s.Super.Order() {
|
||||
panic(fmt.Sprint("AddNode: NI ", p, " not in supergraph"))
|
||||
}
|
||||
if b, ok := s.SubNI[p]; ok {
|
||||
return b
|
||||
}
|
||||
a := s.AdjacencyList
|
||||
b = NI(len(a))
|
||||
s.AdjacencyList = append(a, nil)
|
||||
s.SuperNI = append(s.SuperNI, p)
|
||||
s.SubNI[p] = b
|
||||
return
|
||||
}
|
||||
|
||||
// AddArc adds an arc to a subgraph.
|
||||
//
|
||||
// Arguments fr, to must be NIs in supergraph s.Super. As with AddNode,
|
||||
// AddArc panics if fr and to are not valid node indexes of s.Super.
|
||||
//
|
||||
// The arc specified by fr, to must exist in s.Super. Further, the number of
|
||||
// parallel arcs in the subgraph cannot exceed the number of corresponding
|
||||
// parallel arcs in the supergraph. That is, each arc already added to the
|
||||
// subgraph counts against the arcs available in the supergraph. If a matching
|
||||
// arc is not available, AddArc returns an error.
|
||||
//
|
||||
// If a matching arc is available, subgraph nodes are added as needed, the
|
||||
// subgraph arc is added, and the method returns nil.
|
||||
func (s *Subgraph) AddArc(fr NI, to NI) error {
|
||||
// verify supergraph NIs first, but without adding subgraph nodes just yet.
|
||||
if int(fr) < 0 || int(fr) >= s.Super.Order() {
|
||||
panic(fmt.Sprint("AddArc: NI ", fr, " not in supergraph"))
|
||||
}
|
||||
if int(to) < 0 || int(to) >= s.Super.Order() {
|
||||
panic(fmt.Sprint("AddArc: NI ", to, " not in supergraph"))
|
||||
}
|
||||
// count existing matching arcs in subgraph
|
||||
n := 0
|
||||
a := s.AdjacencyList
|
||||
if bf, ok := s.SubNI[fr]; ok {
|
||||
if bt, ok := s.SubNI[to]; ok {
|
||||
// both NIs already exist in subgraph, need to count arcs
|
||||
bTo := to
|
||||
bTo = bt
|
||||
for _, t := range a[bf] {
|
||||
if t == bTo {
|
||||
n++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// verify matching arcs are available in supergraph
|
||||
for _, t := range (*s.Super)[fr] {
|
||||
if t == to {
|
||||
if n > 0 {
|
||||
n-- // match existing arc
|
||||
continue
|
||||
}
|
||||
// no more existing arcs need to be matched. nodes can finally
|
||||
// be added as needed and then the arc can be added.
|
||||
bf := s.AddNode(fr)
|
||||
to = s.AddNode(to)
|
||||
s.AdjacencyList[bf] = append(s.AdjacencyList[bf], to)
|
||||
return nil // success
|
||||
}
|
||||
}
|
||||
return errors.New("arc not available in supergraph")
|
||||
}
|
||||
|
||||
func (super AdjacencyList) induceArcs(sub map[NI]NI, sup []NI) AdjacencyList {
|
||||
s := make(AdjacencyList, len(sup))
|
||||
for b, p := range sup {
|
||||
var a []NI
|
||||
for _, to := range super[p] {
|
||||
if bt, ok := sub[to]; ok {
|
||||
to = bt
|
||||
a = append(a, to)
|
||||
}
|
||||
}
|
||||
s[b] = a
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// InduceList constructs a node-induced subgraph.
|
||||
//
|
||||
// The subgraph is induced on receiver graph g. Argument l must be a list of
|
||||
// NIs in receiver graph g. Receiver g becomes the supergraph of the induced
|
||||
// subgraph.
|
||||
//
|
||||
// Duplicate NIs are allowed in list l. The duplicates are effectively removed
|
||||
// and only a single corresponding node is created in the subgraph. Subgraph
|
||||
// NIs are mapped in the order of list l, except for ignoring duplicates.
// NIs in l that are not in g will cause a panic.
|
||||
//
|
||||
// Returned is the constructed Subgraph object containing the induced subgraph
|
||||
// and the mappings to the supergraph.
|
||||
func (g *AdjacencyList) InduceList(l []NI) *Subgraph {
|
||||
sub, sup := mapList(l)
|
||||
return &Subgraph{
|
||||
Super: g,
|
||||
SubNI: sub,
|
||||
SuperNI: sup,
|
||||
|
||||
AdjacencyList: g.induceArcs(sub, sup)}
|
||||
}
|
||||
|
||||
// InduceBits constructs a node-induced subgraph.
|
||||
//
|
||||
// The subgraph is induced on receiver graph g. Argument t must be a bitmap
|
||||
// representing NIs in receiver graph g. Receiver g becomes the supergraph
|
||||
// of the induced subgraph. NIs in t that are not in g will panic.
|
||||
//
|
||||
// Returned is the constructed Subgraph object containing the induced subgraph
|
||||
// and the mappings to the supergraph.
|
||||
func (g *AdjacencyList) InduceBits(t bits.Bits) *Subgraph {
|
||||
sub, sup := mapBits(t)
|
||||
return &Subgraph{
|
||||
Super: g,
|
||||
SubNI: sub,
|
||||
SuperNI: sup,
|
||||
|
||||
AdjacencyList: g.induceArcs(sub, sup)}
|
||||
}
|
||||
|
||||
// IsSimple checks for loops and parallel arcs.
|
||||
//
|
||||
// A graph is "simple" if it has no loops or parallel arcs.
|
||||
//
|
||||
// IsSimple returns true, -1 for simple graphs. If a loop or parallel arc is
|
||||
// found, simple returns false and a node that represents a counterexample
|
||||
// to the graph being simple.
|
||||
//
|
||||
// See also separate methods AnyLoop and AnyParallel.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g AdjacencyList) IsSimple() (ok bool, n NI) {
|
||||
if lp, n := g.AnyLoop(); lp {
|
||||
return false, n
|
||||
}
|
||||
if pa, n, _ := g.AnyParallel(); pa {
|
||||
return false, n
|
||||
}
|
||||
return true, -1
|
||||
}
|
||||
|
||||
// IsolatedNodes returns a bitmap of isolated nodes in receiver graph g.
|
||||
//
|
||||
// An isolated node is one with no arcs going to or from it.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g AdjacencyList) IsolatedNodes() (i bits.Bits) {
|
||||
i = bits.New(len(g))
|
||||
i.SetAll()
|
||||
for fr, to := range g {
|
||||
if len(to) > 0 {
|
||||
i.SetBit(fr, 0)
|
||||
for _, to := range to {
|
||||
i.SetBit(int(to), 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Order is the number of nodes in receiver g.
|
||||
//
|
||||
// It is simply a wrapper method for the Go builtin len().
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g AdjacencyList) Order() int {
|
||||
// Why a wrapper for len()? Mostly for Directed and Undirected.
|
||||
// u.Order() is a little nicer than len(u.LabeledAdjacencyList).
|
||||
return len(g)
|
||||
}
|
||||
|
||||
// ParallelArcs identifies all arcs from node `fr` to node `to`.
|
||||
//
|
||||
// The returned slice contains an element for each arc from node `fr` to node `to`.
|
||||
// The element value is the index within the slice of arcs from node `fr`.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
//
|
||||
// See also the method HasArc, which stops after finding a single arc.
|
||||
func (g AdjacencyList) ParallelArcs(fr, to NI) (p []int) {
|
||||
for x, h := range g[fr] {
|
||||
if h == to {
|
||||
p = append(p, x)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Permute permutes the node labeling of receiver g.
|
||||
//
|
||||
// Argument p must be a permutation of the node numbers of the graph,
|
||||
// 0 through len(g)-1. A permutation returned by rand.Perm(len(g)) for
|
||||
// example is acceptable.
|
||||
//
|
||||
// The graph is permuted in place. The graph keeps the same underlying
|
||||
// memory but values of the graph representation are permuted to produce
|
||||
// an isomorphic graph. The node previously labeled 0 becomes p[0] and so on.
|
||||
// See example (or the code) for clarification.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g AdjacencyList) Permute(p []int) {
|
||||
old := append(AdjacencyList{}, g...) // shallow copy
|
||||
for fr, arcs := range old {
|
||||
for i, to := range arcs {
|
||||
arcs[i] = NI(p[to])
|
||||
}
|
||||
g[p[fr]] = arcs
|
||||
}
|
||||
}
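// Illustrative example (added; not part of the original vendored source),
// clarifying the relabeling with a small graph and p = []int{2, 0, 1}:
//
//	g := AdjacencyList{0: {1, 2}, 1: nil, 2: nil}
//	g.Permute([]int{2, 0, 1})
//	// The node formerly labeled 0 is now node 2, and its arcs now lead to
//	// nodes 0 and 1 (formerly 1 and 2): g == AdjacencyList{nil, nil, {0, 1}}.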
|
||||
|
||||
// ShuffleArcLists shuffles the arc lists of each node of receiver g.
|
||||
//
|
||||
// For example a node with arcs leading to nodes 3 and 7 might have an
|
||||
// arc list of either [3 7] or [7 3] after calling this method. The
|
||||
// connectivity of the graph is not changed. The resulting graph stays
|
||||
// equivalent but a traversal will encounter arcs in a different
|
||||
// order.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g AdjacencyList) ShuffleArcLists(r *rand.Rand) {
|
||||
ri := rand.Intn
|
||||
if r != nil {
|
||||
ri = r.Intn
|
||||
}
|
||||
// Knuth-Fisher-Yates
|
||||
for _, to := range g {
|
||||
for i := len(to); i > 1; {
|
||||
j := ri(i)
|
||||
i--
|
||||
to[i], to[j] = to[j], to[i]
|
||||
}
|
||||
}
|
||||
}
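// Illustrative usage (added; not part of the original vendored source):
//
//	g.ShuffleArcLists(nil)                         // rand package default source
//	g.ShuffleArcLists(rand.New(rand.NewSource(1))) // reproducible, seeded source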
|
||||
417
vendor/github.com/soniakeys/graph/adj_cg.go
generated
vendored
@@ -1,417 +0,0 @@
|
||||
// Copyright 2014 Sonia Keys
|
||||
// License MIT: http://opensource.org/licenses/MIT
|
||||
|
||||
package graph
|
||||
|
||||
// adj_RO.go is code generated from adj_cg.go by directives in graph.go.
|
||||
// Editing adj_cg.go is okay.
|
||||
// DO NOT EDIT adj_RO.go. The RO is for Read Only.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"github.com/soniakeys/bits"
|
||||
)
|
||||
|
||||
// ArcDensity returns density for a simple directed graph.
|
||||
//
|
||||
// See also ArcDensity function.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g LabeledAdjacencyList) ArcDensity() float64 {
|
||||
return ArcDensity(len(g), g.ArcSize())
|
||||
}
|
||||
|
||||
// ArcSize returns the number of arcs in g.
|
||||
//
|
||||
// Note that for an undirected graph without loops, the number of undirected
|
||||
// edges -- the traditional meaning of graph size -- will be ArcSize()/2.
|
||||
// On the other hand, if g is an undirected graph that has or may have loops,
|
||||
// g.ArcSize()/2 is not a meaningful quantity.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g LabeledAdjacencyList) ArcSize() int {
|
||||
m := 0
|
||||
for _, to := range g {
|
||||
m += len(to)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// BoundsOk validates that all arcs in g stay within the slice bounds of g.
|
||||
//
|
||||
// BoundsOk returns true when no arcs point outside the bounds of g.
|
||||
// Otherwise it returns false and an example arc that points outside of g.
|
||||
//
|
||||
// Most methods of this package assume the BoundsOk condition and may
|
||||
// panic when they encounter an arc pointing outside of the graph. This
|
||||
// function can be used to validate a graph when the BoundsOk condition
|
||||
// is unknown.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g LabeledAdjacencyList) BoundsOk() (ok bool, fr NI, to Half) {
|
||||
for fr, to := range g {
|
||||
for _, to := range to {
|
||||
if to.To < 0 || to.To >= NI(len(g)) {
|
||||
return false, NI(fr), to
|
||||
}
|
||||
}
|
||||
}
|
||||
return true, -1, to
|
||||
}
|
||||
|
||||
// BreadthFirst traverses a directed or undirected graph in breadth
|
||||
// first order.
|
||||
//
|
||||
// Traversal starts at node start and visits the nodes reachable from
|
||||
// start. The function visit is called for each node visited. Nodes
|
||||
// not reachable from start are not visited.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
//
|
||||
// See also alt.BreadthFirst, a variant with more options, and
|
||||
// alt.BreadthFirst2, a direction optimizing variant.
|
||||
func (g LabeledAdjacencyList) BreadthFirst(start NI, visit func(NI)) {
|
||||
v := bits.New(len(g))
|
||||
v.SetBit(int(start), 1)
|
||||
visit(start)
|
||||
var next []NI
|
||||
for frontier := []NI{start}; len(frontier) > 0; {
|
||||
for _, n := range frontier {
|
||||
for _, nb := range g[n] {
|
||||
if v.Bit(int(nb.To)) == 0 {
|
||||
v.SetBit(int(nb.To), 1)
|
||||
visit(nb.To)
|
||||
next = append(next, nb.To)
|
||||
}
|
||||
}
|
||||
}
|
||||
frontier, next = next, frontier[:0]
|
||||
}
|
||||
}
|
||||
|
||||
// Copy makes a deep copy of g.
|
||||
// Copy also computes the arc size ma, the number of arcs.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g LabeledAdjacencyList) Copy() (c LabeledAdjacencyList, ma int) {
|
||||
c = make(LabeledAdjacencyList, len(g))
|
||||
for n, to := range g {
|
||||
c[n] = append([]Half{}, to...)
|
||||
ma += len(to)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DepthFirst traverses a directed or undirected graph in depth
|
||||
// first order.
|
||||
//
|
||||
// Traversal starts at node start and visits the nodes reachable from
|
||||
// start. The function visit is called for each node visited. Nodes
|
||||
// not reachable from start are not visited.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
//
|
||||
// See also alt.DepthFirst, a variant with more options.
|
||||
func (g LabeledAdjacencyList) DepthFirst(start NI, visit func(NI)) {
|
||||
v := bits.New(len(g))
|
||||
var f func(NI)
|
||||
f = func(n NI) {
|
||||
visit(n)
|
||||
v.SetBit(int(n), 1)
|
||||
for _, to := range g[n] {
|
||||
if v.Bit(int(to.To)) == 0 {
|
||||
f(to.To)
|
||||
}
|
||||
}
|
||||
}
|
||||
f(start)
|
||||
}
|
||||
|
||||
// HasArc returns true if g has any arc from node `fr` to node `to`.
|
||||
//
|
||||
// Also returned is the index within the slice of arcs from node `fr`.
|
||||
// If no arc from `fr` to `to` is present, HasArc returns false, -1.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
//
|
||||
// See also the method ParallelArcs, which finds all parallel arcs from
|
||||
// `fr` to `to`.
|
||||
func (g LabeledAdjacencyList) HasArc(fr, to NI) (bool, int) {
|
||||
for x, h := range g[fr] {
|
||||
if h.To == to {
|
||||
return true, x
|
||||
}
|
||||
}
|
||||
return false, -1
|
||||
}
|
||||
|
||||
// AnyLoop identifies if a graph contains a loop, an arc that leads from a
|
||||
// node back to the same node.
|
||||
//
|
||||
// If g contains a loop, the method returns true and an example of a node
|
||||
// with a loop. If there are no loops in g, the method returns false, -1.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g LabeledAdjacencyList) AnyLoop() (bool, NI) {
|
||||
for fr, to := range g {
|
||||
for _, to := range to {
|
||||
if NI(fr) == to.To {
|
||||
return true, to.To
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, -1
|
||||
}
|
||||
|
||||
// AddNode maps a node in a supergraph to a subgraph node.
|
||||
//
|
||||
// Argument p must be an NI in supergraph s.Super. AddNode panics if
|
||||
// p is not a valid node index of s.Super.
|
||||
//
|
||||
// AddNode is idempotent in that it does not add a new node to the subgraph if
|
||||
// a subgraph node already exists mapped to supergraph node p.
|
||||
//
|
||||
// The mapped subgraph NI is returned.
|
||||
func (s *LabeledSubgraph) AddNode(p NI) (b NI) {
|
||||
if int(p) < 0 || int(p) >= s.Super.Order() {
|
||||
panic(fmt.Sprint("AddNode: NI ", p, " not in supergraph"))
|
||||
}
|
||||
if b, ok := s.SubNI[p]; ok {
|
||||
return b
|
||||
}
|
||||
a := s.LabeledAdjacencyList
|
||||
b = NI(len(a))
|
||||
s.LabeledAdjacencyList = append(a, nil)
|
||||
s.SuperNI = append(s.SuperNI, p)
|
||||
s.SubNI[p] = b
|
||||
return
|
||||
}
|
||||
|
||||
// AddArc adds an arc to a subgraph.
|
||||
//
|
||||
// Arguments fr, to must be NIs in supergraph s.Super. As with AddNode,
|
||||
// AddArc panics if fr and to are not valid node indexes of s.Super.
|
||||
//
|
||||
// The arc specified by fr, to must exist in s.Super. Further, the number of
|
||||
// parallel arcs in the subgraph cannot exceed the number of corresponding
|
||||
// parallel arcs in the supergraph. That is, each arc already added to the
|
||||
// subgraph counts against the arcs available in the supergraph. If a matching
|
||||
// arc is not available, AddArc returns an error.
|
||||
//
|
||||
// If a matching arc is available, subgraph nodes are added as needed, the
|
||||
// subgraph arc is added, and the method returns nil.
|
||||
func (s *LabeledSubgraph) AddArc(fr NI, to Half) error {
|
||||
// verify supergraph NIs first, but without adding subgraph nodes just yet.
|
||||
if int(fr) < 0 || int(fr) >= s.Super.Order() {
|
||||
panic(fmt.Sprint("AddArc: NI ", fr, " not in supergraph"))
|
||||
}
|
||||
if int(to.To) < 0 || int(to.To) >= s.Super.Order() {
|
||||
panic(fmt.Sprint("AddArc: NI ", to.To, " not in supergraph"))
|
||||
}
|
||||
// count existing matching arcs in subgraph
|
||||
n := 0
|
||||
a := s.LabeledAdjacencyList
|
||||
if bf, ok := s.SubNI[fr]; ok {
|
||||
if bt, ok := s.SubNI[to.To]; ok {
|
||||
// both NIs already exist in subgraph, need to count arcs
|
||||
bTo := to
|
||||
bTo.To = bt
|
||||
for _, t := range a[bf] {
|
||||
if t == bTo {
|
||||
n++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// verify matching arcs are available in supergraph
|
||||
for _, t := range (*s.Super)[fr] {
|
||||
if t == to {
|
||||
if n > 0 {
|
||||
n-- // match existing arc
|
||||
continue
|
||||
}
|
||||
// no more existing arcs need to be matched. nodes can finally
|
||||
// be added as needed and then the arc can be added.
|
||||
bf := s.AddNode(fr)
|
||||
to.To = s.AddNode(to.To)
|
||||
s.LabeledAdjacencyList[bf] = append(s.LabeledAdjacencyList[bf], to)
|
||||
return nil // success
|
||||
}
|
||||
}
|
||||
return errors.New("arc not available in supergraph")
|
||||
}
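// Illustrative sketch (added; not part of the original vendored source):
// building an arbitrary labeled subgraph with AddNode and AddArc, assuming
// supergraph g contains the arc 0 -> Half{To: 1, Label: 7}:
//
//	s := g.InduceList(nil) // empty subgraph of g
//	s.AddNode(0)           // idempotent; maps supergraph node 0 into s
//	err := s.AddArc(0, Half{To: 1, Label: 7})
//	// err is nil and node 1 is added automatically. A second identical
//	// AddArc returns an error unless g has a matching parallel arc.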
|
||||
|
||||
func (super LabeledAdjacencyList) induceArcs(sub map[NI]NI, sup []NI) LabeledAdjacencyList {
|
||||
s := make(LabeledAdjacencyList, len(sup))
|
||||
for b, p := range sup {
|
||||
var a []Half
|
||||
for _, to := range super[p] {
|
||||
if bt, ok := sub[to.To]; ok {
|
||||
to.To = bt
|
||||
a = append(a, to)
|
||||
}
|
||||
}
|
||||
s[b] = a
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// InduceList constructs a node-induced subgraph.
|
||||
//
|
||||
// The subgraph is induced on receiver graph g. Argument l must be a list of
|
||||
// NIs in receiver graph g. Receiver g becomes the supergraph of the induced
|
||||
// subgraph.
|
||||
//
|
||||
// Duplicate NIs are allowed in list l. The duplicates are effectively removed
|
||||
// and only a single corresponding node is created in the subgraph. Subgraph
|
||||
// NIs are mapped in the order of list l, except for ignoring duplicates.
|
||||
// NIs in l that are not in g will cause a panic.
|
||||
//
|
||||
// Returned is the constructed Subgraph object containing the induced subgraph
|
||||
// and the mappings to the supergraph.
|
||||
func (g *LabeledAdjacencyList) InduceList(l []NI) *LabeledSubgraph {
|
||||
sub, sup := mapList(l)
|
||||
return &LabeledSubgraph{
|
||||
Super: g,
|
||||
SubNI: sub,
|
||||
SuperNI: sup,
|
||||
|
||||
LabeledAdjacencyList: g.induceArcs(sub, sup)}
|
||||
}
|
||||
|
||||
// InduceBits constructs a node-induced subgraph.
|
||||
//
|
||||
// The subgraph is induced on receiver graph g. Argument t must be a bitmap
|
||||
// representing NIs in receiver graph g. Receiver g becomes the supergraph
|
||||
// of the induced subgraph. NIs in t that are not in g will cause a panic.
|
||||
//
|
||||
// Returned is the constructed Subgraph object containing the induced subgraph
|
||||
// and the mappings to the supergraph.
|
||||
func (g *LabeledAdjacencyList) InduceBits(t bits.Bits) *LabeledSubgraph {
|
||||
sub, sup := mapBits(t)
|
||||
return &LabeledSubgraph{
|
||||
Super: g,
|
||||
SubNI: sub,
|
||||
SuperNI: sup,
|
||||
|
||||
LabeledAdjacencyList: g.induceArcs(sub, sup)}
|
||||
}
|
||||
|
||||
// IsSimple checks for loops and parallel arcs.
|
||||
//
|
||||
// A graph is "simple" if it has no loops or parallel arcs.
|
||||
//
|
||||
// IsSimple returns true, -1 for simple graphs. If a loop or parallel arc is
|
||||
// found, IsSimple returns false and a node that represents a counterexample
|
||||
// to the graph being simple.
|
||||
//
|
||||
// See also separate methods AnyLoop and AnyParallel.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g LabeledAdjacencyList) IsSimple() (ok bool, n NI) {
|
||||
if lp, n := g.AnyLoop(); lp {
|
||||
return false, n
|
||||
}
|
||||
if pa, n, _ := g.AnyParallel(); pa {
|
||||
return false, n
|
||||
}
|
||||
return true, -1
|
||||
}
|
||||
|
||||
// IsolatedNodes returns a bitmap of isolated nodes in receiver graph g.
|
||||
//
|
||||
// An isolated node is one with no arcs going to or from it.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g LabeledAdjacencyList) IsolatedNodes() (i bits.Bits) {
|
||||
i = bits.New(len(g))
|
||||
i.SetAll()
|
||||
for fr, to := range g {
|
||||
if len(to) > 0 {
|
||||
i.SetBit(fr, 0)
|
||||
for _, to := range to {
|
||||
i.SetBit(int(to.To), 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Order is the number of nodes in receiver g.
|
||||
//
|
||||
// It is simply a wrapper method for the Go builtin len().
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g LabeledAdjacencyList) Order() int {
|
||||
// Why a wrapper for len()? Mostly for Directed and Undirected.
|
||||
// u.Order() is a little nicer than len(u.LabeledAdjacencyList).
|
||||
return len(g)
|
||||
}
|
||||
|
||||
// ParallelArcs identifies all arcs from node `fr` to node `to`.
|
||||
//
|
||||
// The returned slice contains an element for each arc from node `fr` to node `to`.
|
||||
// The element value is the index within the slice of arcs from node `fr`.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
//
|
||||
// See also the method HasArc, which stops after finding a single arc.
|
||||
func (g LabeledAdjacencyList) ParallelArcs(fr, to NI) (p []int) {
|
||||
for x, h := range g[fr] {
|
||||
if h.To == to {
|
||||
p = append(p, x)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Permute permutes the node labeling of receiver g.
|
||||
//
|
||||
// Argument p must be a permutation of the node numbers of the graph,
|
||||
// 0 through len(g)-1. A permutation returned by rand.Perm(len(g)) for
|
||||
// example is acceptable.
|
||||
//
|
||||
// The graph is permuted in place. The graph keeps the same underlying
|
||||
// memory but values of the graph representation are permuted to produce
|
||||
// an isomorphic graph. The node previously labeled 0 becomes p[0] and so on.
|
||||
// See example (or the code) for clarification.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g LabeledAdjacencyList) Permute(p []int) {
|
||||
old := append(LabeledAdjacencyList{}, g...) // shallow copy
|
||||
for fr, arcs := range old {
|
||||
for i, to := range arcs {
|
||||
arcs[i].To = NI(p[to.To])
|
||||
}
|
||||
g[p[fr]] = arcs
|
||||
}
|
||||
}
|
||||
|
||||
// ShuffleArcLists shuffles the arc lists of each node of receiver g.
|
||||
//
|
||||
// For example a node with arcs leading to nodes 3 and 7 might have an
|
||||
// arc list of either [3 7] or [7 3] after calling this method. The
|
||||
// connectivity of the graph is not changed. The resulting graph stays
|
||||
// equivalent but a traversal will encounter arcs in a different
|
||||
// order.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// There are equivalent labeled and unlabeled versions of this method.
|
||||
func (g LabeledAdjacencyList) ShuffleArcLists(r *rand.Rand) {
|
||||
ri := rand.Intn
|
||||
if r != nil {
|
||||
ri = r.Intn
|
||||
}
|
||||
// Knuth-Fisher-Yates
|
||||
for _, to := range g {
|
||||
for i := len(to); i > 1; {
|
||||
j := ri(i)
|
||||
i--
|
||||
to[i], to[j] = to[j], to[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
1059
vendor/github.com/soniakeys/graph/dir.go
generated
vendored
File diff suppressed because it is too large
1091
vendor/github.com/soniakeys/graph/dir_RO.go
generated
vendored
File diff suppressed because it is too large
1091
vendor/github.com/soniakeys/graph/dir_cg.go
generated
vendored
File diff suppressed because it is too large
122
vendor/github.com/soniakeys/graph/doc.go
generated
vendored
@@ -1,122 +0,0 @@
|
||||
// Copyright 2014 Sonia Keys
|
||||
// License MIT: http://opensource.org/licenses/MIT
|
||||
|
||||
// Graph algorithms: Dijkstra, A*, Bellman Ford, Floyd Warshall;
|
||||
// Kruskal and Prim minimal spanning tree; topological sort and DAG longest
|
||||
// and shortest paths; Eulerian cycle and path; degeneracy and k-cores;
|
||||
// Bron Kerbosch clique finding; connected components; dominance; and others.
|
||||
//
|
||||
// This is a graph library of integer indexes. To use it with application
|
||||
// data, you associate data with integer indexes, perform searches or other
|
||||
// operations with the library, and then use the integer index results to refer
|
||||
// back to your application data.
|
||||
//
|
||||
// Thus it does not store application data, pointers to application data,
|
||||
// or require you to implement an interface on your application data.
|
||||
// The idea is to keep the library methods fast and lean.
|
||||
//
|
||||
// Representation overview
|
||||
//
|
||||
// The package defines a type for a node index (NI) which is just an integer
|
||||
// type. It defines types for a number of graph representations using
|
||||
// NI. The fundamental graph type is AdjacencyList, which is the
|
||||
// common "list of lists" graph representation. It is a list as a slice
|
||||
// with one element for each node of the graph. Each element is a list
|
||||
// itself, a list of neighbor nodes, implemented as an NI slice. Methods
|
||||
// on an AdjacencyList generally work on any representable graph, including
|
||||
// directed or undirected graphs, simple graphs or multigraphs.
|
||||
//
|
||||
// The type Undirected embeds an AdjacencyList adding methods specific to
|
||||
// undirected graphs. Similarly the type Directed adds methods meaningful
|
||||
// for directed graphs.
|
||||
//
|
||||
// Similar to NI, the type LI is a "label index" which labels a
|
||||
// node-to-neighbor "arc" or edge. Just as an NI can index arbitrary node
|
||||
// data, an LI can index arbitrary arc or edge data. A number of algorithms
|
||||
// use a "weight" associated with an arc. This package does not represent
|
||||
// weighted arcs explicitly, but instead uses the LI as a more general
|
||||
// mechanism allowing not only weights but arbitrary data to be associated
|
||||
// with arcs. While AdjacencyList represents an arc with simply an NI,
|
||||
// the type LabeledAdjacencyList uses a type that pairs an NI with an LI.
|
||||
// This type is named Half, for half-arc. (A full arc would represent
|
||||
// both ends.) Types LabeledDirected and LabeledUndirected embed a
|
||||
// LabeledAdjacencyList.
|
||||
//
|
||||
// In contrast to Half, the type Edge represents both ends of an edge (but
|
||||
// no label.) The type LabeledEdge adds the label. The type WeightedEdgeList
|
||||
// bundles a list of LabeledEdges with a WeightFunc. (WeightedEdgeList has
|
||||
// few methods. It exists primarily to support the Kruskal algorithm.)
|
||||
//
|
||||
// FromList is a compact rooted tree (or forest) representation. Like
|
||||
// AdjacencyList and LabeledAdjacencyList, it is a list with one element for
|
||||
// each node of the graph. Each element contains only a single neighbor
|
||||
// however, its parent in the tree, the "from" node.
|
||||
//
|
||||
// Code generation
|
||||
//
|
||||
// A number of methods on AdjacencyList, Directed, and Undirected are
|
||||
// applicable to LabeledAdjacencyList, LabeledDirected, and LabeledUndirected
|
||||
// simply by ignoring the label. In these cases code generation provides
|
||||
// methods on both types from a single source implementation. These methods
|
||||
// are documented with the sentence "There are equivalent labeled and unlabeled
|
||||
// versions of this method."
|
||||
//
|
||||
// Terminology
|
||||
//
|
||||
// This package uses the term "node" rather than "vertex." It uses "arc"
|
||||
// to mean a directed edge, and uses "from" and "to" to refer to the ends
|
||||
// of an arc. It uses "start" and "end" to refer to endpoints of a search
|
||||
// or traversal.
|
||||
//
|
||||
// The usage of "to" and "from" is perhaps most strange. In common speech
|
||||
// they are prepositions, but throughout this package they are used as
|
||||
// adjectives, for example to refer to the "from node" of an arc or the
|
||||
// "to node". The type "FromList" is named to indicate it stores a list of
|
||||
// "from" values.
|
||||
//
|
||||
// A "half arc" refers to just one end of an arc, either the to or from end.
|
||||
//
|
||||
// Two arcs are "reciprocal" if they connect two distinct nodes n1 and n2,
|
||||
// one arc leading from n1 to n2 and the other arc leading from n2 to n1.
|
||||
// Undirected graphs are represented with reciprocal arcs.
|
||||
//
|
||||
// A node with an arc to itself represents a "loop." Duplicate arcs, where
|
||||
// a node has multiple arcs to another node, are termed "parallel arcs."
|
||||
// A graph with no loops or parallel arcs is "simple." A graph that allows
|
||||
// parallel arcs is a "multigraph."
|
||||
//
|
||||
// The "size" of a graph traditionally means the number of undirected edges.
|
||||
// This package uses "arc size" to mean the number of arcs in a graph. For an
|
||||
// undirected graph without loops, arc size is 2 * size.
|
||||
//
|
||||
// The "order" of a graph is the number of nodes. An "ordering" though means
|
||||
// an ordered list of nodes.
|
||||
//
|
||||
// A number of graph search algorithms use a concept of arc "weights."
|
||||
// The sum of arc weights along a path is a "distance." In contrast, the
|
||||
// number of nodes in a path, including start and end nodes, is the path's
|
||||
// "length." (Yes, mixing weights and lengths would be nonsense physically,
|
||||
// but the terms used here are just distinct terms for abstract values.
|
||||
// The actual meaning to an application is likely to be something else
|
||||
// entirely and is not relevant within this package.)
|
||||
//
|
||||
// Finally, this package documentation takes back the word "object" in some
|
||||
// places to refer to a Go value, especially a value of a type with methods.
|
||||
//
|
||||
// Shortest path searches
|
||||
//
|
||||
// This package implements a number of shortest path searches. Most work
|
||||
// with weighted graphs that are directed or undirected, and with graphs
|
||||
// that may have loops or parallel arcs. For weighted graphs, "shortest"
|
||||
// is defined as the path distance (sum of arc weights) with path length
|
||||
// (number of nodes) breaking ties. If multiple paths have the same minimum
|
||||
// distance with the same minimum length, search methods are free to return
|
||||
// any of them.
|
||||
//
|
||||
// Algorithm Description
|
||||
// Dijkstra Non-negative arc weights, single or all paths.
|
||||
// AStar Non-negative arc weights, heuristic guided, single path.
|
||||
// BellmanFord Negative arc weights allowed, no negative cycles, all paths.
|
||||
// DAGPath O(n) algorithm for DAGs, arc weights of any sign.
|
||||
// FloydWarshall all pairs distances, no negative cycles.
|
||||
package graph
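// Illustrative example (added; not part of the original package
// documentation): associating application data with node indexes. The
// library works purely with NIs; the application maps them back to its
// own data:
//
//	names := []string{"a", "b", "c"} // application data
//	g := AdjacencyList{0: {1, 2}, 1: {2}, 2: nil}
//	var visited []string
//	g.BreadthFirst(0, func(n NI) {
//		visited = append(visited, names[n]) // NI -> application data
//	})
//	// visited == []string{"a", "b", "c"}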
|
||||
498
vendor/github.com/soniakeys/graph/fromlist.go
generated
vendored
@@ -1,498 +0,0 @@
|
||||
// Copyright 2014 Sonia Keys
|
||||
// License MIT: http://opensource.org/licenses/MIT
|
||||
|
||||
package graph
|
||||
|
||||
import "github.com/soniakeys/bits"
|
||||
|
||||
// FromList represents a rooted tree (or forest) where each node is associated
|
||||
// with a half arc identifying an arc "from" another node.
|
||||
//
|
||||
// Other terms for this data structure include "parent list",
|
||||
// "predecessor list", "in-tree", "inverse arborescence", and
|
||||
// "spaghetti stack."
|
||||
//
|
||||
// The Paths member represents the tree structure. Leaves and MaxLen are
|
||||
// not always needed. Where Leaves is used it serves as a bitmap where
|
||||
// Leaves.Bit(n) == 1 for each leaf n of the tree. Where MaxLen is used it is
|
||||
// provided primarily as a convenience for functions that might want to
|
||||
// anticipate the maximum path length that would be encountered traversing
|
||||
// the tree.
|
||||
//
|
||||
// Various graph search methods use a FromList to return search results.
|
||||
// For a start node of a search, From will be -1 and Len will be 1. For other
|
||||
// nodes reached by the search, From represents a half arc in a path back to
|
||||
// start and Len represents the number of nodes in the path. For nodes not
|
||||
// reached by the search, From will be -1 and Len will be 0.
|
||||
//
|
||||
// A single FromList can also represent a forest. In this case paths from
|
||||
// all leaves do not return to a single root node, but multiple root nodes.
|
||||
//
|
||||
// While a FromList generally encodes a tree or forest, it is technically
|
||||
// possible to encode a cyclic graph. A number of FromList methods require
|
||||
// the receiver to be acyclic. Graph methods documented to return a tree or
|
||||
// forest will never return a cyclic FromList. In other cases however,
|
||||
// where a FromList is not known to be acyclic, the Cyclic method can be
|
||||
// useful to validate the acyclic property.
|
||||
type FromList struct {
|
||||
Paths []PathEnd // tree representation
|
||||
Leaves bits.Bits // leaves of tree
|
||||
MaxLen int // length of longest path, max of all PathEnd.Len values
|
||||
}
|
||||
|
||||
// PathEnd associates a half arc and a path length.
|
||||
//
|
||||
// A PathEnd list is an element type of FromList.
|
||||
type PathEnd struct {
|
||||
From NI // a "from" half arc, the node the arc comes from
|
||||
Len int // number of nodes in path from start
|
||||
}
|
||||
|
||||
/* NewFromList could be confusing now with bits also needing allocation.
|
||||
maybe best to not have this function. Maybe a more useful new would be
|
||||
one that took a PathEnd slice and initialized everything including roots
|
||||
and max len. Maybe it's time for a separate []PathEnd type when that's
|
||||
all that's needed. (and reconsider the name PathEnd)
|
||||
*/
|
||||
|
||||
// NewFromList creates a FromList object of given order.
|
||||
//
|
||||
// The Paths member is allocated to the specified order n but other members
|
||||
// are left as zero values.
|
||||
func NewFromList(n int) FromList {
|
||||
return FromList{Paths: make([]PathEnd, n)}
|
||||
}
|
||||
|
||||
// BoundsOk validates the "from" values in the list.
|
||||
//
|
||||
// Negative values are allowed as they indicate root nodes.
|
||||
//
|
||||
// BoundsOk returns true when all from values are less than len(t).
|
||||
// Otherwise it returns false and a node with a from value >= len(t).
|
||||
func (f FromList) BoundsOk() (ok bool, n NI) {
|
||||
for n, e := range f.Paths {
|
||||
if int(e.From) >= len(f.Paths) {
|
||||
return false, NI(n)
|
||||
}
|
||||
}
|
||||
return true, -1
|
||||
}
|
||||
|
||||
// CommonStart returns the common start node of minimal paths to a and b.
|
||||
//
|
||||
// It returns -1 if a and b cannot be traced back to a common node.
|
||||
//
|
||||
// The method relies on populated PathEnd.Len members. Use RecalcLen if
|
||||
// the Len members are not known to be present and correct.
|
||||
func (f FromList) CommonStart(a, b NI) NI {
|
||||
p := f.Paths
|
||||
if p[a].Len < p[b].Len {
|
||||
a, b = b, a
|
||||
}
|
||||
for bl := p[b].Len; p[a].Len > bl; {
|
||||
a = p[a].From
|
||||
if a < 0 {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
for a != b {
|
||||
a = p[a].From
|
||||
if a < 0 {
|
||||
return -1
|
||||
}
|
||||
b = p[b].From
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// Cyclic determines if f contains a cycle, a non-empty path from a node
|
||||
// back to itself.
|
||||
//
|
||||
// Cyclic returns true if g contains at least one cycle. It also returns
|
||||
// an example of a node involved in a cycle.
|
||||
//
|
||||
// Cyclic returns (false, -1) in the normal case where f is acyclic.
|
||||
// Note that the bool is not an "ok" return. A cyclic FromList is usually
|
||||
// not okay.
|
||||
func (f FromList) Cyclic() (cyclic bool, n NI) {
|
||||
p := f.Paths
|
||||
vis := bits.New(len(p))
|
||||
for i := range p {
|
||||
path := bits.New(len(p))
|
||||
for n := i; vis.Bit(n) == 0; {
|
||||
vis.SetBit(n, 1)
|
||||
path.SetBit(n, 1)
|
||||
if n = int(p[n].From); n < 0 {
|
||||
break
|
||||
}
|
||||
if path.Bit(n) == 1 {
|
||||
return true, NI(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, -1
|
||||
}
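// Illustrative usage (added; not part of the original vendored source):
//
//	if cyclic, n := f.Cyclic(); cyclic {
//		// n lies on a cycle; methods that require an acyclic FromList,
//		// such as RecalcLen, must not be called on f.
//		_ = n
//	}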
|
||||
|
||||
// IsolatedNodeBits returns a bitmap of isolated nodes in receiver graph f.
|
||||
//
|
||||
// An isolated node is one with no arcs going to or from it.
|
||||
func (f FromList) IsolatedNodes() (iso bits.Bits) {
|
||||
p := f.Paths
|
||||
iso = bits.New(len(p))
|
||||
iso.SetAll()
|
||||
for n, e := range p {
|
||||
if e.From >= 0 {
|
||||
iso.SetBit(n, 0)
|
||||
iso.SetBit(int(e.From), 0)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PathTo decodes a FromList, recovering a single path.
|
||||
//
|
||||
// The path is returned as a list of nodes where the first element will be
|
||||
// a root node and the last element will be the specified end node.
|
||||
//
|
||||
// Only the Paths member of the receiver is used. Other members of the
|
||||
// FromList do not need to be valid, however the MaxLen member can be useful
|
||||
// for allocating argument p.
|
||||
//
|
||||
// Argument p can provide the result slice. If p has capacity for the result
|
||||
// it will be used, otherwise a new slice is created for the result.
|
||||
//
|
||||
// See also function PathTo.
|
||||
func (f FromList) PathTo(end NI, p []NI) []NI {
|
||||
return PathTo(f.Paths, end, p)
|
||||
}
|
||||
|
||||
// PathTo decodes a single path from a PathEnd list.
|
||||
//
|
||||
// A PathEnd list is the main data representation in a FromList. See FromList.
|
||||
//
|
||||
// PathTo returns a list of nodes where the first element will be
|
||||
// a root node and the last element will be the specified end node.
|
||||
//
|
||||
// Argument p can provide the result slice. If p has capacity for the result
|
||||
// it will be used, otherwise a new slice is created for the result.
|
||||
//
|
||||
// See also method FromList.PathTo.
|
||||
func PathTo(paths []PathEnd, end NI, p []NI) []NI {
|
||||
n := paths[end].Len
|
||||
if n == 0 {
|
||||
return p[:0]
|
||||
}
|
||||
if cap(p) >= n {
|
||||
p = p[:n]
|
||||
} else {
|
||||
p = make([]NI, n)
|
||||
}
|
||||
for {
|
||||
n--
|
||||
p[n] = end
|
||||
if n == 0 {
|
||||
return p
|
||||
}
|
||||
end = paths[end].From
|
||||
}
|
||||
}
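// Illustrative usage (added; not part of the original vendored source):
// recovering a path from search results held in FromList f, assuming end
// is an NI reached by the search.
//
//	p := f.PathTo(end, nil) // allocates a new slice
//	buf := make([]NI, f.MaxLen)
//	p = f.PathTo(end, buf) // reuses buf, which has sufficient capacity
//	// p[0] is a root of the tree and p[len(p)-1] == end.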
|
||||
|
||||
// PathToLabeled decodes a FromList, recovering a single path.
|
||||
//
|
||||
// The start of the returned path will be a root node of the FromList.
|
||||
//
|
||||
// Only the Paths member of the receiver is used. Other members of the
|
||||
// FromList do not need to be valid, however the MaxLen member can be useful
|
||||
// for allocating argument p.
|
||||
//
|
||||
// Argument p can provide the result slice. If p has capacity for the result
|
||||
// it will be used, otherwise a new slice is created for the result.
|
||||
//
|
||||
// See also function PathTo.
|
||||
func (f FromList) PathToLabeled(end NI, labels []LI, p []Half) LabeledPath {
|
||||
n := f.Paths[end].Len - 1
|
||||
if n <= 0 {
|
||||
return LabeledPath{end, p[:0]}
|
||||
}
|
||||
if cap(p) >= n {
|
||||
p = p[:n]
|
||||
} else {
|
||||
p = make([]Half, n)
|
||||
}
|
||||
for {
|
||||
n--
|
||||
p[n] = Half{To: end, Label: labels[end]}
|
||||
end = f.Paths[end].From
|
||||
if n == 0 {
|
||||
return LabeledPath{end, p}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Preorder traverses a FromList in preorder.
|
||||
//
|
||||
// Nodes are visited in order such that for any node n with from node fr,
|
||||
// fr is visited before n. Where f represents a tree, the visit ordering
|
||||
// corresponds to a preordering, or depth first traversal of the tree.
|
||||
// Where f represents a forest, the preorderings of the trees can be
|
||||
// intermingled.
|
||||
//
|
||||
// Leaves must be set correctly first. Use RecalcLeaves if leaves are not
|
||||
// known to be set correctly. FromList f cannot be cyclic.
|
||||
//
|
||||
// Traversal continues while visitor function v returns true. It terminates
|
||||
// if v returns false. Preorder returns true if it completes without v
|
||||
// returning false. Preorder returns false if traversal is terminated by v
|
||||
// returning false.
|
||||
func (f FromList) Preorder(v func(NI) bool) bool {
|
||||
p := f.Paths
|
||||
done := bits.New(len(p))
|
||||
var df func(NI) bool
|
||||
df = func(n NI) bool {
|
||||
done.SetBit(int(n), 1)
|
||||
if fr := p[n].From; fr >= 0 && done.Bit(int(fr)) == 0 {
|
||||
df(fr)
|
||||
}
|
||||
return v(n)
|
||||
}
|
||||
for n := range f.Paths {
|
||||
p[n].Len = 0
|
||||
}
|
||||
return f.Leaves.IterateOnes(func(n int) bool {
|
||||
return df(NI(n))
|
||||
})
|
||||
}
|
||||
|
||||
// RecalcLeaves recomputes the Leaves member of f.
|
||||
func (f *FromList) RecalcLeaves() {
|
||||
p := f.Paths
|
||||
lv := &f.Leaves
|
||||
if lv.Num != len(p) {
|
||||
*lv = bits.New(len(p))
|
||||
}
|
||||
lv.SetAll()
|
||||
for n := range f.Paths {
|
||||
if fr := p[n].From; fr >= 0 {
|
||||
lv.SetBit(int(fr), 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RecalcLen recomputes Len for each path end, and recomputes MaxLen.
|
||||
//
|
||||
// RecalcLen relies on the Leaves member being valid. If it is not known
|
||||
// to be valid, call RecalcLeaves before calling RecalcLen.
|
||||
//
|
||||
// RecalcLen will panic if the FromList is cyclic. Use the Cyclic method
|
||||
// if needed to verify that the FromList is acyclic.
|
||||
func (f *FromList) RecalcLen() {
|
||||
p := f.Paths
|
||||
var setLen func(NI) int
|
||||
setLen = func(n NI) int {
|
||||
switch {
|
||||
case p[n].Len > 0:
|
||||
return p[n].Len
|
||||
case p[n].From < 0:
|
||||
p[n].Len = 1
|
||||
return 1
|
||||
}
|
||||
l := 1 + setLen(p[n].From)
|
||||
p[n].Len = l
|
||||
return l
|
||||
}
|
||||
for n := range f.Paths {
|
||||
p[n].Len = 0
|
||||
}
|
||||
f.MaxLen = 0
|
||||
f.Leaves.IterateOnes(func(n int) bool {
|
||||
if l := setLen(NI(n)); l > f.MaxLen {
|
||||
f.MaxLen = l
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// ReRoot reorients the tree containing n to make n the root node.
|
||||
//
|
||||
// It keeps the tree connected by "reversing" the path from n to the old root.
|
||||
//
|
||||
// After ReRoot, the Leaves and Len members are invalid.
|
||||
// Call RecalcLeaves or RecalcLen as needed.
|
||||
func (f *FromList) ReRoot(n NI) {
|
||||
p := f.Paths
|
||||
fr := p[n].From
|
||||
if fr < 0 {
|
||||
return
|
||||
}
|
||||
p[n].From = -1
|
||||
for {
|
||||
ff := p[fr].From
|
||||
p[fr].From = n
|
||||
if ff < 0 {
|
||||
return
|
||||
}
|
||||
n = fr
|
||||
fr = ff
|
||||
}
|
||||
}
|
||||
|
||||
// Root finds the root of a node in a FromList.
|
||||
func (f FromList) Root(n NI) NI {
|
||||
for p := f.Paths; ; {
|
||||
fr := p[n].From
|
||||
if fr < 0 {
|
||||
return n
|
||||
}
|
||||
n = fr
|
||||
}
|
||||
}
|
||||
|
||||
// Transpose constructs the directed graph corresponding to FromList f
|
||||
// but with arcs in the opposite direction. That is, from roots toward leaves.
|
||||
//
|
||||
// If non-nil argument roots is passed, Transpose populates it as roots of
|
||||
// the resulting forest and returns nRoots as a count of the roots.
|
||||
//
|
||||
// The method relies only on the From member of f.Paths. Other members of
|
||||
// the FromList are not used.
|
||||
func (f FromList) Transpose(roots *bits.Bits) (forest Directed, nRoots int) {
|
||||
p := f.Paths
|
||||
g := make(AdjacencyList, len(p))
|
||||
if roots != nil {
|
||||
nRoots = len(p)
|
||||
if roots.Num != nRoots {
|
||||
*roots = bits.New(nRoots)
|
||||
}
|
||||
roots.SetAll()
|
||||
}
|
||||
for i, e := range p {
|
||||
if e.From == -1 {
|
||||
continue
|
||||
}
|
||||
g[e.From] = append(g[e.From], NI(i))
|
||||
if roots != nil && roots.Bit(i) == 1 {
|
||||
roots.SetBit(i, 0)
|
||||
nRoots--
|
||||
}
|
||||
}
|
||||
return Directed{g}, nRoots
|
||||
}
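// Illustrative usage (added; not part of the original vendored source):
//
//	var roots bits.Bits
//	forest, nRoots := f.Transpose(&roots)
//	// forest has an arc from each parent to each of its children;
//	// roots has a bit set for each of the nRoots root nodes.
//	_, _ = forest, nRoots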
|
||||
|
||||
// TransposeLabeled constructs the labeled directed graph corresponding
|
||||
// to FromList f but with arcs in the opposite direction. That is, from
|
||||
// roots toward leaves.
|
||||
//
|
||||
// The argument labels can be nil. In this case labels are generated matching
|
||||
// the path indexes. This corresponds to the "to", or child node.
|
||||
//
|
||||
// If labels is non-nil, it must be the same length as f.Paths and is used
|
||||
// to look up label numbers by the path index.
|
||||
//
|
||||
// If non-nil argument roots is passed, TransposeLabeled populates it as roots of
|
||||
// the resulting forest and returns nRoots as a count of the roots.
|
||||
//
|
||||
// The method relies only on the From member of f.Paths. Other members of
|
||||
// the FromList are not used.
|
||||
func (f FromList) TransposeLabeled(labels []LI, roots *bits.Bits) (forest LabeledDirected, nRoots int) {
|
||||
p := f.Paths
|
||||
g := make(LabeledAdjacencyList, len(p))
|
||||
if roots != nil {
|
||||
nRoots = len(p)
|
||||
if roots.Num != nRoots {
|
||||
*roots = bits.New(nRoots)
|
||||
}
|
||||
roots.SetAll()
|
||||
}
|
||||
for i, p := range f.Paths {
|
||||
if p.From == -1 {
|
||||
continue
|
||||
}
|
||||
l := LI(i)
|
||||
if labels != nil {
|
||||
l = labels[i]
|
||||
}
|
||||
g[p.From] = append(g[p.From], Half{NI(i), l})
|
||||
if roots != nil && roots.Bit(i) == 1 {
|
||||
roots.SetBit(i, 0)
|
||||
nRoots--
|
||||
}
|
||||
}
|
||||
return LabeledDirected{g}, nRoots
|
||||
}
|
||||
|
||||
// Undirected constructs the undirected graph corresponding to FromList f.
|
||||
//
|
||||
// The resulting graph will be a tree or forest.
|
||||
//
|
||||
// If non-nil argument roots is passed, Undirected populates it as roots of
|
||||
// the resulting forest and returns nRoots as a count of the roots.
|
||||
//
|
||||
// The method relies only on the From member of f.Paths. Other members of
|
||||
// the FromList are not used.
|
||||
func (f FromList) Undirected(roots *bits.Bits) (forest Undirected, nRoots int) {
|
||||
p := f.Paths
|
||||
g := make(AdjacencyList, len(p))
|
||||
if roots != nil {
|
||||
nRoots = len(p)
|
||||
if roots.Num != nRoots {
|
||||
*roots = bits.New(nRoots)
|
||||
}
|
||||
roots.SetAll()
|
||||
}
|
||||
for i, e := range p {
|
||||
if e.From == -1 {
|
||||
continue
|
||||
}
|
||||
g[i] = append(g[i], e.From)
|
||||
g[e.From] = append(g[e.From], NI(i))
|
||||
if roots != nil && roots.Bit(i) == 1 {
|
||||
roots.SetBit(i, 0)
|
||||
nRoots--
|
||||
}
|
||||
}
|
||||
return Undirected{g}, nRoots
|
||||
}
|
||||
|
||||
// LabeledUndirected constructs the labeled undirected graph corresponding
|
||||
// to FromList f.
|
||||
//
|
||||
// The resulting graph will be a tree or forest.
|
||||
//
|
||||
// The argument labels can be nil. In this case labels are generated matching
|
||||
// the path indexes. This corresponds to the "to", or child node.
|
||||
//
|
||||
// If labels is non-nil, it must be the same length as f.Paths and is used
|
||||
// to look up label numbers by the path index.
|
||||
//
|
||||
// If non-nil argument roots is passed, LabeledUndirected populates it as
|
||||
// roots of the resulting forest and returns nRoots as a count of the roots.
|
||||
//
|
||||
// The method relies only on the From member of f.Paths. Other members of
|
||||
// the FromList are not used.
|
||||
func (f FromList) LabeledUndirected(labels []LI, roots *bits.Bits) (forest LabeledUndirected, nRoots int) {
|
||||
p := f.Paths
|
||||
g := make(LabeledAdjacencyList, len(p))
|
||||
if roots != nil {
|
||||
nRoots = len(p)
|
||||
if roots.Num != nRoots {
|
||||
*roots = bits.New(nRoots)
|
||||
}
|
||||
roots.SetAll()
|
||||
}
|
||||
for i, p := range f.Paths {
|
||||
if p.From == -1 {
|
||||
continue
|
||||
}
|
||||
l := LI(i)
|
||||
if labels != nil {
|
||||
l = labels[i]
|
||||
}
|
||||
g[i] = append(g[i], Half{p.From, l})
|
||||
g[p.From] = append(g[p.From], Half{NI(i), l})
|
||||
if roots != nil && roots.Bit(i) == 1 {
|
||||
roots.SetBit(i, 0)
|
||||
nRoots--
|
||||
}
|
||||
}
|
||||
return LabeledUndirected{g}, nRoots
|
||||
}
|
||||
3
vendor/github.com/soniakeys/graph/go.mod
generated
vendored
@@ -1,3 +0,0 @@
|
||||
module "github.com/soniakeys/graph"
|
||||
|
||||
require "github.com/soniakeys/bits" v1.0.0
|
||||
767
vendor/github.com/soniakeys/graph/graph.go
generated
vendored
@@ -1,767 +0,0 @@
|
||||
// Copyright 2014 Sonia Keys
|
||||
// License MIT: http://opensource.org/licenses/MIT
|
||||
|
||||
package graph
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"text/template"
|
||||
|
||||
"github.com/soniakeys/bits"
|
||||
)
|
||||
|
||||
// graph.go contains type definitions for all graph types and components.
|
||||
// Also, go generate directives for source transformations.
|
||||
//
|
||||
// For readability, the types are defined in a dependency order:
|
||||
//
|
||||
// NI
|
||||
// AdjacencyList
|
||||
// Directed
|
||||
// Undirected
|
||||
// Bipartite
|
||||
// Subgraph
|
||||
// DirectedSubgraph
|
||||
// UndirectedSubgraph
|
||||
// LI
|
||||
// Half
|
||||
// fromHalf
|
||||
// LabeledAdjacencyList
|
||||
// LabeledDirected
|
||||
// LabeledUndirected
|
||||
// LabeledBipartite
|
||||
// LabeledSubgraph
|
||||
// LabeledDirectedSubgraph
|
||||
// LabeledUndirectedSubgraph
|
||||
// Edge
|
||||
// LabeledEdge
|
||||
// LabeledPath
|
||||
// WeightFunc
|
||||
// WeightedEdgeList
|
||||
// TraverseOption
|
||||
|
||||
//go:generate cp adj_cg.go adj_RO.go
|
||||
//go:generate gofmt -r "LabeledAdjacencyList -> AdjacencyList" -w adj_RO.go
|
||||
//go:generate gofmt -r "n.To -> n" -w adj_RO.go
|
||||
//go:generate gofmt -r "Half -> NI" -w adj_RO.go
|
||||
//go:generate gofmt -r "LabeledSubgraph -> Subgraph" -w adj_RO.go
|
||||
|
||||
//go:generate cp dir_cg.go dir_RO.go
|
||||
//go:generate gofmt -r "LabeledDirected -> Directed" -w dir_RO.go
|
||||
//go:generate gofmt -r "LabeledDirectedSubgraph -> DirectedSubgraph" -w dir_RO.go
|
||||
//go:generate gofmt -r "LabeledAdjacencyList -> AdjacencyList" -w dir_RO.go
|
||||
//go:generate gofmt -r "labEulerian -> eulerian" -w dir_RO.go
|
||||
//go:generate gofmt -r "newLabEulerian -> newEulerian" -w dir_RO.go
|
||||
//go:generate gofmt -r "Half{n, -1} -> n" -w dir_RO.go
|
||||
//go:generate gofmt -r "n.To -> n" -w dir_RO.go
|
||||
//go:generate gofmt -r "Half -> NI" -w dir_RO.go
|
||||
|
||||
//go:generate cp undir_cg.go undir_RO.go
|
||||
//go:generate gofmt -r "LabeledUndirected -> Undirected" -w undir_RO.go
|
||||
//go:generate gofmt -r "LabeledBipartite -> Bipartite" -w undir_RO.go
|
||||
//go:generate gofmt -r "LabeledUndirectedSubgraph -> UndirectedSubgraph" -w undir_RO.go
|
||||
//go:generate gofmt -r "LabeledAdjacencyList -> AdjacencyList" -w undir_RO.go
|
||||
//go:generate gofmt -r "newLabEulerian -> newEulerian" -w undir_RO.go
|
||||
//go:generate gofmt -r "Half{n, -1} -> n" -w undir_RO.go
|
||||
//go:generate gofmt -r "n.To -> n" -w undir_RO.go
|
||||
//go:generate gofmt -r "Half -> NI" -w undir_RO.go
|
||||
|
||||
// An AdjacencyList represents a graph as a list of neighbors for each node.
|
||||
// The "node ID" of a node is simply it's slice index in the AdjacencyList.
|
||||
// For an AdjacencyList g, g[n] represents arcs going from node n to nodes
|
||||
// g[n].
|
||||
//
|
||||
// Adjacency lists are inherently directed but can be used to represent
|
||||
// directed or undirected graphs. See types Directed and Undirected.
|
||||
type AdjacencyList [][]NI
|
||||
|
||||
// Directed represents a directed graph.
|
||||
//
|
||||
// Directed methods generally rely on the graph being directed, specifically
|
||||
// that arcs do not have reciprocals.
|
||||
type Directed struct {
|
||||
AdjacencyList // embedded to include AdjacencyList methods
|
||||
}
|
||||
|
||||
// Undirected represents an undirected graph.
|
||||
//
|
||||
// In an undirected graph, for each arc between distinct nodes there is also
|
||||
// a reciprocal arc, an arc in the opposite direction. Loops do not have
|
||||
// reciprocals.
|
||||
//
|
||||
// Undirected methods generally rely on the graph being undirected,
|
||||
// specifically that every arc between distinct nodes has a reciprocal.
|
||||
type Undirected struct {
|
||||
AdjacencyList // embedded to include AdjacencyList methods
|
||||
}
|
||||
|
||||
// Bipartite represents a bipartite graph.
|
||||
//
|
||||
// In a bipartite graph, nodes are partitioned into two sets, or
|
||||
// "colors," such that every edge in the graph goes from one set to the
|
||||
// other.
|
||||
//
|
||||
// Member Color represents the partition with a bitmap of length the same
|
||||
// as the number of nodes in the graph. For convenience N0 stores the number
|
||||
// of zero bits in Color.
|
||||
//
|
||||
// To construct a Bipartite object, if you can easily or efficiently use
|
||||
// available information to construct the Color member, then you should do
|
||||
// this and construct a Bipartite object with a Go struct literal.
|
||||
//
|
||||
// If partition information is not readily available, see the constructor
|
||||
// Undirected.Bipartite.
|
||||
//
|
||||
// Alternatively, in some cases where the graph may have multiple connected
|
||||
// components, the lower level Undirected.BipartiteComponent can be used to
|
||||
// control color assignment by component.
|
||||
type Bipartite struct {
|
||||
Undirected
|
||||
Color bits.Bits
|
||||
N0 int
|
||||
}
|
||||
|
||||
// Subgraph represents a subgraph mapped to a supergraph.
|
||||
//
|
||||
// The subgraph is the embedded AdjacencyList and so the Subgraph type inherits
|
||||
// all methods of Adjacency list.
|
||||
//
|
||||
// The embedded subgraph is mapped relative to a specific supergraph, member
|
||||
// Super. A subgraph may have fewer nodes than its supergraph.
|
||||
// Each node of the subgraph must map to a distinct node of the supergraph.
|
||||
//
|
||||
// The mapping giving the supergraph node for a given subgraph node is
|
||||
// represented by member SuperNI, a slice parallel to the subgraph.
|
||||
//
|
||||
// The mapping in the other direction, giving a subgraph NI for a given
|
||||
// supergraph NI, is represented with map SubNI.
|
||||
//
|
||||
// Multiple Subgraphs can be created relative to a single supergraph.
|
||||
// The Subgraph type represents a mapping to only a single supergraph however.
|
||||
//
|
||||
// See graph methods InduceList and InduceBits for construction of
|
||||
// node-induced subgraphs.
|
||||
//
|
||||
// Alternatively an empty subgraph can be constructed with InduceList(nil).
|
||||
// Arbitrary subgraphs can then be built up with methods AddNode and AddArc.
|
||||
type Subgraph struct {
|
||||
AdjacencyList // the subgraph
|
||||
Super *AdjacencyList // the supergraph
|
||||
SubNI map[NI]NI // subgraph NIs, indexed by supergraph NIs
|
||||
SuperNI []NI // supergraph NIs indexed by subgraph NIs
|
||||
}
|
||||
|
||||
// DirectedSubgraph represents a subgraph mapped to a supergraph.
|
||||
//
|
||||
// See additional doc at Subgraph type.
|
||||
type DirectedSubgraph struct {
|
||||
Directed
|
||||
Super *Directed
|
||||
SubNI map[NI]NI
|
||||
SuperNI []NI
|
||||
}
|
||||
|
||||
// UndirectedSubgraph represents a subgraph mapped to a supergraph.
|
||||
//
|
||||
// See additional doc at Subgraph type.
|
||||
type UndirectedSubgraph struct {
|
||||
Undirected
|
||||
Super *Undirected
|
||||
SubNI map[NI]NI
|
||||
SuperNI []NI
|
||||
}
|
||||
|
||||
// LI is a label integer, used for associating labels with arcs.
|
||||
type LI int32
|
||||
|
||||
// Half is a half arc, representing a labeled arc and the "neighbor" node
|
||||
// that the arc leads to.
|
||||
//
|
||||
// Halfs can be composed to form a labeled adjacency list.
|
||||
type Half struct {
|
||||
To NI // node ID, usable as a slice index
|
||||
Label LI // half-arc ID for application data, often a weight
|
||||
}
|
||||
|
||||
// fromHalf is a half arc, representing a labeled arc and the "neighbor" node
|
||||
// that the arc originates from.
|
||||
//
|
||||
// This is used internally in a couple of places. It used to be exported but is
|
||||
// not currently needed anywhere in the API.
|
||||
type fromHalf struct {
|
||||
From NI
|
||||
Label LI
|
||||
}
|
||||
|
||||
// A LabeledAdjacencyList represents a graph as a list of neighbors for each
|
||||
// node, connected by labeled arcs.
|
||||
//
|
||||
// Arc labels are not necessarily unique arc IDs. Different arcs can have
|
||||
// the same label.
|
||||
//
|
||||
// Arc labels are commonly used to associate a weight with an arc. Arc labels
|
||||
// are general purpose however and can be used to associate arbitrary
|
||||
// information with an arc.
|
||||
//
|
||||
// Methods implementing weighted graph algorithms will commonly take a
|
||||
// weight function that turns a label int into a float64 weight.
|
||||
//
|
||||
// If only a small amount of information -- such as an integer weight or
|
||||
// a single printable character -- needs to be associated, it may sometimes
|
||||
// be possible to encode the information directly into the label int. For
|
||||
// more generality, some lookup scheme will be needed.
|
||||
//
|
||||
// In an undirected labeled graph, reciprocal arcs must have identical labels.
|
||||
// Note this does not preclude parallel arcs with different labels.
|
||||
type LabeledAdjacencyList [][]Half
|
||||
|
||||
// LabeledDirected represents a directed labeled graph.
|
||||
//
|
||||
// This is the labeled version of Directed. See types LabeledAdjacencyList
|
||||
// and Directed.
|
||||
type LabeledDirected struct {
|
||||
LabeledAdjacencyList // embedded to include LabeledAdjacencyList methods
|
||||
}
|
||||
|
||||
// LabeledUndirected represents an undirected labeled graph.
|
||||
//
|
||||
// This is the labeled version of Undirected. See types LabeledAdjacencyList
|
||||
// and Undirected.
|
||||
type LabeledUndirected struct {
|
||||
LabeledAdjacencyList // embedded to include LabeledAdjacencyList methods
|
||||
}
|
||||
|
||||
// LabeledBipartite represents a bipartite graph.
|
||||
//
|
||||
// In a bipartite graph, nodes are partitioned into two sets, or
|
||||
// "colors," such that every edge in the graph goes from one set to the
|
||||
// other.
|
||||
//
|
||||
// Member Color represents the partition with a bitmap of length the same
|
||||
// as the number of nodes in the graph. For convenience N0 stores the number
|
||||
// of zero bits in Color.
|
||||
//
|
||||
// To construct a LabeledBipartite object, if you can easily or efficiently use
|
||||
// available information to construct the Color member, then you should do
|
||||
// this and construct a LabeledBipartite object with a Go struct literal.
|
||||
//
|
||||
// If partition information is not readily available, see the constructor
|
||||
// Undirected.LabeledBipartite.
|
||||
//
|
||||
// Alternatively, in some cases where the graph may have multiple connected
|
||||
// components, the lower level LabeledUndirected.BipartiteComponent can be used
|
||||
// to control color assignment by component.
|
||||
type LabeledBipartite struct {
|
||||
LabeledUndirected
|
||||
Color bits.Bits
|
||||
N0 int
|
||||
}
|
||||
|
||||
// LabeledSubgraph represents a subgraph mapped to a supergraph.
|
||||
//
|
||||
// See additional doc at Subgraph type.
|
||||
type LabeledSubgraph struct {
|
||||
LabeledAdjacencyList
|
||||
Super *LabeledAdjacencyList
|
||||
SubNI map[NI]NI
|
||||
SuperNI []NI
|
||||
}
|
||||
|
||||
// LabeledDirectedSubgraph represents a subgraph mapped to a supergraph.
|
||||
//
|
||||
// See additional doc at Subgraph type.
|
||||
type LabeledDirectedSubgraph struct {
|
||||
LabeledDirected
|
||||
Super *LabeledDirected
|
||||
SubNI map[NI]NI
|
||||
SuperNI []NI
|
||||
}
|
||||
|
||||
// LabeledUndirectedSubgraph represents a subgraph mapped to a supergraph.
|
||||
//
|
||||
// See additional doc at Subgraph type.
|
||||
type LabeledUndirectedSubgraph struct {
|
||||
LabeledUndirected
|
||||
Super *LabeledUndirected
|
||||
SubNI map[NI]NI
|
||||
SuperNI []NI
|
||||
}
|
||||
|
||||
// Edge is an undirected edge between nodes N1 and N2.
|
||||
type Edge struct{ N1, N2 NI }
|
||||
|
||||
// LabeledEdge is an undirected edge with an associated label.
|
||||
type LabeledEdge struct {
|
||||
Edge
|
||||
LI
|
||||
}
|
||||
|
||||
// LabeledPath is a start node and a path of half arcs leading from start.
|
||||
type LabeledPath struct {
|
||||
Start NI
|
||||
Path []Half
|
||||
}
|
||||
|
||||
// Distance returns total path distance given WeightFunc w.
|
||||
func (p LabeledPath) Distance(w WeightFunc) float64 {
|
||||
d := 0.
|
||||
for _, h := range p.Path {
|
||||
d += w(h.Label)
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// WeightFunc returns a weight for a given label.
|
||||
//
|
||||
// WeightFunc is a parameter type for various search functions. The intent
|
||||
// is to return a weight corresponding to an arc label. The name "weight"
|
||||
// is an abstract term. An arc "weight" will typically have some application
|
||||
// specific meaning other than physical weight.
|
||||
type WeightFunc func(label LI) (weight float64)
|
||||
|
||||
// WeightedEdgeList is a graph representation.
|
||||
//
|
||||
// It is a labeled edge list, with an associated weight function to return
|
||||
// a weight given an edge label.
|
||||
//
|
||||
// Also associated is the order, or number of nodes of the graph.
|
||||
// All nodes occurring in the edge list must be strictly less than Order.
|
||||
//
|
||||
// WeightedEdgeList sorts by weight, obtained by calling the weight function.
|
||||
// If weight computation is expensive, consider supplying a cached or
|
||||
// memoized version.
|
||||
type WeightedEdgeList struct {
|
||||
Order int
|
||||
WeightFunc
|
||||
Edges []LabeledEdge
|
||||
}
|
||||
|
||||
// DistanceMatrix constructs a distance matrix corresponding to the weighted
|
||||
// edges of l.
|
||||
//
|
||||
// An edge n1, n2 with WeightFunc result w is represented by both
|
||||
// d[n1][n2] == w and d[n2][n1] == w. In case of parallel edges, the lowest
|
||||
// weight is stored. The distance from any node to itself d[n][n] is 0, unless
|
||||
// the node has a loop with a negative weight. If g has no edge between n1 and
|
||||
// distinct n2, +Inf is stored for d[n1][n2] and d[n2][n1].
|
||||
//
|
||||
// The returned DistanceMatrix is suitable for DistanceMatrix.FloydWarshall.
|
||||
func (l WeightedEdgeList) DistanceMatrix() (d DistanceMatrix) {
|
||||
d = newDM(l.Order)
|
||||
for _, e := range l.Edges {
|
||||
n1 := e.Edge.N1
|
||||
n2 := e.Edge.N2
|
||||
wt := l.WeightFunc(e.LI)
|
||||
// < to pick min of parallel arcs (also nicely ignores NaN)
|
||||
if wt < d[n1][n2] {
|
||||
d[n1][n2] = wt
|
||||
d[n2][n1] = wt
|
||||
}
|
||||
}
|
||||
return
|
||||
}
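
// Editor's sketch (not part of the original source): a minimal use of
// DistanceMatrix showing the parallel-edge and no-edge behavior documented
// above.  The weights are hypothetical; labels are used directly as weights.
func ExampleWeightedEdgeList_DistanceMatrix_sketch() {
	l := WeightedEdgeList{
		Order:      3,
		WeightFunc: func(l LI) float64 { return float64(l) },
		Edges: []LabeledEdge{
			{Edge{0, 1}, 5},
			{Edge{0, 1}, 3}, // parallel edge; the lower weight is kept
		},
	}
	d := l.DistanceMatrix()
	fmt.Println(d[0][1], d[1][0]) // stored symmetrically
	fmt.Println(d[0][2])          // no edge between 0 and 2
	// Output:
	// 3 3
	// +Inf
}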
|
||||
|
||||
// A DistanceMatrix is a square matrix representing some distance between
|
||||
// nodes of a graph. If the graph is directed, d[from][to] represents
|
||||
// some distance from node 'from' to node 'to'. Depending on context, the
|
||||
// distance may be an arc weight or path distance. A value of +Inf typically
|
||||
// means no arc or no path between the nodes.
|
||||
type DistanceMatrix [][]float64
|
||||
|
||||
// little helper function, makes a blank distance matrix for FloydWarshall.
|
||||
// could be exported?
|
||||
func newDM(n int) DistanceMatrix {
|
||||
inf := math.Inf(1)
|
||||
d := make(DistanceMatrix, n)
|
||||
for i := range d {
|
||||
di := make([]float64, n)
|
||||
for j := range di {
|
||||
di[j] = inf
|
||||
}
|
||||
di[i] = 0
|
||||
d[i] = di
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// FloydWarshall finds all pairs shortest distances for a weighted graph
|
||||
// without negative cycles.
|
||||
//
|
||||
// It operates on a distance matrix representing arcs of a graph and
|
||||
// destructively replaces arc weights with shortest path distances.
|
||||
//
|
||||
// In receiver d, d[fr][to] will be the shortest distance from node
|
||||
// 'fr' to node 'to'. An element value of +Inf means no path exists.
|
||||
// Any diagonal element < 0 indicates a negative cycle exists.
|
||||
//
|
||||
// See DistanceMatrix constructor methods of LabeledAdjacencyList and
|
||||
// WeightedEdgeList for suitable inputs.
|
||||
func (d DistanceMatrix) FloydWarshall() {
|
||||
for k, dk := range d {
|
||||
for _, di := range d {
|
||||
dik := di[k]
|
||||
for j := range d {
|
||||
if d2 := dik + dk[j]; d2 < di[j] {
|
||||
di[j] = d2
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
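
// Editor's sketch (not part of the original source): FloydWarshall applied to
// the distance matrix of a small weighted edge list.  Weights here are
// hypothetical; labels are used directly as weights.
func ExampleDistanceMatrix_FloydWarshall_sketch() {
	l := WeightedEdgeList{
		Order:      3,
		WeightFunc: func(l LI) float64 { return float64(l) },
		Edges: []LabeledEdge{
			{Edge{0, 1}, 4},
			{Edge{1, 2}, 6},
			{Edge{0, 2}, 20},
		},
	}
	d := l.DistanceMatrix()
	d.FloydWarshall()
	fmt.Println(d[0][2]) // the two-arc path 0-1-2 beats the direct edge
	// Output: 10
}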
|
||||
|
||||
// PathMatrix is a return type for FloydWarshallPaths.
|
||||
//
|
||||
// It encodes all pairs shortest paths.
|
||||
type PathMatrix [][]NI
|
||||
|
||||
// Path returns a shortest path from node start to end.
|
||||
//
|
||||
// Argument p is truncated, appended to, and returned as the result.
|
||||
// Thus the underlying allocation is reused if possible.
|
||||
// If there is no path from start to end, p is returned truncated to
|
||||
// zero length.
|
||||
//
|
||||
// If receiver m is not a valid populated PathMatrix as returned by
|
||||
// FloydWarshallPaths, behavior is undefined and a panic is likely.
|
||||
func (m PathMatrix) Path(start, end NI, p []NI) []NI {
|
||||
p = p[:0]
|
||||
for {
|
||||
p = append(p, start)
|
||||
if start == end {
|
||||
return p
|
||||
}
|
||||
start = m[start][end]
|
||||
if start < 0 {
|
||||
return p[:0]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FloydWarshallPaths finds all pairs shortest paths for a weighted graph
|
||||
// without negative cycles.
|
||||
//
|
||||
// It operates on a distance matrix representing arcs of a graph and
|
||||
// destructively replaces arc weights with shortest path distances.
|
||||
//
|
||||
// In receiver d, d[fr][to] will be the shortest distance from node
|
||||
// 'fr' to node 'to'. An element value of +Inf means no path exists.
|
||||
// Any diagonal element < 0 indicates a negative cycle exists.
|
||||
//
|
||||
// The return value encodes the paths. See PathMatrix.Path.
|
||||
//
|
||||
// See DistanceMatrix constructor methods of LabeledAdjacencyList and
|
||||
// WeightedEdgeList for suitable inputs.
|
||||
//
|
||||
// See also similar method FloydWarshallFromLists which has a richer
|
||||
// return value.
|
||||
func (d DistanceMatrix) FloydWarshallPaths() PathMatrix {
|
||||
m := make(PathMatrix, len(d))
|
||||
inf := math.Inf(1)
|
||||
for i, di := range d {
|
||||
mi := make([]NI, len(d))
|
||||
for j, dij := range di {
|
||||
if dij == inf {
|
||||
mi[j] = -1
|
||||
} else {
|
||||
mi[j] = NI(j)
|
||||
}
|
||||
}
|
||||
m[i] = mi
|
||||
}
|
||||
for k, dk := range d {
|
||||
for i, di := range d {
|
||||
mi := m[i]
|
||||
dik := di[k]
|
||||
for j := range d {
|
||||
if d2 := dik + dk[j]; d2 < di[j] {
|
||||
di[j] = d2
|
||||
mi[j] = mi[k]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
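
// Editor's sketch (not part of the original source): recovering a shortest
// path from the PathMatrix returned by FloydWarshallPaths.  Same hypothetical
// graph as the FloydWarshall sketch above.
func ExamplePathMatrix_Path_sketch() {
	l := WeightedEdgeList{
		Order:      3,
		WeightFunc: func(l LI) float64 { return float64(l) },
		Edges: []LabeledEdge{
			{Edge{0, 1}, 4},
			{Edge{1, 2}, 6},
			{Edge{0, 2}, 20},
		},
	}
	d := l.DistanceMatrix()
	m := d.FloydWarshallPaths()
	fmt.Println(m.Path(0, 2, nil))
	// Output: [0 1 2]
}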
|
||||
|
||||
// FloydWarshallFromLists finds all pairs shortest paths for a weighted
|
||||
// graph without negative cycles.
|
||||
//
|
||||
// It operates on a distance matrix representing arcs of a graph and
|
||||
// destructively replaces arc weights with shortest path distances.
|
||||
//
|
||||
// In receiver d, d[fr][to] will be the shortest distance from node
|
||||
// 'fr' to node 'to'. An element value of +Inf means no path exists.
|
||||
// Any diagonal element < 0 indicates a negative cycle exists.
|
||||
//
|
||||
// The return value encodes the paths. The FromLists are fully populated
|
||||
// with Leaves and Len values. See for example FromList.PathTo for
|
||||
// extracting paths. Note though that for i'th FromList of the return
|
||||
// value, PathTo(j) will return the path from j's root, which will not
|
||||
// be i in the case that there is no path from i to j. You must check
|
||||
// the first node of the path to see if it is i. If not, there is no
|
||||
// path from i to j. See example.
|
||||
//
|
||||
// See DistanceMatrix constructor methods of LabeledAdjacencyList and
|
||||
// WeightedEdgeList for suitable inputs.
|
||||
//
|
||||
// See also similar method FloydWarshallPaths, which has a lighter
|
||||
// weight return value.
|
||||
func (d DistanceMatrix) FloydWarshallFromLists() []FromList {
|
||||
l := make([]FromList, len(d))
|
||||
inf := math.Inf(1)
|
||||
for i, di := range d {
|
||||
li := NewFromList(len(d))
|
||||
p := li.Paths
|
||||
for j, dij := range di {
|
||||
if i == j || dij == inf {
|
||||
p[j] = PathEnd{From: -1}
|
||||
} else {
|
||||
p[j] = PathEnd{From: NI(i)}
|
||||
}
|
||||
}
|
||||
l[i] = li
|
||||
}
|
||||
for k, dk := range d {
|
||||
pk := l[k].Paths
|
||||
for i, di := range d {
|
||||
dik := di[k]
|
||||
pi := l[i].Paths
|
||||
for j := range d {
|
||||
if d2 := dik + dk[j]; d2 < di[j] {
|
||||
di[j] = d2
|
||||
pi[j] = pk[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, li := range l {
|
||||
li.RecalcLeaves()
|
||||
li.RecalcLen()
|
||||
}
|
||||
return l
|
||||
}
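
// Editor's sketch (not part of the original source): the From pointers of the
// returned FromLists can be inspected directly, which also gives a simple
// reachability check for nodes outside tree i.  Node 2 is isolated here.
func ExampleDistanceMatrix_FloydWarshallFromLists_sketch() {
	l := WeightedEdgeList{
		Order:      3,
		WeightFunc: func(l LI) float64 { return float64(l) },
		Edges:      []LabeledEdge{{Edge{0, 1}, 4}},
	}
	d := l.DistanceMatrix()
	fl := d.FloydWarshallFromLists()
	fmt.Println(fl[0].Paths[1].From) // node 1 reached from node 0
	fmt.Println(fl[0].Paths[2].From) // -1: node 2 not reached from node 0
	// Output:
	// 0
	// -1
}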
|
||||
|
||||
// AddEdge adds an edge to a subgraph.
|
||||
//
|
||||
// Arguments n1 and n2 must be NIs in supergraph s.Super. As with
// AddNode, AddEdge panics if n1 and n2 are not valid node indexes of
// s.Super.
//
// The edge (n1, n2) must exist in s.Super. Further, the number of
|
||||
// parallel edges in the subgraph cannot exceed the number of corresponding
|
||||
// parallel edges in the supergraph. That is, each edge already added to the
|
||||
// subgraph counts against the edges available in the supergraph. If a matching
|
||||
// edge is not available, AddEdge returns an error.
|
||||
//
|
||||
// If a matching edge is available, subgraph nodes are added as needed, the
|
||||
// subgraph edge is added, and the method returns nil.
|
||||
func (s *UndirectedSubgraph) AddEdge(n1, n2 NI) error {
|
||||
// verify supergraph NIs first, but without adding subgraph nodes just yet.
|
||||
if int(n1) < 0 || int(n1) >= s.Super.Order() {
|
||||
panic(fmt.Sprint("AddEdge: NI ", n1, " not in supergraph"))
|
||||
}
|
||||
if int(n2) < 0 || int(n2) >= s.Super.Order() {
|
||||
panic(fmt.Sprint("AddEdge: NI ", n2, " not in supergraph"))
|
||||
}
|
||||
// count existing matching edges in subgraph
|
||||
n := 0
|
||||
a := s.Undirected.AdjacencyList
|
||||
if b1, ok := s.SubNI[n1]; ok {
|
||||
if b2, ok := s.SubNI[n2]; ok {
|
||||
// both NIs already exist in subgraph, need to count edges
|
||||
for _, t := range a[b1] {
|
||||
if t == b2 {
|
||||
n++
|
||||
}
|
||||
}
|
||||
if b1 != b2 {
|
||||
// verify reciprocal arcs exist
|
||||
r := 0
|
||||
for _, t := range a[b2] {
|
||||
if t == b1 {
|
||||
r++
|
||||
}
|
||||
}
|
||||
if r < n {
|
||||
n = r
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// verify matching edges are available in supergraph
|
||||
m := 0
|
||||
for _, t := range (*s.Super).AdjacencyList[n1] {
|
||||
if t == n2 {
|
||||
if m == n {
|
||||
goto r // arc match after all existing arcs matched
|
||||
}
|
||||
m++
|
||||
}
|
||||
}
|
||||
return errors.New("edge not available in supergraph")
|
||||
r:
|
||||
if n1 != n2 {
|
||||
// verify reciprocal arcs
|
||||
m = 0
|
||||
for _, t := range (*s.Super).AdjacencyList[n2] {
|
||||
if t == n1 {
|
||||
if m == n {
|
||||
goto good
|
||||
}
|
||||
m++
|
||||
}
|
||||
}
|
||||
return errors.New("edge not available in supergraph")
|
||||
}
|
||||
good:
|
||||
// matched enough edges. nodes can finally
|
||||
// be added as needed and then the edge can be added.
|
||||
b1 := s.AddNode(n1)
|
||||
b2 := s.AddNode(n2)
|
||||
s.Undirected.AddEdge(b1, b2)
|
||||
return nil // success
|
||||
}
|
||||
|
||||
// AddEdge adds an edge to a subgraph.
|
||||
//
|
||||
// For argument e, e.N1 and e.N2 must be NIs in supergraph s.Super. As with
|
||||
// AddNode, AddEdge panics if e.N1 and e.N2 are not valid node indexes of
|
||||
// s.Super.
|
||||
//
|
||||
// Edge e must exist in s.Super with label l. Further, the number of
|
||||
// parallel edges in the subgraph cannot exceed the number of corresponding
|
||||
// parallel edges in the supergraph. That is, each edge already added to the
|
||||
// subgraph counts against the edges available in the supergraph. If a matching
|
||||
// edge is not available, AddEdge returns an error.
|
||||
//
|
||||
// If a matching edge is available, subgraph nodes are added as needed, the
|
||||
// subgraph edge is added, and the method returns nil.
|
||||
func (s *LabeledUndirectedSubgraph) AddEdge(e Edge, l LI) error {
|
||||
// verify supergraph NIs first, but without adding subgraph nodes just yet.
|
||||
if int(e.N1) < 0 || int(e.N1) >= s.Super.Order() {
|
||||
panic(fmt.Sprint("AddEdge: NI ", e.N1, " not in supergraph"))
|
||||
}
|
||||
if int(e.N2) < 0 || int(e.N2) >= s.Super.Order() {
|
||||
panic(fmt.Sprint("AddEdge: NI ", e.N2, " not in supergraph"))
|
||||
}
|
||||
// count existing matching edges in subgraph
|
||||
n := 0
|
||||
a := s.LabeledUndirected.LabeledAdjacencyList
|
||||
if b1, ok := s.SubNI[e.N1]; ok {
|
||||
if b2, ok := s.SubNI[e.N2]; ok {
|
||||
// both NIs already exist in subgraph, need to count edges
|
||||
h := Half{b2, l}
|
||||
for _, t := range a[b1] {
|
||||
if t == h {
|
||||
n++
|
||||
}
|
||||
}
|
||||
if b1 != b2 {
|
||||
// verify reciprocal arcs exist
|
||||
r := 0
|
||||
h.To = b1
|
||||
for _, t := range a[b2] {
|
||||
if t == h {
|
||||
r++
|
||||
}
|
||||
}
|
||||
if r < n {
|
||||
n = r
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// verify matching edges are available in supergraph
|
||||
m := 0
|
||||
h := Half{e.N2, l}
|
||||
for _, t := range (*s.Super).LabeledAdjacencyList[e.N1] {
|
||||
if t == h {
|
||||
if m == n {
|
||||
goto r // arc match after all existing arcs matched
|
||||
}
|
||||
m++
|
||||
}
|
||||
}
|
||||
return errors.New("edge not available in supergraph")
|
||||
r:
|
||||
if e.N1 != e.N2 {
|
||||
// verify reciprocal arcs
|
||||
m = 0
|
||||
h.To = e.N1
|
||||
for _, t := range (*s.Super).LabeledAdjacencyList[e.N2] {
|
||||
if t == h {
|
||||
if m == n {
|
||||
goto good
|
||||
}
|
||||
m++
|
||||
}
|
||||
}
|
||||
return errors.New("edge not available in supergraph")
|
||||
}
|
||||
good:
|
||||
// matched enough edges. nodes can finally
|
||||
// be added as needed and then the edge can be added.
|
||||
n1 := s.AddNode(e.N1)
|
||||
n2 := s.AddNode(e.N2)
|
||||
s.LabeledUndirected.AddEdge(Edge{n1, n2}, l)
|
||||
return nil // success
|
||||
}
|
||||
|
||||
// utility function called from all of the InduceList methods.
|
||||
func mapList(l []NI) (sub map[NI]NI, sup []NI) {
|
||||
sub = map[NI]NI{}
|
||||
// one pass to collect unique NIs
|
||||
for _, p := range l {
|
||||
sub[NI(p)] = -1
|
||||
}
|
||||
if len(sub) == len(l) { // NIs in l are unique
|
||||
sup = append([]NI{}, l...) // just copy them
|
||||
for b, p := range l {
|
||||
sub[p] = NI(b) // and fill in map
|
||||
}
|
||||
} else { // NIs in l not unique
|
||||
sup = make([]NI, 0, len(sub))
|
||||
for _, p := range l { // preserve ordering of first occurrences in l
|
||||
if sub[p] < 0 {
|
||||
sub[p] = NI(len(sup))
|
||||
sup = append(sup, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// utility function called from all of the InduceBits methods.
|
||||
func mapBits(t bits.Bits) (sub map[NI]NI, sup []NI) {
|
||||
sup = make([]NI, 0, t.OnesCount())
|
||||
sub = make(map[NI]NI, cap(sup))
|
||||
t.IterateOnes(func(n int) bool {
|
||||
sub[NI(n)] = NI(len(sup))
|
||||
sup = append(sup, NI(n))
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// OrderMap formats maps for testable examples.
|
||||
//
|
||||
// OrderMap provides simple, no-frills formatting of maps in sorted order,
|
||||
// convenient in some cases for output of testable examples.
|
||||
func OrderMap(m interface{}) string {
|
||||
// in particular exclude slices, which template would happily accept but
|
||||
// which would probably represent a coding mistake
|
||||
if reflect.TypeOf(m).Kind() != reflect.Map {
|
||||
panic("not a map")
|
||||
}
|
||||
t := template.Must(template.New("").Parse(
|
||||
`map[{{range $k, $v := .}}{{$k}}:{{$v}} {{end}}]`))
|
||||
var b bytes.Buffer
|
||||
if err := t.Execute(&b, m); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b.String()
|
||||
}
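
// Editor's sketch (not part of the original source): OrderMap on a small map.
// text/template ranges over maps in sorted key order, so the output is stable.
func ExampleOrderMap_sketch() {
	m := map[int]string{3: "three", 1: "one", 2: "two"}
	fmt.Println(OrderMap(m))
	// Output: map[1:one 2:two 3:three ]
}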
|
||||
135
vendor/github.com/soniakeys/graph/hacking.adoc
generated
vendored
@@ -1,135 +0,0 @@
|
||||
= Hacking
|
||||
|
||||
== Get, install
|
||||
Basic use of the package is just go get, or git clone; go install. There are
|
||||
no dependencies outside the standard library.
|
||||
|
||||
== Build
|
||||
CI is currently on travis-ci.org.
|
||||
|
||||
The build runs go vet with a few exceptions for things I'm not a big fan of.
|
||||
|
||||
https://github.com/client9/misspell has been valuable.
|
||||
|
||||
Also I wrote https://github.com/soniakeys/vetc to validate that each source
|
||||
file has copyright/license statement.
|
||||
|
||||
Then, it’s not in the ci script, but I wrote https://github.com/soniakeys/rcv
|
||||
to put coverage stats in the readme. Maybe it could be commit hook or
|
||||
something but for now I’ll try just running it manually now and then.
|
||||
|
||||
Go fmt is not in the ci script, but I have at least one editor set up to run
|
||||
it on save, so code should stay formatted pretty well.
|
||||
|
||||
== Examples with random output
|
||||
The math/rand generators with constant seeds used to give consistent numbers
|
||||
across Go versions and so some examples relied on this. Sometime after Go 1.9
|
||||
though the numbers changed. The technique for now is to go ahead and write
|
||||
those examples, get them working, then change the `// Output:` line to
|
||||
`// Random output:`. This keeps them showing in go doc but keeps them from
|
||||
being run by go test. This works for now. It might be revisited at some
|
||||
point.
|
||||
|
||||
== Plans
|
||||
The primary to-do list is the issue tracker on Github.
|
||||
|
||||
== Direction, focus, features
|
||||
The project started with no real goal or purpose, just as a place for some code
|
||||
that might be useful. Here are some elements that characterize the direction.
|
||||
|
||||
* The focus has been on algorithms on adjacency lists. That is, adjacency list
|
||||
is the fundamental representation for most implemented algorithms. There are
|
||||
many other interesting representations, many reasons to use them, but
|
||||
adjacency list is common in literature and practice. It has been useful to
|
||||
focus on this data representation, at first anyway.
|
||||
|
||||
* The focus has been on single threaded algorithms. Again, there is much new
|
||||
and interesting work being done with concurrent, parallel, and distributed
|
||||
graph algorithms, and Go might be an excellent language to implement some of
|
||||
these algorithms. But as a preliminary step, more traditional
|
||||
single-threaded algorithms are implemented.
|
||||
|
||||
* The focus has been on static finite graphs. Again there is much interesting
|
||||
work in online algorithms, dynamic graphs, and infinite graphs, but these
|
||||
are not generally considered here.
|
||||
|
||||
* Algorithms selected for implementation are generally ones commonly appearing
|
||||
in beginning graph theory discussions and in general purpose graph libraries
|
||||
in other programming languages. With these as drivers, there's a big risk of
|
||||
developing a library of curiosities and academic exercises rather than a
|
||||
library of practical utility. But well, it's a start. The hope is that
|
||||
there are some practical drivers behind graph theory and behind other graph
|
||||
libraries.
|
||||
|
||||
* There is active current research going on in graph algorithm development.
|
||||
One goal for this library is to implement newer and faster algorithms.
|
||||
In some cases where it seems not too much work, older/classic/traditional
|
||||
algorithms may be implemented for comparison. These generally go in the
|
||||
alt subdirectory.
|
||||
|
||||
== General principles
|
||||
* The API is rather low level.
|
||||
|
||||
* Slices instead of maps. Maps are pretty efficient, and the property of
|
||||
unique keys can be useful, but slices are still faster and more efficient,
|
||||
and the unique key property is not always needed or wanted. The Adjacency
|
||||
list implementation of this library is all done in slices. Slices are used
|
||||
in algorithms where possible, in preference to maps. Maps are still used in
|
||||
some cases where uniqueness is needed.
|
||||
|
||||
* Interfaces not generally used. Algorithms are implemented directly on
|
||||
concrete data types and not on interfaces describing the capabilities of
|
||||
the data types. The abstraction of interfaces is a nice match to graph
|
||||
theory and the convenience of running graph algorithms on any type that
|
||||
implements an interface is appealing, but the costs seem too high to me.
|
||||
Slices are rich with capabilities that get hidden behind interfaces and
|
||||
direct slice manipulation is always faster than going through interfaces.
|
||||
An impedance for programs using the library is that they will generally
|
||||
have to implement a mapping from slice indexes to their application data,
|
||||
often including, for example, some other form of node ID. It seems fair
|
||||
to push this burden outside the graph library; the library cannot know
|
||||
the needs of this mapping.
|
||||
|
||||
* Bitsets are widely used, particularly to store one bit of information per
|
||||
node of a graph. I used math/big at first but then moved to a dense bitset
|
||||
of my own. Yes, I considered other third-party bitsets but had my own
|
||||
feature set I wanted. A slice of bools is another alternative. Bools will
|
||||
be faster in almost all cases but the bitset will use less memory. I'm
|
||||
choosing size over speed for now.
|
||||
|
||||
* Code generation is used to provide methods that work on both labeled and
|
||||
unlabeled graphs. Code is written to labeled types, then transformations
|
||||
generate the unlabeled equivalents.
|
||||
|
||||
* Methods are named for what they return rather than what they do, where
|
||||
reasonable anyway.
|
||||
|
||||
* Consistency in method signature and behavior across corresponding methods,
|
||||
for example directed/undirected, labeled/unlabeled, again, as long as it's
|
||||
reasonable.
|
||||
|
||||
* Sometimes in tension with the consistency principle, methods are lazy about
|
||||
datatypes of parameters and return values. Sometimes a value might have
different reasonable representations; a set might be a bitset, map, slice
|
||||
of bools, or slice of set members for example. Methods will take and return
|
||||
whatever is convenient for them and not convert the form just for consistency
|
||||
or to try to guess what a caller would prefer.
|
||||
|
||||
* Methods return multiple results for whatever the algorithm produces that
|
||||
might be of interest. Sometimes an algorithm will have a primary result but
|
||||
then some secondary values that also might be of interest. If they are
|
||||
already computed as a byproduct of the algorithm, or can be computed at
|
||||
negligible cost, return them.
|
||||
|
||||
* Sometimes in conflict with the multiple result principle, methods will not
|
||||
speculatively compute secondary results if there is any significant cost
|
||||
and if the secondary result can be just as easily computed later.
|
||||
|
||||
== Code Maintenance
|
||||
There are tons of cut and paste variants. There's the basic AdjacencyList,
|
||||
then Directed and Undirected variants, then Labeled variants of each of those.
|
||||
Code gen helps avoid some cut and paste but there's a bunch that doesn't
|
||||
code gen very well and so is duplicated with cut and paste. In particular
|
||||
the testable examples in the _test files don't code gen well and so are pretty much
|
||||
all duplicated by hand. If you change code, think about where there should
|
||||
be variants and go look to see if the variants need similar changes.
|
||||
254
vendor/github.com/soniakeys/graph/mst.go
generated
vendored
@@ -1,254 +0,0 @@
|
||||
// Copyright 2014 Sonia Keys
|
||||
// License MIT: http://opensource.org/licenses/MIT
|
||||
|
||||
package graph
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"sort"
|
||||
|
||||
"github.com/soniakeys/bits"
|
||||
)
|
||||
|
||||
type dsElement struct {
|
||||
from NI
|
||||
rank int
|
||||
}
|
||||
|
||||
type disjointSet struct {
|
||||
set []dsElement
|
||||
}
|
||||
|
||||
func newDisjointSet(n int) disjointSet {
|
||||
set := make([]dsElement, n)
|
||||
for i := range set {
|
||||
set[i].from = -1
|
||||
}
|
||||
return disjointSet{set}
|
||||
}
|
||||
|
||||
// return true if disjoint trees were combined.
|
||||
// false if x and y were already in the same tree.
|
||||
func (ds disjointSet) union(x, y NI) bool {
|
||||
xr := ds.find(x)
|
||||
yr := ds.find(y)
|
||||
if xr == yr {
|
||||
return false
|
||||
}
|
||||
switch xe, ye := &ds.set[xr], &ds.set[yr]; {
|
||||
case xe.rank < ye.rank:
|
||||
xe.from = yr
|
||||
case xe.rank == ye.rank:
|
||||
xe.rank++
|
||||
fallthrough
|
||||
default:
|
||||
ye.from = xr
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (ds disjointSet) find(n NI) NI {
|
||||
// fast paths for n == root or from root.
|
||||
// no updates need in these cases.
|
||||
s := ds.set
|
||||
fr := s[n].from
|
||||
if fr < 0 { // n is root
|
||||
return n
|
||||
}
|
||||
n, fr = fr, s[fr].from
|
||||
if fr < 0 { // n is from root
|
||||
return n
|
||||
}
|
||||
// otherwise updates needed.
|
||||
// two iterative passes (rather than recursion or stack)
|
||||
// pass 1: find root
|
||||
r := fr
|
||||
for {
|
||||
f := s[r].from
|
||||
if f < 0 {
|
||||
break
|
||||
}
|
||||
r = f
|
||||
}
|
||||
// pass 2: update froms
|
||||
for {
|
||||
s[n].from = r
|
||||
if fr == r {
|
||||
return r
|
||||
}
|
||||
n = fr
|
||||
fr = s[n].from
|
||||
}
|
||||
}
|
||||
|
||||
// Kruskal implements Kruskal's algorithm for constructing a minimum spanning
|
||||
// forest on an undirected graph.
|
||||
//
|
||||
// The forest is returned as an undirected graph.
|
||||
//
|
||||
// Also returned is a total distance for the returned forest.
|
||||
//
|
||||
// This method is a convenience wrapper for LabeledEdgeList.Kruskal.
|
||||
// If you have no need for the input graph as a LabeledUndirected, it may be
|
||||
// more efficient to construct a LabeledEdgeList directly.
|
||||
func (g LabeledUndirected) Kruskal(w WeightFunc) (spanningForest LabeledUndirected, dist float64) {
|
||||
return g.WeightedArcsAsEdges(w).Kruskal()
|
||||
}
|
||||
|
||||
// Kruskal implements Kruskal's algorithm for constructing a minimum spanning
|
||||
// forest on an undirected graph.
|
||||
//
|
||||
// The algorithm allows parallel edges, thus it is acceptable to construct
|
||||
// the receiver with LabeledUndirected.WeightedArcsAsEdges. It may be more
|
||||
// efficient though, if you can construct the receiver WeightedEdgeList
|
||||
// directly without parallel edges.
|
||||
//
|
||||
// The forest is returned as an undirected graph.
|
||||
//
|
||||
// Also returned is a total distance for the returned forest.
|
||||
//
|
||||
// The edge list of the receiver is sorted in place as a side effect of this
|
||||
// method. See KruskalSorted for a version that relies on the edge list being
|
||||
// already sorted. This method is a wrapper for KruskalSorted. If you can
|
||||
// generate the input graph sorted as required for KruskalSorted, you can
|
||||
// call that method directly and avoid the overhead of the sort.
|
||||
func (l WeightedEdgeList) Kruskal() (g LabeledUndirected, dist float64) {
|
||||
e := l.Edges
|
||||
w := l.WeightFunc
|
||||
sort.Slice(e, func(i, j int) bool { return w(e[i].LI) < w(e[j].LI) })
|
||||
return l.KruskalSorted()
|
||||
}
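
// Editor's sketch (not part of the original source; as an example it would
// live in a _test file with "fmt" imported).  A four node cycle; Kruskal
// keeps the three cheapest edges and rejects the edge that would close the
// cycle.  The weights are hypothetical.
func ExampleWeightedEdgeList_Kruskal_sketch() {
	weights := []float64{1, 2, 3, 10}
	l := WeightedEdgeList{
		Order:      4,
		WeightFunc: func(l LI) float64 { return weights[l] },
		Edges: []LabeledEdge{
			{Edge{0, 1}, 0},
			{Edge{1, 2}, 1},
			{Edge{2, 3}, 2},
			{Edge{0, 3}, 3}, // heaviest edge, closes a cycle: rejected
		},
	}
	_, dist := l.Kruskal()
	fmt.Println(dist)
	// Output: 6
}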
|
||||
|
||||
// KruskalSorted implements Kruskal's algorithm for constructing a minimum
|
||||
// spanning forest on an undirected graph.
|
||||
//
|
||||
// When called, the edge list of the receiver must be already sorted by weight.
|
||||
// See the Kruskal method for a version that accepts an unsorted edge list.
|
||||
// As with Kruskal, parallel edges are allowed.
|
||||
//
|
||||
// The forest is returned as an undirected graph.
|
||||
//
|
||||
// Also returned is a total distance for the returned forest.
|
||||
func (l WeightedEdgeList) KruskalSorted() (g LabeledUndirected, dist float64) {
|
||||
ds := newDisjointSet(l.Order)
|
||||
g.LabeledAdjacencyList = make(LabeledAdjacencyList, l.Order)
|
||||
for _, e := range l.Edges {
|
||||
if ds.union(e.N1, e.N2) {
|
||||
g.AddEdge(Edge{e.N1, e.N2}, e.LI)
|
||||
dist += l.WeightFunc(e.LI)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Prim implements the Jarník-Prim-Dijkstra algorithm for constructing
|
||||
// a minimum spanning tree on an undirected graph.
|
||||
//
|
||||
// Prim computes a minimal spanning tree on the connected component containing
|
||||
// the given start node. The tree is returned in FromList f. Argument f
|
||||
// cannot be a nil pointer although it can point to a zero value FromList.
|
||||
//
|
||||
// If the passed FromList.Paths already has the length of g though, it will be reused.
|
||||
// In the case of a graph with multiple connected components, this allows a
|
||||
// spanning forest to be accumulated by calling Prim successively on
|
||||
// representative nodes of the components. In this case if leaves for
|
||||
// individual trees are of interest, pass a non-nil zero-value for the argument
|
||||
// componentLeaves and it will be populated with leaves for the single tree
|
||||
// spanned by the call.
|
||||
//
|
||||
// If argument labels is non-nil, it must have the same length as g and will
|
||||
// be populated with labels corresponding to the edges of the tree.
|
||||
//
|
||||
// Returned are the number of nodes spanned for the single tree (which will be
|
||||
// the order of the connected component) and the total spanned distance for the
|
||||
// single tree.
|
||||
func (g LabeledUndirected) Prim(start NI, w WeightFunc, f *FromList, labels []LI, componentLeaves *bits.Bits) (numSpanned int, dist float64) {
|
||||
al := g.LabeledAdjacencyList
|
||||
if len(f.Paths) != len(al) {
|
||||
*f = NewFromList(len(al))
|
||||
}
|
||||
if f.Leaves.Num != len(al) {
|
||||
f.Leaves = bits.New(len(al))
|
||||
}
|
||||
b := make([]prNode, len(al)) // "best"
|
||||
for n := range b {
|
||||
b[n].nx = NI(n)
|
||||
b[n].fx = -1
|
||||
}
|
||||
rp := f.Paths
|
||||
var frontier prHeap
|
||||
rp[start] = PathEnd{From: -1, Len: 1}
|
||||
numSpanned = 1
|
||||
fLeaves := &f.Leaves
|
||||
fLeaves.SetBit(int(start), 1)
|
||||
if componentLeaves != nil {
|
||||
if componentLeaves.Num != len(al) {
|
||||
*componentLeaves = bits.New(len(al))
|
||||
}
|
||||
componentLeaves.SetBit(int(start), 1)
|
||||
}
|
||||
for a := start; ; {
|
||||
for _, nb := range al[a] {
|
||||
if rp[nb.To].Len > 0 {
|
||||
continue // already in MST, no action
|
||||
}
|
||||
switch bp := &b[nb.To]; {
|
||||
case bp.fx == -1: // new node for frontier
|
||||
bp.from = fromHalf{From: a, Label: nb.Label}
|
||||
bp.wt = w(nb.Label)
|
||||
heap.Push(&frontier, bp)
|
||||
case w(nb.Label) < bp.wt: // better arc
|
||||
bp.from = fromHalf{From: a, Label: nb.Label}
|
||||
bp.wt = w(nb.Label)
|
||||
heap.Fix(&frontier, bp.fx)
|
||||
}
|
||||
}
|
||||
if len(frontier) == 0 {
|
||||
break // done
|
||||
}
|
||||
bp := heap.Pop(&frontier).(*prNode)
|
||||
a = bp.nx
|
||||
rp[a].Len = rp[bp.from.From].Len + 1
|
||||
rp[a].From = bp.from.From
|
||||
if len(labels) != 0 {
|
||||
labels[a] = bp.from.Label
|
||||
}
|
||||
dist += bp.wt
|
||||
fLeaves.SetBit(int(bp.from.From), 0)
|
||||
fLeaves.SetBit(int(a), 1)
|
||||
if componentLeaves != nil {
|
||||
componentLeaves.SetBit(int(bp.from.From), 0)
|
||||
componentLeaves.SetBit(int(a), 1)
|
||||
}
|
||||
numSpanned++
|
||||
}
|
||||
return
|
||||
}
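
// Editor's sketch (not part of the original source; as an example it would
// live in a _test file with "fmt" imported).  A three node path graph with
// hypothetical weights; Prim spans all three nodes from node 0.
func ExampleLabeledUndirected_Prim_sketch() {
	weights := []float64{4, 6} // indexed by arc label
	g := LabeledUndirected{LabeledAdjacencyList{
		0: {{To: 1, Label: 0}},
		1: {{To: 0, Label: 0}, {To: 2, Label: 1}},
		2: {{To: 1, Label: 1}},
	}}
	w := func(l LI) float64 { return weights[l] }
	var f FromList
	numSpanned, dist := g.Prim(0, w, &f, nil, nil)
	fmt.Println(numSpanned, dist)
	// Output: 3 10
}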
|
||||
|
||||
type prNode struct {
|
||||
nx NI
|
||||
from fromHalf
|
||||
wt float64 // p.Weight(from.Label)
|
||||
fx int
|
||||
}
|
||||
|
||||
type prHeap []*prNode
|
||||
|
||||
func (h prHeap) Len() int { return len(h) }
|
||||
func (h prHeap) Less(i, j int) bool { return h[i].wt < h[j].wt }
|
||||
func (h prHeap) Swap(i, j int) {
|
||||
h[i], h[j] = h[j], h[i]
|
||||
h[i].fx = i
|
||||
h[j].fx = j
|
||||
}
|
||||
func (p *prHeap) Push(x interface{}) {
|
||||
nd := x.(*prNode)
|
||||
nd.fx = len(*p)
|
||||
*p = append(*p, nd)
|
||||
}
|
||||
func (p *prHeap) Pop() interface{} {
|
||||
r := *p
|
||||
last := len(r) - 1
|
||||
*p = r[:last]
|
||||
return r[last]
|
||||
}
|
||||
708
vendor/github.com/soniakeys/graph/random.go
generated
vendored
@@ -1,708 +0,0 @@
|
||||
// Copyright 2016 Sonia Keys
|
||||
// License MIT: https://opensource.org/licenses/MIT
|
||||
|
||||
package graph
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"math/rand"
|
||||
|
||||
"github.com/soniakeys/bits"
|
||||
)
|
||||
|
||||
// ChungLu constructs a random simple undirected graph.
|
||||
//
|
||||
// The Chung Lu model is similar to a "configuration model" where each
|
||||
// node has a specified degree. In the Chung Lu model the degree specified
|
||||
// for each node is taken as an expected degree, not an exact degree.
|
||||
//
|
||||
// Argument w is "weight," the expected degree for each node.
|
||||
// The values of w must be given in decreasing order.
|
||||
//
|
||||
// The constructed graph will have node 0 with expected degree w[0] and so on,
|
||||
// so degree will decrease with node number. To randomize degree across
|
||||
// node numbers, consider using the Permute method with a rand.Perm.
|
||||
//
|
||||
// Also returned is the actual size m of constructed graph g.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
func ChungLu(w []float64, rr *rand.Rand) (g Undirected, m int) {
|
||||
// Ref: "Efficient Generation of Networks with Given Expected Degrees"
|
||||
// Joel C. Miller and Aric Hagberg
|
||||
// accessed at http://aric.hagberg.org/papers/miller-2011-efficient.pdf
|
||||
rf := rand.Float64
|
||||
if rr != nil {
|
||||
rf = rr.Float64
|
||||
}
|
||||
a := make(AdjacencyList, len(w))
|
||||
S := 0.
|
||||
for i := len(w) - 1; i >= 0; i-- {
|
||||
S += w[i]
|
||||
}
|
||||
for u := 0; u < len(w)-1; u++ {
|
||||
v := u + 1
|
||||
p := w[u] * w[v] / S
|
||||
if p > 1 {
|
||||
p = 1
|
||||
}
|
||||
for v < len(w) && p > 0 {
|
||||
if p != 1 {
|
||||
v += int(math.Log(rf()) / math.Log(1-p))
|
||||
}
|
||||
if v < len(w) {
|
||||
q := w[u] * w[v] / S
|
||||
if q > 1 {
|
||||
q = 1
|
||||
}
|
||||
if rf() < q/p {
|
||||
a[u] = append(a[u], NI(v))
|
||||
a[v] = append(a[v], NI(u))
|
||||
m++
|
||||
}
|
||||
p = q
|
||||
v++
|
||||
}
|
||||
}
|
||||
}
|
||||
return Undirected{a}, m
|
||||
}
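
// Editor's sketch (not part of the original source; as an example it would
// live in a _test file with "fmt" imported).  Expected degrees must be given
// in decreasing order; the seed and weights are hypothetical and the actual
// degrees and edge count are random, so only the order is asserted.
func ExampleChungLu_sketch() {
	r := rand.New(rand.NewSource(3))
	w := []float64{6, 4, 4, 2, 1} // expected degrees, decreasing
	g, m := ChungLu(w, r)
	_ = m // random
	fmt.Println(g.Order())
	// Output: 5
}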
|
||||
|
||||
// Euclidean generates a random simple graph on the Euclidean plane.
|
||||
//
|
||||
// Nodes are associated with coordinates uniformly distributed on a unit
|
||||
// square. Arcs are added between random nodes with a bias toward connecting
|
||||
// nearer nodes.
|
||||
//
|
||||
// Unfortunately the function has a few "knobs".
|
||||
// The returned graph will have order nNodes and arc size nArcs. The affinity
|
||||
// argument controls the bias toward connecting nearer nodes. The function
|
||||
// selects random pairs of nodes as a candidate arc then rejects the candidate
|
||||
// if the nodes fail an affinity test. Also parallel arcs are rejected.
|
||||
// As more affine or denser graphs are requested, rejections increase,
|
||||
// increasing run time. The patience argument controls the number of arc
|
||||
// rejections allowed before the function gives up and returns an error.
|
||||
// Note that higher affinity will require more patience and that some
|
||||
// combinations of nNodes and nArcs cannot be achieved with any amount of
|
||||
// patience given that the returned graph must be simple.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// Returned is a directed simple graph and associated positions indexed by
|
||||
// node number. In the arc list for each node, to-nodes are in random
|
||||
// order.
|
||||
//
|
||||
// See also LabeledEuclidean.
|
||||
func Euclidean(nNodes, nArcs int, affinity float64, patience int, rr *rand.Rand) (g Directed, pos []struct{ X, Y float64 }, err error) {
|
||||
a := make(AdjacencyList, nNodes) // graph
|
||||
ri, rf, re := rand.Intn, rand.Float64, rand.ExpFloat64
|
||||
if rr != nil {
|
||||
ri, rf, re = rr.Intn, rr.Float64, rr.ExpFloat64
|
||||
}
|
||||
// generate random positions
|
||||
pos = make([]struct{ X, Y float64 }, nNodes)
|
||||
for i := range pos {
|
||||
pos[i].X = rf()
|
||||
pos[i].Y = rf()
|
||||
}
|
||||
// arcs
|
||||
var tooFar, dup int
|
||||
arc:
|
||||
for i := 0; i < nArcs; {
|
||||
if tooFar == nArcs*patience {
|
||||
err = errors.New("affinity not found")
|
||||
return
|
||||
}
|
||||
if dup == nArcs*patience {
|
||||
err = errors.New("overcrowding")
|
||||
return
|
||||
}
|
||||
n1 := NI(ri(nNodes))
|
||||
var n2 NI
|
||||
for {
|
||||
n2 = NI(ri(nNodes))
|
||||
if n2 != n1 { // no graph loops
|
||||
break
|
||||
}
|
||||
}
|
||||
c1 := &pos[n1]
|
||||
c2 := &pos[n2]
|
||||
dist := math.Hypot(c2.X-c1.X, c2.Y-c1.Y)
|
||||
if dist*affinity > re() { // favor near nodes
|
||||
tooFar++
|
||||
continue
|
||||
}
|
||||
for _, nb := range a[n1] {
|
||||
if nb == n2 { // no parallel arcs
|
||||
dup++
|
||||
continue arc
|
||||
}
|
||||
}
|
||||
a[n1] = append(a[n1], n2)
|
||||
i++
|
||||
}
|
||||
g = Directed{a}
|
||||
return
|
||||
}
|
||||
|
||||
// LabeledEuclidean generates a random simple graph on the Euclidean plane.
|
||||
//
|
||||
// Arc label values in the returned graph g are indexes into the return value
|
||||
// wt. Wt is the Euclidean distance between the from and to nodes of the arc.
|
||||
//
|
||||
// Otherwise the function arguments and return values are the same as for
|
||||
// function Euclidean. See Euclidean.
|
||||
func LabeledEuclidean(nNodes, nArcs int, affinity float64, patience int, rr *rand.Rand) (g LabeledDirected, pos []struct{ X, Y float64 }, wt []float64, err error) {
|
||||
a := make(LabeledAdjacencyList, nNodes) // graph
|
||||
wt = make([]float64, nArcs) // arc weights
|
||||
ri, rf, re := rand.Intn, rand.Float64, rand.ExpFloat64
|
||||
if rr != nil {
|
||||
ri, rf, re = rr.Intn, rr.Float64, rr.ExpFloat64
|
||||
}
|
||||
// generate random positions
|
||||
pos = make([]struct{ X, Y float64 }, nNodes)
|
||||
for i := range pos {
|
||||
pos[i].X = rf()
|
||||
pos[i].Y = rf()
|
||||
}
|
||||
// arcs
|
||||
var tooFar, dup int
|
||||
arc:
|
||||
for i := 0; i < nArcs; {
|
||||
if tooFar == nArcs*patience {
|
||||
err = errors.New("affinity not found")
|
||||
return
|
||||
}
|
||||
if dup == nArcs*patience {
|
||||
err = errors.New("overcrowding")
|
||||
return
|
||||
}
|
||||
n1 := NI(ri(nNodes))
|
||||
var n2 NI
|
||||
for {
|
||||
n2 = NI(ri(nNodes))
|
||||
if n2 != n1 { // no graph loops
|
||||
break
|
||||
}
|
||||
}
|
||||
c1 := &pos[n1]
|
||||
c2 := &pos[n2]
|
||||
dist := math.Hypot(c2.X-c1.X, c2.Y-c1.Y)
|
||||
if dist*affinity > re() { // favor near nodes
|
||||
tooFar++
|
||||
continue
|
||||
}
|
||||
for _, nb := range a[n1] {
|
||||
if nb.To == n2 { // no parallel arcs
|
||||
dup++
|
||||
continue arc
|
||||
}
|
||||
}
|
||||
wt[i] = dist
|
||||
a[n1] = append(a[n1], Half{n2, LI(i)})
|
||||
i++
|
||||
}
|
||||
g = LabeledDirected{a}
|
||||
return
|
||||
}
|
||||
|
||||
// Geometric generates a random geometric graph (RGG) on the Euclidean plane.
|
||||
//
|
||||
// An RGG is an undirected simple graph. Nodes are associated with coordinates
|
||||
// uniformly distributed on a unit square. Edges are added between all nodes
|
||||
// falling within a specified distance or radius of each other.
|
||||
//
|
||||
// The resulting number of edges is somewhat random but asymptotically
|
||||
// approaches m = πr²n²/2. The method accumulates and returns the actual
|
||||
// number of edges constructed. In the arc list for each node, to-nodes are
|
||||
// ordered. Consider using ShuffleArcLists if random order is important.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// See also LabeledGeometric.
|
||||
func Geometric(nNodes int, radius float64, rr *rand.Rand) (g Undirected, pos []struct{ X, Y float64 }, m int) {
|
||||
// Expected degree is approximately nπr².
|
||||
a := make(AdjacencyList, nNodes)
|
||||
rf := rand.Float64
|
||||
if rr != nil {
|
||||
rf = rr.Float64
|
||||
}
|
||||
pos = make([]struct{ X, Y float64 }, nNodes)
|
||||
for i := range pos {
|
||||
pos[i].X = rf()
|
||||
pos[i].Y = rf()
|
||||
}
|
||||
for u, up := range pos {
|
||||
for v := u + 1; v < len(pos); v++ {
|
||||
vp := pos[v]
|
||||
dx := math.Abs(up.X - vp.X)
|
||||
if dx >= radius {
|
||||
continue
|
||||
}
|
||||
dy := math.Abs(up.Y - vp.Y)
|
||||
if dy >= radius {
|
||||
continue
|
||||
}
|
||||
if math.Hypot(dx, dy) < radius {
|
||||
a[u] = append(a[u], NI(v))
|
||||
a[v] = append(a[v], NI(u))
|
||||
m++
|
||||
}
|
||||
}
|
||||
}
|
||||
g = Undirected{a}
|
||||
return
|
||||
}
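
// Editor's sketch (not part of the original source; as an example it would
// live in a _test file with "fmt" imported).  The seed is arbitrary; the
// edge count m is random, asymptotically near πr²n²/2 (about 157 here), so
// only the deterministic order is asserted.
func ExampleGeometric_sketch() {
	r := rand.New(rand.NewSource(7))
	g, pos, m := Geometric(100, .1, r)
	_, _ = pos, m
	fmt.Println(g.Order())
	// Output: 100
}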
|
||||
|
||||
// LabeledGeometric generates a random geometric graph (RGG) on the Euclidean
|
||||
// plane.
|
||||
//
|
||||
// Edge label values in the returned graph g are indexes into the return value
|
||||
// wt. Wt is the Euclidean distance between nodes of the edge. The graph
|
||||
// size m is len(wt).
|
||||
//
|
||||
// See Geometric for additional description.
|
||||
func LabeledGeometric(nNodes int, radius float64, rr *rand.Rand) (g LabeledUndirected, pos []struct{ X, Y float64 }, wt []float64) {
|
||||
a := make(LabeledAdjacencyList, nNodes)
|
||||
rf := rand.Float64
|
||||
if rr != nil {
|
||||
rf = rr.Float64
|
||||
}
|
||||
pos = make([]struct{ X, Y float64 }, nNodes)
|
||||
for i := range pos {
|
||||
pos[i].X = rf()
|
||||
pos[i].Y = rf()
|
||||
}
|
||||
for u, up := range pos {
|
||||
for v := u + 1; v < len(pos); v++ {
|
||||
vp := pos[v]
|
||||
if w := math.Hypot(up.X-vp.X, up.Y-vp.Y); w < radius {
|
||||
a[u] = append(a[u], Half{NI(v), LI(len(wt))})
|
||||
a[v] = append(a[v], Half{NI(u), LI(len(wt))})
|
||||
wt = append(wt, w)
|
||||
}
|
||||
}
|
||||
}
|
||||
g = LabeledUndirected{a}
|
||||
return
|
||||
}
|
||||
|
||||
// GnmUndirected constructs a random simple undirected graph.
|
||||
//
|
||||
// Construction is by the Erdős–Rényi model where the specified number of
|
||||
// distinct edges is selected from all possible edges with equal probability.
|
||||
//
|
||||
// Argument n is number of nodes, m is number of edges and must be <= n(n-1)/2.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// In the generated arc list for each node, to-nodes are ordered.
|
||||
// Consider using ShuffleArcLists if random order is important.
|
||||
//
|
||||
// See also Gnm3Undirected, a method producing a statistically equivalent
|
||||
// result, but by an algorithm with somewhat different performance properties.
|
||||
// Performance of the two methods is expected to be similar in most cases but
|
||||
// it may be worth trying both with your data to see if one has a clear
|
||||
// advantage.
|
||||
func GnmUndirected(n, m int, rr *rand.Rand) Undirected {
|
||||
// based on Alg. 2 from "Efficient Generation of Large Random Networks",
|
||||
// Vladimir Batagelj and Ulrik Brandes.
|
||||
// accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf
|
||||
ri := rand.Intn
|
||||
if rr != nil {
|
||||
ri = rr.Intn
|
||||
}
|
||||
re := n * (n - 1) / 2
|
||||
ml := m
|
||||
if m*2 > re {
|
||||
ml = re - m
|
||||
}
|
||||
e := map[int]struct{}{}
|
||||
for len(e) < ml {
|
||||
e[ri(re)] = struct{}{}
|
||||
}
|
||||
a := make(AdjacencyList, n)
|
||||
if m*2 > re {
|
||||
i := 0
|
||||
for v := 1; v < n; v++ {
|
||||
for w := 0; w < v; w++ {
|
||||
if _, ok := e[i]; !ok {
|
||||
a[v] = append(a[v], NI(w))
|
||||
a[w] = append(a[w], NI(v))
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := range e {
|
||||
v := 1 + int(math.Sqrt(.25+float64(2*i))-.5)
|
||||
w := i - (v * (v - 1) / 2)
|
||||
a[v] = append(a[v], NI(w))
|
||||
a[w] = append(a[w], NI(v))
|
||||
}
|
||||
}
|
||||
return Undirected{a}
|
||||
}
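
// Editor's sketch (not part of the original source; as an example it would
// live in a _test file with "fmt" imported).  Whatever the seed, a Gnm graph
// has exactly the requested number of nodes and edges.
func ExampleGnmUndirected_sketch() {
	r := rand.New(rand.NewSource(1))
	g := GnmUndirected(10, 15, r)
	arcs := 0
	for _, to := range g.AdjacencyList {
		arcs += len(to)
	}
	fmt.Println(g.Order(), arcs/2) // each edge is stored as two arcs
	// Output: 10 15
}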
|
||||
|
||||
// GnmDirected constructs a random simple directed graph.
|
||||
//
|
||||
// Construction is by the Erdős–Rényi model where the specified number of
|
||||
// distinct arcs is selected from all possible arcs with equal probability.
|
||||
//
|
||||
// Argument n is number of nodes, ma is number of arcs and must be <= n(n-1).
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// In the generated arc list for each node, to-nodes are ordered.
|
||||
// Consider using ShuffleArcLists if random order is important.
|
||||
//
|
||||
// See also Gnm3Directed, a method producing a statistically equivalent
|
||||
// result, but by
|
||||
// an algorithm with somewhat different performance properties. Performance
|
||||
// of the two methods is expected to be similar in most cases but it may be
|
||||
// worth trying both with your data to see if one has a clear advantage.
|
||||
func GnmDirected(n, ma int, rr *rand.Rand) Directed {
|
||||
// based on Alg. 2 from "Efficient Generation of Large Random Networks",
|
||||
// Vladimir Batagelj and Ulrik Brandes.
|
||||
// accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf
|
||||
ri := rand.Intn
|
||||
if rr != nil {
|
||||
ri = rr.Intn
|
||||
}
|
||||
re := n * (n - 1)
|
||||
ml := ma
|
||||
if ma*2 > re {
|
||||
ml = re - ma
|
||||
}
|
||||
e := map[int]struct{}{}
|
||||
for len(e) < ml {
|
||||
e[ri(re)] = struct{}{}
|
||||
}
|
||||
a := make(AdjacencyList, n)
|
||||
if ma*2 > re {
|
||||
i := 0
|
||||
for v := 0; v < n; v++ {
|
||||
for w := 0; w < n; w++ {
|
||||
if w == v {
|
||||
continue
|
||||
}
|
||||
if _, ok := e[i]; !ok {
|
||||
a[v] = append(a[v], NI(w))
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := range e {
|
||||
v := i / (n - 1)
|
||||
w := i % (n - 1)
|
||||
if w >= v {
|
||||
w++
|
||||
}
|
||||
a[v] = append(a[v], NI(w))
|
||||
}
|
||||
}
|
||||
return Directed{a}
|
||||
}
|
||||
|
||||
// Gnm3Undirected constructs a random simple undirected graph.
|
||||
//
|
||||
// Construction is by the Erdős–Rényi model where the specified number of
|
||||
// distinct edges is selected from all possible edges with equal probability.
|
||||
//
|
||||
// Argument n is number of nodes, m is number of edges and must be <= n(n-1)/2.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// In the generated arc list for each node, to-nodes are ordered.
|
||||
// Consider using ShuffleArcLists if random order is important.
|
||||
//
|
||||
// See also GnmUndirected, a method producing a statistically equivalent
|
||||
// result, but by an algorithm with somewhat different performance properties.
|
||||
// Performance of the two methods is expected to be similar in most cases but
|
||||
// it may be worth trying both with your data to see if one has a clear
|
||||
// advantage.
|
||||
func Gnm3Undirected(n, m int, rr *rand.Rand) Undirected {
|
||||
// based on Alg. 3 from "Efficient Generation of Large Random Networks",
|
||||
// Vladimir Batagelj and Ulrik Brandes.
|
||||
// accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf
|
||||
//
|
||||
// I like this algorithm for its elegance. Pity it tends to run
// a little slower than the retry algorithm of Gnm.
|
||||
ri := rand.Intn
|
||||
if rr != nil {
|
||||
ri = rr.Intn
|
||||
}
|
||||
a := make(AdjacencyList, n)
|
||||
re := n * (n - 1) / 2
|
||||
rm := map[int]int{}
|
||||
for i := 0; i < m; i++ {
|
||||
er := i + ri(re-i)
|
||||
eNew := er
|
||||
if rp, ok := rm[er]; ok {
|
||||
eNew = rp
|
||||
}
|
||||
if rp, ok := rm[i]; !ok {
|
||||
rm[er] = i
|
||||
} else {
|
||||
rm[er] = rp
|
||||
}
|
||||
v := 1 + int(math.Sqrt(.25+float64(2*eNew))-.5)
|
||||
w := eNew - (v * (v - 1) / 2)
|
||||
a[v] = append(a[v], NI(w))
|
||||
a[w] = append(a[w], NI(v))
|
||||
}
|
||||
return Undirected{a}
|
||||
}
|
||||
|
||||
// Gnm3Directed constructs a random simple directed graph.
|
||||
//
|
||||
// Construction is by the Erdős–Rényi model where the specified number of
|
||||
// distinct arcs is selected from all possible arcs with equal probability.
|
||||
//
|
||||
// Argument n is number of nodes, ma is number of arcs and must be <= n(n-1).
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// In the generated arc list for each node, to-nodes are ordered.
|
||||
// Consider using ShuffleArcLists if random order is important.
|
||||
//
|
||||
// See also GnmDirected, a method producing a statistically equivalent result,
|
||||
// but by an algorithm with somewhat different performance properties.
|
||||
// Performance of the two methods is expected to be similar in most cases
|
||||
// but it may be worth trying both with your data to see if one has a clear
|
||||
// advantage.
|
||||
func Gnm3Directed(n, ma int, rr *rand.Rand) Directed {
|
||||
// based on Alg. 3 from "Efficient Generation of Large Random Networks",
|
||||
// Vladimir Batagelj and Ulrik Brandes.
|
||||
// accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf
|
||||
ri := rand.Intn
|
||||
if rr != nil {
|
||||
ri = rr.Intn
|
||||
}
|
||||
a := make(AdjacencyList, n)
|
||||
re := n * (n - 1)
|
||||
rm := map[int]int{}
|
||||
for i := 0; i < ma; i++ {
|
||||
er := i + ri(re-i)
|
||||
eNew := er
|
||||
if rp, ok := rm[er]; ok {
|
||||
eNew = rp
|
||||
}
|
||||
if rp, ok := rm[i]; !ok {
|
||||
rm[er] = i
|
||||
} else {
|
||||
rm[er] = rp
|
||||
}
|
||||
v := eNew / (n - 1)
|
||||
w := eNew % (n - 1)
|
||||
if w >= v {
|
||||
w++
|
||||
}
|
||||
a[v] = append(a[v], NI(w))
|
||||
}
|
||||
return Directed{a}
|
||||
}
|
||||
|
||||
// GnpUndirected constructs a random simple undirected graph.
|
||||
//
|
||||
// Construction is by the Gilbert model, an Erdős–Rényi like model where
|
||||
// distinct edges are independently selected from all possible edges with
|
||||
// the specified probability.
|
||||
//
|
||||
// Argument n is number of nodes, p is probability for selecting an edge.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// In the generated arc list for each node, to-nodes are ordered.
|
||||
// Consider using ShuffleArcLists if random order is important.
|
||||
//
|
||||
// Also returned is the actual size m of constructed graph g.
|
||||
func GnpUndirected(n int, p float64, rr *rand.Rand) (g Undirected, m int) {
|
||||
a := make(AdjacencyList, n)
|
||||
if n < 2 {
|
||||
return Undirected{a}, 0
|
||||
}
|
||||
rf := rand.Float64
|
||||
if rr != nil {
|
||||
rf = rr.Float64
|
||||
}
|
||||
// based on Alg. 1 from "Efficient Generation of Large Random Networks",
|
||||
// Vladimir Batagelj and Ulrik Brandes.
|
||||
// accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf
|
||||
var v, w NI = 1, -1
|
||||
g:
|
||||
for c := 1 / math.Log(1-p); ; {
|
||||
w += 1 + NI(c*math.Log(1-rf()))
|
||||
for {
|
||||
if w < v {
|
||||
a[v] = append(a[v], w)
|
||||
a[w] = append(a[w], v)
|
||||
m++
|
||||
continue g
|
||||
}
|
||||
w -= v
|
||||
v++
|
||||
if v == NI(n) {
|
||||
break g
|
||||
}
|
||||
}
|
||||
}
|
||||
return Undirected{a}, m
|
||||
}
|
||||
|
||||
// GnpDirected constructs a random simple directed graph.
|
||||
//
|
||||
// Construction is by the Gilbert model, an Erdős–Rényi like model where
|
||||
// distinct arcs are independently selected from all possible arcs with
|
||||
// the specified probability.
|
||||
//
|
||||
// Argument n is number of nodes, p is probability for selecting an arc.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// In the generated arc list for each node, to-nodes are ordered.
|
||||
// Consider using ShuffleArcLists if random order is important.
|
||||
//
|
||||
// Also returned is the actual arc size m of constructed graph g.
|
||||
func GnpDirected(n int, p float64, rr *rand.Rand) (g Directed, ma int) {
|
||||
a := make(AdjacencyList, n)
|
||||
if n < 2 {
|
||||
return Directed{a}, 0
|
||||
}
|
||||
rf := rand.Float64
|
||||
if rr != nil {
|
||||
rf = rr.Float64
|
||||
}
|
||||
// based on Alg. 1 from "Efficient Generation of Large Random Networks",
|
||||
// Vladimir Batagelj and Ulrik Brandes.
|
||||
// accessed at http://algo.uni-konstanz.de/publications/bb-eglrn-05.pdf
|
||||
var v, w NI = 0, -1
|
||||
g:
|
||||
for c := 1 / math.Log(1-p); ; {
|
||||
w += 1 + NI(c*math.Log(1-rf()))
|
||||
for ; ; w -= NI(n) {
|
||||
if w == v {
|
||||
w++
|
||||
}
|
||||
if w < NI(n) {
|
||||
a[v] = append(a[v], w)
|
||||
ma++
|
||||
continue g
|
||||
}
|
||||
v++
|
||||
if v == NI(n) {
|
||||
break g
|
||||
}
|
||||
}
|
||||
}
|
||||
return Directed{a}, ma
|
||||
}
|
||||
|
||||
// KroneckerDirected generates a Kronecker-like random directed graph.
|
||||
//
|
||||
// The returned graph g is simple and has no isolated nodes but is not
|
||||
// necessarily fully connected. The number of nodes will be <= 2^scale,
|
||||
// and will be near 2^scale for typical values of arcFactor, >= 2.
|
||||
// ArcFactor * 2^scale arcs are generated, although loops and duplicate arcs
|
||||
// are rejected. In the arc list for each node, to-nodes are in random
|
||||
// order.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// Return value ma is the number of arcs retained in the result graph.
|
||||
func KroneckerDirected(scale uint, arcFactor float64, rr *rand.Rand) (g Directed, ma int) {
|
||||
a, m := kronecker(scale, arcFactor, true, rr)
|
||||
return Directed{a}, m
|
||||
}
|
||||
|
||||
// KroneckerUndirected generates a Kronecker-like random undirected graph.
|
||||
//
|
||||
// The returned graph g is simple and has no isolated nodes but is not
|
||||
// necessarily fully connected. The number of nodes will be <= 2^scale,
|
||||
// and will be near 2^scale for typical values of edgeFactor, >= 2.
|
||||
// EdgeFactor * 2^scale edges are generated, although loops and duplicate edges
|
||||
// are rejected. In the arc list for each node, to-nodes are in random
|
||||
// order.
|
||||
//
|
||||
// If Rand r is nil, the rand package default shared source is used.
|
||||
//
|
||||
// Return value m is the true number of edges--not arcs--retained in the result
|
||||
// graph.
|
||||
func KroneckerUndirected(scale uint, edgeFactor float64, rr *rand.Rand) (g Undirected, m int) {
|
||||
al, s := kronecker(scale, edgeFactor, false, rr)
|
||||
return Undirected{al}, s
|
||||
}
|
||||
|
||||
// Styled after the Graph500 example code. Not well tested currently.
|
||||
// Graph500 example generates undirected only. No idea if the directed variant
|
||||
// here is meaningful or not.
|
||||
//
|
||||
// note mma returns arc size ma for dir=true, but returns size m for dir=false
|
||||
func kronecker(scale uint, edgeFactor float64, dir bool, rr *rand.Rand) (g AdjacencyList, mma int) {
|
||||
rf, ri, rp := rand.Float64, rand.Intn, rand.Perm
|
||||
if rr != nil {
|
||||
rf, ri, rp = rr.Float64, rr.Intn, rr.Perm
|
||||
}
|
||||
N := 1 << scale // node extent
|
||||
M := int(edgeFactor*float64(N) + .5) // number of arcs/edges to generate
|
||||
a, b, c := 0.57, 0.19, 0.19 // initiator probabilities
|
||||
ab := a + b
|
||||
cNorm := c / (1 - ab)
|
||||
aNorm := a / ab
|
||||
ij := make([][2]NI, M)
|
||||
bm := bits.New(N)
|
||||
var nNodes int
|
||||
for k := range ij {
|
||||
var i, j int
|
||||
for b := 1; b < N; b <<= 1 {
|
||||
if rf() > ab {
|
||||
i |= b
|
||||
if rf() > cNorm {
|
||||
j |= b
|
||||
}
|
||||
} else if rf() > aNorm {
|
||||
j |= b
|
||||
}
|
||||
}
|
||||
if bm.Bit(i) == 0 {
|
||||
bm.SetBit(i, 1)
|
||||
nNodes++
|
||||
}
|
||||
if bm.Bit(j) == 0 {
|
||||
bm.SetBit(j, 1)
|
||||
nNodes++
|
||||
}
|
||||
r := ri(k + 1) // shuffle edges as they are generated
|
||||
ij[k] = ij[r]
|
||||
ij[r] = [2]NI{NI(i), NI(j)}
|
||||
}
|
||||
p := rp(nNodes) // mapping to shuffle IDs of non-isolated nodes
|
||||
px := 0
|
||||
rn := make([]NI, N)
|
||||
for i := range rn {
|
||||
if bm.Bit(i) == 1 {
|
||||
rn[i] = NI(p[px]) // fill lookup table
|
||||
px++
|
||||
}
|
||||
}
|
||||
g = make(AdjacencyList, nNodes)
|
||||
ij:
|
||||
for _, e := range ij {
|
||||
if e[0] == e[1] {
|
||||
continue // skip loops
|
||||
}
|
||||
ri, rj := rn[e[0]], rn[e[1]]
|
||||
for _, nb := range g[ri] {
|
||||
if nb == rj {
|
||||
continue ij // skip parallel edges
|
||||
}
|
||||
}
|
||||
g[ri] = append(g[ri], rj)
|
||||
mma++
|
||||
if !dir {
|
||||
g[rj] = append(g[rj], ri)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
50
vendor/github.com/soniakeys/graph/readme.adoc
generated
vendored
@@ -1,50 +0,0 @@
|
||||
= Graph
|
||||
|
||||
A graph library with goals of speed and simplicity, Graph implements
|
||||
graph algorithms on graphs of zero-based integer node IDs.
|
||||
|
||||
image:https://godoc.org/github.com/soniakeys/graph?status.svg[link=https://godoc.org/github.com/soniakeys/graph]
|
||||
image:http://gowalker.org/api/v1/badge[link=https://gowalker.org/github.com/soniakeys/graph]
|
||||
image:http://go-search.org/badge?id=github.com%2Fsoniakeys%2Fgraph[link=http://go-search.org/view?id=github.com%2Fsoniakeys%2Fgraph]
|
||||
image:https://travis-ci.org/soniakeys/graph.svg?branch=master[link=https://travis-ci.org/soniakeys/graph]
|
||||
|
||||
The library provides efficient graph representations and many methods on
|
||||
graph types. It can be imported and used directly in many applications that
|
||||
require or can benefit from graph algorithms.
|
||||
|
||||
The library should also be considered a library of source code that can serve
|
||||
as starting material for coding variant or more complex algorithms.
|
||||
|
||||
== Ancillary material of interest
|
||||
|
||||
The directory link:tutorials[tutorials] is a work in progress - there are only
|
||||
a few tutorials there yet - but the concept is to provide some topical
|
||||
walk-throughs to supplement godoc. The source-based godoc documentation
|
||||
remains the primary documentation.
|
||||
|
||||
The directory link:anecdote[anecdote] contains a stand-alone program that
|
||||
performs single runs of a number of methods, collecting one-off or "anecdotal"
|
||||
timings. It currently runs only a small fraction of the library methods but
|
||||
may still be of interest for giving a general idea of how fast some methods
|
||||
run.
|
||||
|
||||
The directory link:bench[bench] is another work in progress. The concept is
|
||||
to present some plots showing benchmark performance approaching some
|
||||
theoretical asymptote.
|
||||
|
||||
link:hacking.adoc[hacking.adoc] has some information about how the library is
|
||||
developed, built, and tested. It might be of interest if for example you
|
||||
plan to fork or contribute to the the repository.
|
||||
|
||||
== Test coverage
|
||||
1 Jul 2017
|
||||
....
|
||||
graph 93.7%
|
||||
graph/alt 88.0%
|
||||
graph/dot 77.7%
|
||||
graph/treevis 79.4%
|
||||
....
|
||||
|
||||
== License
|
||||
All files in the repository are licensed with the MIT License,
|
||||
https://opensource.org/licenses/MIT.
|
||||
761
vendor/github.com/soniakeys/graph/sssp.go
generated
vendored
761
vendor/github.com/soniakeys/graph/sssp.go
generated
vendored
@@ -1,761 +0,0 @@
|
||||
// Copyright 2013 Sonia Keys
|
||||
// License MIT: http://opensource.org/licenses/MIT
|
||||
|
||||
package graph
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/soniakeys/bits"
|
||||
)
|
||||
|
||||
// rNode holds data for a "reached" node
|
||||
type rNode struct {
|
||||
nx NI
|
||||
state int8 // state constants defined below
|
||||
f float64 // "g+h", path dist + heuristic estimate
|
||||
fx int // heap.Fix index
|
||||
}
|
||||
|
||||
// for rNode.state
|
||||
const (
|
||||
unreached = 0
|
||||
reached = 1
|
||||
open = 1
|
||||
closed = 2
|
||||
)
|
||||
|
||||
type openHeap []*rNode
|
||||
|
||||
// A Heuristic is defined on a specific end node. The function
|
||||
// returns an estimate of the path distance from node argument
|
||||
// "from" to the end node. Two subclasses of heuristics are "admissible"
|
||||
// and "monotonic."
|
||||
//
|
||||
// Admissible means the value returned is guaranteed to be less than or
|
||||
// equal to the actual shortest path distance from the node to end.
|
||||
//
|
||||
// An admissible estimate may further be monotonic.
|
||||
// Monotonic means that for any neighboring nodes A and B with half arc aB
|
||||
// leading from A to B, and for heuristic h defined on some end node, then
|
||||
// h(A) <= aB.ArcWeight + h(B).
|
||||
//
|
||||
// See AStarA for additional notes on implementing heuristic functions for
|
||||
// AStar search methods.
|
||||
type Heuristic func(from NI) float64
|
||||
|
||||
// Admissible returns true if heuristic h is admissible on graph g relative to
|
||||
// the given end node.
|
||||
//
|
||||
// If h is inadmissible, the string result describes a counter example.
|
||||
func (h Heuristic) Admissible(g LabeledAdjacencyList, w WeightFunc, end NI) (bool, string) {
|
||||
// invert graph
|
||||
inv := make(LabeledAdjacencyList, len(g))
|
||||
for from, nbs := range g {
|
||||
for _, nb := range nbs {
|
||||
inv[nb.To] = append(inv[nb.To],
|
||||
Half{To: NI(from), Label: nb.Label})
|
||||
}
|
||||
}
|
||||
// run dijkstra
|
||||
// Dijkstra.AllPaths takes a start node but after inverting the graph
|
||||
// argument end now represents the start node of the inverted graph.
|
||||
f, _, dist, _ := inv.Dijkstra(end, -1, w)
|
||||
// compare h to found shortest paths
|
||||
for n := range inv {
|
||||
if f.Paths[n].Len == 0 {
|
||||
continue // no path, any heuristic estimate is fine.
|
||||
}
|
||||
if !(h(NI(n)) <= dist[n]) {
|
||||
return false, fmt.Sprintf("h(%d) = %g, "+
|
||||
"required to be <= found shortest path (%g)",
|
||||
n, h(NI(n)), dist[n])
|
||||
}
|
||||
}
|
||||
return true, ""
|
||||
}
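// Illustrative sketch only, not part of the vendored file: one way Admissible
// might be exercised. The graph g, weight function w, heuristic h, and the
// function name exampleAdmissible are assumptions made up for this example;
// labels are reused directly as arc weights.
func exampleAdmissible() {
	g := LabeledAdjacencyList{
		0: {{To: 1, Label: 7}},
		1: {{To: 2, Label: 3}},
		2: {},
	}
	w := func(l LI) float64 { return float64(l) }      // label value is the arc weight
	h := Heuristic(func(from NI) float64 { return 0 }) // the zero heuristic is always admissible
	ok, counterExample := h.Admissible(g, w, 2)
	_, _ = ok, counterExample // ok == true, counterExample == ""
}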
|
||||
|
||||
// Monotonic returns true if heuristic h is monotonic on weighted graph g.
|
||||
//
|
||||
// If h is non-monotonic, the string result describes a counter example.
|
||||
func (h Heuristic) Monotonic(g LabeledAdjacencyList, w WeightFunc) (bool, string) {
|
||||
// precompute
|
||||
hv := make([]float64, len(g))
|
||||
for n := range g {
|
||||
hv[n] = h(NI(n))
|
||||
}
|
||||
// iterate over all edges
|
||||
for from, nbs := range g {
|
||||
for _, nb := range nbs {
|
||||
arcWeight := w(nb.Label)
|
||||
if !(hv[from] <= arcWeight+hv[nb.To]) {
|
||||
return false, fmt.Sprintf("h(%d) = %g, "+
|
||||
"required to be <= arc weight + h(%d) (= %g + %g = %g)",
|
||||
from, hv[from],
|
||||
nb.To, arcWeight, hv[nb.To], arcWeight+hv[nb.To])
|
||||
}
|
||||
}
|
||||
}
|
||||
return true, ""
|
||||
}
|
||||
|
||||
// AStarA finds a path between two nodes.
|
||||
//
|
||||
// AStarA implements both algorithm A and algorithm A*. The difference in the
|
||||
// two algorithms is strictly in the heuristic estimate returned by argument h.
|
||||
// If h is an "admissible" heuristic estimate, then the algorithm is termed A*,
|
||||
// otherwise it is algorithm A.
|
||||
//
|
||||
// Like Dijkstra's algorithm, AStarA with an admissible heuristic finds the
|
||||
// shortest path between start and end. AStarA generally runs faster than
|
||||
// Dijkstra though, by using the heuristic distance estimate.
|
||||
//
|
||||
// AStarA with an inadmissible heuristic becomes algorithm A. Algorithm A
|
||||
// will find a path, but it is not guaranteed to be the shortest path.
|
||||
// The heuristic still guides the search however, so a nearly admissible
|
||||
// heuristic is likely to find a very good path, if not the best. Quality
|
||||
// of the path returned degrades gracefully with the quality of the heuristic.
|
||||
//
|
||||
// The heuristic function h should ideally be fairly inexpensive. AStarA
|
||||
// may call it more than once for the same node, especially as graph density
|
||||
// increases. In some cases it may be worth the effort to memoize or
|
||||
// precompute values.
|
||||
//
|
||||
// Argument g is the graph to be searched, with arc weights returned by w.
|
||||
// As usual for AStar, arc weights must be non-negative.
|
||||
// Graphs may be directed or undirected.
|
||||
//
|
||||
// If AStarA finds a path it returns a FromList encoding the path, the arc
|
||||
// labels for path nodes, the total path distance, and ok = true.
|
||||
// Otherwise it returns ok = false.
|
||||
func (g LabeledAdjacencyList) AStarA(w WeightFunc, start, end NI, h Heuristic) (f FromList, labels []LI, dist float64, ok bool) {
|
||||
// NOTE: AStarM is largely duplicate code.
|
||||
|
||||
f = NewFromList(len(g))
|
||||
labels = make([]LI, len(g))
|
||||
d := make([]float64, len(g))
|
||||
r := make([]rNode, len(g))
|
||||
for i := range r {
|
||||
r[i].nx = NI(i)
|
||||
}
|
||||
// start node is reached initially
|
||||
cr := &r[start]
|
||||
cr.state = reached
|
||||
cr.f = h(start) // total path estimate is estimate from start
|
||||
rp := f.Paths
|
||||
rp[start] = PathEnd{Len: 1, From: -1} // path length at start is 1 node
|
||||
// oh is a heap of nodes "open" for exploration. nodes go on the heap
|
||||
// when they get an initial or new "g" path distance, and therefore a
|
||||
// new "f" which serves as priority for exploration.
|
||||
oh := openHeap{cr}
|
||||
for len(oh) > 0 {
|
||||
bestPath := heap.Pop(&oh).(*rNode)
|
||||
bestNode := bestPath.nx
|
||||
if bestNode == end {
|
||||
return f, labels, d[end], true
|
||||
}
|
||||
bp := &rp[bestNode]
|
||||
nextLen := bp.Len + 1
|
||||
for _, nb := range g[bestNode] {
|
||||
alt := &r[nb.To]
|
||||
ap := &rp[alt.nx]
|
||||
// "g" path distance from start
|
||||
g := d[bestNode] + w(nb.Label)
|
||||
if alt.state == reached {
|
||||
if g > d[nb.To] {
|
||||
// candidate path to nb is longer than some alternate path
|
||||
continue
|
||||
}
|
||||
if g == d[nb.To] && nextLen >= ap.Len {
|
||||
// candidate path has identical length of some alternate
|
||||
// path but it takes no fewer hops.
|
||||
continue
|
||||
}
|
||||
// cool, we found a better way to get to this node.
|
||||
// record new path data for this node and
|
||||
// update alt with new data and make sure it's on the heap.
|
||||
*ap = PathEnd{From: bestNode, Len: nextLen}
|
||||
labels[nb.To] = nb.Label
|
||||
d[nb.To] = g
|
||||
alt.f = g + h(nb.To)
|
||||
if alt.fx < 0 {
|
||||
heap.Push(&oh, alt)
|
||||
} else {
|
||||
heap.Fix(&oh, alt.fx)
|
||||
}
|
||||
} else {
|
||||
// bestNode being reached for the first time.
|
||||
*ap = PathEnd{From: bestNode, Len: nextLen}
|
||||
labels[nb.To] = nb.Label
|
||||
d[nb.To] = g
|
||||
alt.f = g + h(nb.To)
|
||||
alt.state = reached
|
||||
heap.Push(&oh, alt) // and it's now open for exploration
|
||||
}
|
||||
}
|
||||
}
|
||||
return // no path
|
||||
}
|
||||
|
||||
// AStarAPath finds a shortest path using the AStarA algorithm.
|
||||
//
|
||||
// This is a convenience method with a simpler result than the AStarA method.
|
||||
// See documentation on the AStarA method.
|
||||
//
|
||||
// If a path is found, the non-nil node path is returned with the total path
|
||||
// distance. Otherwise the returned path will be nil.
|
||||
func (g LabeledAdjacencyList) AStarAPath(start, end NI, h Heuristic, w WeightFunc) (LabeledPath, float64) {
|
||||
f, labels, d, _ := g.AStarA(w, start, end, h)
|
||||
return f.PathToLabeled(end, labels, nil), d
|
||||
}
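// Illustrative sketch, not from the vendored source: calling AStarAPath on a
// small labeled graph. The graph, weight function, and the name
// exampleAStarAPath are assumptions for this example; a constant-zero
// heuristic is admissible, so the result is a true shortest path.
func exampleAStarAPath() {
	g := LabeledAdjacencyList{
		0: {{To: 1, Label: 4}, {To: 2, Label: 1}},
		1: {{To: 3, Label: 1}},
		2: {{To: 1, Label: 1}, {To: 3, Label: 6}},
		3: {},
	}
	w := func(l LI) float64 { return float64(l) }
	h := Heuristic(func(NI) float64 { return 0 })
	p, dist := g.AStarAPath(0, 3, h, w)
	_, _ = p, dist // expected: path 0->2->1->3, distance 3
}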
|
||||
|
||||
// AStarM is AStarA optimized for monotonic heuristic estimates.
|
||||
//
|
||||
// Note that this function requires a monotonic heuristic. Results will
|
||||
// not be meaningful if argument h is non-monotonic.
|
||||
//
|
||||
// See AStarA for general usage. See Heuristic for notes on monotonicity.
|
||||
func (g LabeledAdjacencyList) AStarM(w WeightFunc, start, end NI, h Heuristic) (f FromList, labels []LI, dist float64, ok bool) {
|
||||
// NOTE: AStarM is largely code duplicated from AStarA.
|
||||
// Differences are noted in comments in this method.
|
||||
|
||||
f = NewFromList(len(g))
|
||||
labels = make([]LI, len(g))
|
||||
d := make([]float64, len(g))
|
||||
r := make([]rNode, len(g))
|
||||
for i := range r {
|
||||
r[i].nx = NI(i)
|
||||
}
|
||||
cr := &r[start]
|
||||
|
||||
// difference from AStarA:
|
||||
// instead of a bit to mark a reached node, there are two states,
|
||||
// open and closed. open marks nodes "open" for exploration.
|
||||
// nodes are marked open as they are reached, then marked
|
||||
// closed as they are found to be on the best path.
|
||||
cr.state = open
|
||||
|
||||
cr.f = h(start)
|
||||
rp := f.Paths
|
||||
rp[start] = PathEnd{Len: 1, From: -1}
|
||||
oh := openHeap{cr}
|
||||
for len(oh) > 0 {
|
||||
bestPath := heap.Pop(&oh).(*rNode)
|
||||
bestNode := bestPath.nx
|
||||
if bestNode == end {
|
||||
return f, labels, d[end], true
|
||||
}
|
||||
|
||||
// difference from AStarA:
|
||||
// move nodes to closed list as they are found to be best so far.
|
||||
bestPath.state = closed
|
||||
|
||||
bp := &rp[bestNode]
|
||||
nextLen := bp.Len + 1
|
||||
for _, nb := range g[bestNode] {
|
||||
alt := &r[nb.To]
|
||||
|
||||
// difference from AStarA:
|
||||
// Monotonicity means that f cannot be improved.
|
||||
if alt.state == closed {
|
||||
continue
|
||||
}
|
||||
|
||||
ap := &rp[alt.nx]
|
||||
g := d[bestNode] + w(nb.Label)
|
||||
|
||||
// difference from AStarA:
|
||||
// test for open state, not just reached
|
||||
if alt.state == open {
|
||||
|
||||
if g > d[nb.To] {
|
||||
continue
|
||||
}
|
||||
if g == d[nb.To] && nextLen >= ap.Len {
|
||||
continue
|
||||
}
|
||||
*ap = PathEnd{From: bestNode, Len: nextLen}
|
||||
labels[nb.To] = nb.Label
|
||||
d[nb.To] = g
|
||||
alt.f = g + h(nb.To)
|
||||
|
||||
// difference from AStarA:
|
||||
// we know alt was on the heap because we found it marked open
|
||||
heap.Fix(&oh, alt.fx)
|
||||
} else {
|
||||
*ap = PathEnd{From: bestNode, Len: nextLen}
|
||||
labels[nb.To] = nb.Label
|
||||
d[nb.To] = g
|
||||
alt.f = g + h(nb.To)
|
||||
|
||||
// difference from AStarA:
|
||||
// nodes are opened when first reached
|
||||
alt.state = open
|
||||
heap.Push(&oh, alt)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AStarMPath finds a shortest path using the AStarM algorithm.
|
||||
//
|
||||
// This is a convenience method with a simpler result than the AStarM method.
|
||||
// See documentation on the AStarM and AStarA methods.
|
||||
//
|
||||
// If a path is found, the non-nil node path is returned with the total path
|
||||
// distance. Otherwise the returned path will be nil.
|
||||
func (g LabeledAdjacencyList) AStarMPath(start, end NI, h Heuristic, w WeightFunc) (LabeledPath, float64) {
|
||||
f, labels, d, _ := g.AStarM(w, start, end, h)
|
||||
return f.PathToLabeled(end, labels, nil), d
|
||||
}
|
||||
|
||||
// implement container/heap
|
||||
func (h openHeap) Len() int { return len(h) }
|
||||
func (h openHeap) Less(i, j int) bool { return h[i].f < h[j].f }
|
||||
func (h openHeap) Swap(i, j int) {
|
||||
h[i], h[j] = h[j], h[i]
|
||||
h[i].fx = i
|
||||
h[j].fx = j
|
||||
}
|
||||
func (p *openHeap) Push(x interface{}) {
|
||||
h := *p
|
||||
fx := len(h)
|
||||
h = append(h, x.(*rNode))
|
||||
h[fx].fx = fx
|
||||
*p = h
|
||||
}
|
||||
|
||||
func (p *openHeap) Pop() interface{} {
|
||||
h := *p
|
||||
last := len(h) - 1
|
||||
*p = h[:last]
|
||||
h[last].fx = -1
|
||||
return h[last]
|
||||
}
|
||||
|
||||
// BellmanFord finds shortest paths from a start node in a weighted directed
|
||||
// graph using the Bellman-Ford-Moore algorithm.
|
||||
//
|
||||
// WeightFunc w must translate arc labels to arc weights.
|
||||
// Negative arc weights are allowed but not negative cycles.
|
||||
// Loops and parallel arcs are allowed.
|
||||
//
|
||||
// If the algorithm completes without encountering a negative cycle the method
|
||||
// returns shortest paths encoded in a FromList, labels and path distances
|
||||
// indexed by node, and return value end = -1.
|
||||
//
|
||||
// If it encounters a negative cycle reachable from start it returns end >= 0.
|
||||
// In this case the cycle can be obtained by calling f.BellmanFordCycle(end).
|
||||
//
|
||||
// Negative cycles are only detected when reachable from start. A negative
|
||||
// cycle not reachable from start will not prevent the algorithm from finding
|
||||
// shortest paths from start.
|
||||
//
|
||||
// See also NegativeCycle to find a cycle anywhere in the graph, see
|
||||
// NegativeCycles for enumerating all negative cycles, and see
|
||||
// HasNegativeCycle for lighter-weight negative cycle detection.
|
||||
func (g LabeledDirected) BellmanFord(w WeightFunc, start NI) (f FromList, labels []LI, dist []float64, end NI) {
|
||||
a := g.LabeledAdjacencyList
|
||||
f = NewFromList(len(a))
|
||||
labels = make([]LI, len(a))
|
||||
dist = make([]float64, len(a))
|
||||
inf := math.Inf(1)
|
||||
for i := range dist {
|
||||
dist[i] = inf
|
||||
}
|
||||
rp := f.Paths
|
||||
rp[start] = PathEnd{Len: 1, From: -1}
|
||||
dist[start] = 0
|
||||
for range a[1:] {
|
||||
imp := false
|
||||
for from, nbs := range a {
|
||||
fp := &rp[from]
|
||||
d1 := dist[from]
|
||||
for _, nb := range nbs {
|
||||
d2 := d1 + w(nb.Label)
|
||||
to := &rp[nb.To]
|
||||
// TODO improve to break ties
|
||||
if fp.Len > 0 && d2 < dist[nb.To] {
|
||||
*to = PathEnd{From: NI(from), Len: fp.Len + 1}
|
||||
labels[nb.To] = nb.Label
|
||||
dist[nb.To] = d2
|
||||
imp = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if !imp {
|
||||
break
|
||||
}
|
||||
}
|
||||
for from, nbs := range a {
|
||||
d1 := dist[from]
|
||||
for _, nb := range nbs {
|
||||
if d1+w(nb.Label) < dist[nb.To] {
|
||||
// return from as the end of a path from which a negative cycle is reachable
|
||||
return f, labels, dist, NI(from)
|
||||
}
|
||||
}
|
||||
}
|
||||
return f, labels, dist, -1
|
||||
}
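// Illustrative sketch, not from the vendored source: running BellmanFord and
// checking the negative-cycle indicator. The graph below (one negative arc,
// no negative cycle) and the name exampleBellmanFord are assumptions.
func exampleBellmanFord() {
	g := LabeledDirected{LabeledAdjacencyList{
		0: {{To: 1, Label: 2}, {To: 2, Label: 5}},
		1: {{To: 2, Label: -4}},
		2: {},
	}}
	w := func(l LI) float64 { return float64(l) }
	f, labels, dist, end := g.BellmanFord(w, 0)
	if end >= 0 {
		_ = f.BellmanFordCycle(end) // only reached if a negative cycle were found
		return
	}
	_, _, _ = f, labels, dist // here dist[2] == -2, via 0->1->2
}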
|
||||
|
||||
// BellmanFordCycle decodes a negative cycle detected by BellmanFord.
|
||||
//
|
||||
// Receiver f and argument end must be results returned from BellmanFord.
|
||||
func (f FromList) BellmanFordCycle(end NI) (c []NI) {
|
||||
p := f.Paths
|
||||
b := bits.New(len(p))
|
||||
for b.Bit(int(end)) == 0 {
|
||||
b.SetBit(int(end), 1)
|
||||
end = p[end].From
|
||||
}
|
||||
for b.Bit(int(end)) == 1 {
|
||||
c = append(c, end)
|
||||
b.SetBit(int(end), 0)
|
||||
end = p[end].From
|
||||
}
|
||||
for i, j := 0, len(c)-1; i < j; i, j = i+1, j-1 {
|
||||
c[i], c[j] = c[j], c[i]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HasNegativeCycle returns true if the graph contains any negative cycle.
|
||||
//
|
||||
// HasNegativeCycle uses a Bellman-Ford-like algorithm, but finds negative
|
||||
// cycles anywhere in the graph. Also path information is not computed,
|
||||
// reducing memory use somewhat compared to BellmanFord.
|
||||
//
|
||||
// See also NegativeCycle to obtain the cycle, see NegativeCycles for
|
||||
// enumerating all negative cycles, and see BellmanFord for single source
|
||||
// shortest path searches with negative cycle detection.
|
||||
func (g LabeledDirected) HasNegativeCycle(w WeightFunc) bool {
|
||||
a := g.LabeledAdjacencyList
|
||||
dist := make([]float64, len(a))
|
||||
for range a[1:] {
|
||||
imp := false
|
||||
for from, nbs := range a {
|
||||
d1 := dist[from]
|
||||
for _, nb := range nbs {
|
||||
d2 := d1 + w(nb.Label)
|
||||
if d2 < dist[nb.To] {
|
||||
dist[nb.To] = d2
|
||||
imp = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if !imp {
|
||||
break
|
||||
}
|
||||
}
|
||||
for from, nbs := range a {
|
||||
d1 := dist[from]
|
||||
for _, nb := range nbs {
|
||||
if d1+w(nb.Label) < dist[nb.To] {
|
||||
return true // negative cycle
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NegativeCycle finds a negative cycle if one exists.
|
||||
//
|
||||
// NegativeCycle uses a Bellman-Ford-like algorithm, but finds negative
|
||||
// cycles anywhere in the graph. If a negative cycle exists, one will be
|
||||
// returned. The result is nil if no negative cycle exists.
|
||||
//
|
||||
// See also NegativeCycles for enumerating all negative cycles, see
|
||||
// HasNegativeCycle for lighter-weight cycle detection, and see
|
||||
// BellmanFord for single source shortest paths, also with negative cycle
|
||||
// detection.
|
||||
func (g LabeledDirected) NegativeCycle(w WeightFunc) (c []Half) {
|
||||
a := g.LabeledAdjacencyList
|
||||
f := NewFromList(len(a))
|
||||
p := f.Paths
|
||||
for n := range p {
|
||||
p[n] = PathEnd{From: -1, Len: 1}
|
||||
}
|
||||
labels := make([]LI, len(a))
|
||||
dist := make([]float64, len(a))
|
||||
for range a {
|
||||
imp := false
|
||||
for from, nbs := range a {
|
||||
fp := &p[from]
|
||||
d1 := dist[from]
|
||||
for _, nb := range nbs {
|
||||
d2 := d1 + w(nb.Label)
|
||||
to := &p[nb.To]
|
||||
if fp.Len > 0 && d2 < dist[nb.To] {
|
||||
*to = PathEnd{From: NI(from), Len: fp.Len + 1}
|
||||
labels[nb.To] = nb.Label
|
||||
dist[nb.To] = d2
|
||||
imp = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if !imp {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
vis := bits.New(len(a))
|
||||
a:
|
||||
for n := range a {
|
||||
end := n
|
||||
b := bits.New(len(a))
|
||||
for b.Bit(end) == 0 {
|
||||
if vis.Bit(end) == 1 {
|
||||
continue a
|
||||
}
|
||||
vis.SetBit(end, 1)
|
||||
b.SetBit(end, 1)
|
||||
end = int(p[end].From)
|
||||
if end < 0 {
|
||||
continue a
|
||||
}
|
||||
}
|
||||
for b.Bit(end) == 1 {
|
||||
c = append(c, Half{NI(end), labels[end]})
|
||||
b.SetBit(end, 0)
|
||||
end = int(p[end].From)
|
||||
}
|
||||
for i, j := 0, len(c)-1; i < j; i, j = i+1, j-1 {
|
||||
c[i], c[j] = c[j], c[i]
|
||||
}
|
||||
return c
|
||||
}
|
||||
return nil // no negative cycle
|
||||
}
|
||||
|
||||
// DAGMinDistPath finds a single shortest path.
|
||||
//
|
||||
// Shortest means minimum sum of arc weights.
|
||||
//
|
||||
// Returned is the path and distance as returned by FromList.PathTo.
|
||||
//
|
||||
// This is a convenience method. See DAGOptimalPaths for more options.
|
||||
func (g LabeledDirected) DAGMinDistPath(start, end NI, w WeightFunc) (LabeledPath, float64, error) {
|
||||
return g.dagPath(start, end, w, false)
|
||||
}
|
||||
|
||||
// DAGMaxDistPath finds a single longest path.
|
||||
//
|
||||
// Longest means maximum sum of arc weights.
|
||||
//
|
||||
// Returned is the path and distance as returned by FromList.PathTo.
|
||||
//
|
||||
// This is a convenience method. See DAGOptimalPaths for more options.
|
||||
func (g LabeledDirected) DAGMaxDistPath(start, end NI, w WeightFunc) (LabeledPath, float64, error) {
|
||||
return g.dagPath(start, end, w, true)
|
||||
}
|
||||
|
||||
func (g LabeledDirected) dagPath(start, end NI, w WeightFunc, longest bool) (LabeledPath, float64, error) {
|
||||
o, _ := g.Topological()
|
||||
if o == nil {
|
||||
return LabeledPath{}, 0, fmt.Errorf("not a DAG")
|
||||
}
|
||||
f, labels, dist, _ := g.DAGOptimalPaths(start, end, o, w, longest)
|
||||
if f.Paths[end].Len == 0 {
|
||||
return LabeledPath{}, 0, fmt.Errorf("no path from %d to %d", start, end)
|
||||
}
|
||||
return f.PathToLabeled(end, labels, nil), dist[end], nil
|
||||
}
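// Illustrative sketch, not from the vendored source: DAGMinDistPath on a tiny
// DAG. The graph, weight function, and the name exampleDAGMinDistPath are
// assumptions for this example.
func exampleDAGMinDistPath() {
	g := LabeledDirected{LabeledAdjacencyList{
		0: {{To: 1, Label: 1}, {To: 2, Label: 2}},
		1: {{To: 3, Label: 5}},
		2: {{To: 3, Label: 1}},
		3: {},
	}}
	w := func(l LI) float64 { return float64(l) }
	p, dist, err := g.DAGMinDistPath(0, 3, w)
	_, _, _ = p, dist, err // expected: path 0->2->3, distance 3, err == nil
}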
|
||||
|
||||
// DAGOptimalPaths finds either longest or shortest distance paths in a
|
||||
// directed acyclic graph.
|
||||
//
|
||||
// Path distance is the sum of arc weights on the path.
|
||||
// Negative arc weights are allowed.
|
||||
// Where multiple paths exist with the same distance, the path length
|
||||
// (number of nodes) is used as a tie breaker.
|
||||
//
|
||||
// Receiver g must be a directed acyclic graph. Argument ordering must be
// either nil or a topological ordering of g. If nil, a topological ordering is
|
||||
// computed internally. If longest is true, an optimal path is a longest
|
||||
// distance path. Otherwise it is a shortest distance path.
|
||||
//
|
||||
// Argument start is the start node for paths, end is the end node. If end
|
||||
// is a valid node number, the method returns as soon as the optimal path
|
||||
// to end is found. If end is -1, all optimal paths from start are found.
|
||||
//
|
||||
// Paths and path distances are encoded in the returned FromList, labels,
|
||||
// and dist slices. The number of nodes reached is returned as nReached.
|
||||
func (g LabeledDirected) DAGOptimalPaths(start, end NI, ordering []NI, w WeightFunc, longest bool) (f FromList, labels []LI, dist []float64, nReached int) {
|
||||
a := g.LabeledAdjacencyList
|
||||
f = NewFromList(len(a))
|
||||
f.Leaves = bits.New(len(a))
|
||||
labels = make([]LI, len(a))
|
||||
dist = make([]float64, len(a))
|
||||
if ordering == nil {
|
||||
ordering, _ = g.Topological()
|
||||
}
|
||||
// search ordering for start
|
||||
o := 0
|
||||
for ordering[o] != start {
|
||||
o++
|
||||
}
|
||||
var fBetter func(cand, ext float64) bool
|
||||
var iBetter func(cand, ext int) bool
|
||||
if longest {
|
||||
fBetter = func(cand, ext float64) bool { return cand > ext }
|
||||
iBetter = func(cand, ext int) bool { return cand > ext }
|
||||
} else {
|
||||
fBetter = func(cand, ext float64) bool { return cand < ext }
|
||||
iBetter = func(cand, ext int) bool { return cand < ext }
|
||||
}
|
||||
p := f.Paths
|
||||
p[start] = PathEnd{From: -1, Len: 1}
|
||||
f.MaxLen = 1
|
||||
leaves := &f.Leaves
|
||||
leaves.SetBit(int(start), 1)
|
||||
nReached = 1
|
||||
for n := start; n != end; n = ordering[o] {
|
||||
if p[n].Len > 0 && len(a[n]) > 0 {
|
||||
nDist := dist[n]
|
||||
candLen := p[n].Len + 1 // len for any candidate arc followed from n
|
||||
for _, to := range a[n] {
|
||||
leaves.SetBit(int(to.To), 1)
|
||||
candDist := nDist + w(to.Label)
|
||||
switch {
|
||||
case p[to.To].Len == 0: // first path to node to.To
|
||||
nReached++
|
||||
case fBetter(candDist, dist[to.To]): // better distance
|
||||
case candDist == dist[to.To] && iBetter(candLen, p[to.To].Len): // same distance but better path length
|
||||
default:
|
||||
continue
|
||||
}
|
||||
dist[to.To] = candDist
|
||||
p[to.To] = PathEnd{From: n, Len: candLen}
|
||||
labels[to.To] = to.Label
|
||||
if candLen > f.MaxLen {
|
||||
f.MaxLen = candLen
|
||||
}
|
||||
}
|
||||
leaves.SetBit(int(n), 0)
|
||||
}
|
||||
o++
|
||||
if o == len(ordering) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Dijkstra finds shortest paths by Dijkstra's algorithm.
|
||||
//
|
||||
// Shortest means shortest distance where distance is the
|
||||
// sum of arc weights. Where multiple paths exist with the same distance,
|
||||
// a path with the minimum number of nodes is returned.
|
||||
//
|
||||
// As usual for Dijkstra's algorithm, arc weights must be non-negative.
|
||||
// Graphs may be directed or undirected. Loops and parallel arcs are
|
||||
// allowed.
|
||||
//
|
||||
// Paths and path distances are encoded in the returned FromList and dist
|
||||
// slice. Returned labels are the labels of arcs followed to each node.
|
||||
// The number of nodes reached is returned as nReached.
|
||||
func (g LabeledAdjacencyList) Dijkstra(start, end NI, w WeightFunc) (f FromList, labels []LI, dist []float64, nReached int) {
|
||||
r := make([]tentResult, len(g))
|
||||
for i := range r {
|
||||
r[i].nx = NI(i)
|
||||
}
|
||||
f = NewFromList(len(g))
|
||||
labels = make([]LI, len(g))
|
||||
dist = make([]float64, len(g))
|
||||
current := start
|
||||
rp := f.Paths
|
||||
rp[current] = PathEnd{Len: 1, From: -1} // path length at start is 1 node
|
||||
cr := &r[current]
|
||||
cr.dist = 0 // distance at start is 0.
|
||||
cr.done = true // mark start done. it skips the heap.
|
||||
nDone := 1 // accumulated for a return value
|
||||
var t tent
|
||||
for current != end {
|
||||
nextLen := rp[current].Len + 1
|
||||
for _, nb := range g[current] {
|
||||
// d.arcVis++
|
||||
hr := &r[nb.To]
|
||||
if hr.done {
|
||||
continue // skip nodes already done
|
||||
}
|
||||
dist := cr.dist + w(nb.Label)
|
||||
vl := rp[nb.To].Len
|
||||
visited := vl > 0
|
||||
if visited {
|
||||
if dist > hr.dist {
|
||||
continue // distance is worse
|
||||
}
|
||||
// tie breaker is a nice touch and doesn't seem to
|
||||
// impact performance much.
|
||||
if dist == hr.dist && nextLen >= vl {
|
||||
continue // distance same, but number of nodes is no better
|
||||
}
|
||||
}
|
||||
// the path through current to this node is shortest so far.
|
||||
// record new path data for this node and update tentative set.
|
||||
hr.dist = dist
|
||||
rp[nb.To].Len = nextLen
|
||||
rp[nb.To].From = current
|
||||
labels[nb.To] = nb.Label
|
||||
if visited {
|
||||
heap.Fix(&t, hr.fx)
|
||||
} else {
|
||||
heap.Push(&t, hr)
|
||||
}
|
||||
}
|
||||
//d.ndVis++
|
||||
if len(t) == 0 {
|
||||
// no more reachable nodes. AllPaths normal return
|
||||
return f, labels, dist, nDone
|
||||
}
|
||||
// new current is node with smallest tentative distance
|
||||
cr = heap.Pop(&t).(*tentResult)
|
||||
cr.done = true
|
||||
nDone++
|
||||
current = cr.nx
|
||||
dist[current] = cr.dist // store final distance
|
||||
}
|
||||
// normal return for single shortest path search
|
||||
return f, labels, dist, -1
|
||||
}
|
||||
|
||||
// DijkstraPath finds a single shortest path.
|
||||
//
|
||||
// Returned is the path as returned by FromList.LabeledPathTo and the total
|
||||
// path distance.
|
||||
func (g LabeledAdjacencyList) DijkstraPath(start, end NI, w WeightFunc) (LabeledPath, float64) {
|
||||
f, labels, dist, _ := g.Dijkstra(start, end, w)
|
||||
return f.PathToLabeled(end, labels, nil), dist[end]
|
||||
}
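// Illustrative sketch, not from the vendored source: DijkstraPath on a small
// labeled graph. The graph, weight function, and the name exampleDijkstraPath
// are assumptions for this example.
func exampleDijkstraPath() {
	g := LabeledAdjacencyList{
		0: {{To: 1, Label: 7}, {To: 2, Label: 9}, {To: 5, Label: 14}},
		1: {{To: 2, Label: 10}, {To: 3, Label: 15}},
		2: {{To: 3, Label: 11}, {To: 5, Label: 2}},
		3: {{To: 4, Label: 6}},
		4: {},
		5: {{To: 4, Label: 9}},
	}
	w := func(l LI) float64 { return float64(l) }
	p, dist := g.DijkstraPath(0, 4, w)
	_, _ = p, dist // expected distance 20, via 0->2->5->4
}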
|
||||
|
||||
// tent implements container/heap
|
||||
func (t tent) Len() int { return len(t) }
|
||||
func (t tent) Less(i, j int) bool { return t[i].dist < t[j].dist }
|
||||
func (t tent) Swap(i, j int) {
|
||||
t[i], t[j] = t[j], t[i]
|
||||
t[i].fx = i
|
||||
t[j].fx = j
|
||||
}
|
||||
func (s *tent) Push(x interface{}) {
|
||||
nd := x.(*tentResult)
|
||||
nd.fx = len(*s)
|
||||
*s = append(*s, nd)
|
||||
}
|
||||
func (s *tent) Pop() interface{} {
|
||||
t := *s
|
||||
last := len(t) - 1
|
||||
*s = t[:last]
|
||||
return t[last]
|
||||
}
|
||||
|
||||
type tentResult struct {
|
||||
dist float64 // tentative distance, sum of arc weights
|
||||
nx NI // slice index, "node id"
|
||||
fx int // heap.Fix index
|
||||
done bool
|
||||
}
|
||||
|
||||
type tent []*tentResult
|
||||
817
vendor/github.com/soniakeys/graph/undir.go
generated
vendored
817
vendor/github.com/soniakeys/graph/undir.go
generated
vendored
@@ -1,817 +0,0 @@
|
||||
// Copyright 2014 Sonia Keys
|
||||
// License MIT: http://opensource.org/licenses/MIT
|
||||
|
||||
package graph
|
||||
|
||||
// undir.go has methods specific to undirected graphs, Undirected and
|
||||
// LabeledUndirected.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/soniakeys/bits"
|
||||
)
|
||||
|
||||
// AddEdge adds an edge to a graph.
|
||||
//
|
||||
// It can be useful for constructing undirected graphs.
|
||||
//
|
||||
// When n1 and n2 are distinct, it adds the arc n1->n2 and the reciprocal
|
||||
// n2->n1. When n1 and n2 are the same, it adds a single arc loop.
|
||||
//
|
||||
// The pointer receiver allows the method to expand the graph as needed
|
||||
// to include the values n1 and n2. If n1 or n2 happen to be greater than
|
||||
// len(*p) the method does not panic, but simply expands the graph.
|
||||
//
|
||||
// If you know or can compute the final graph order however, consider
|
||||
// preallocating to avoid any overhead of expanding the graph.
|
||||
// See second example, "More".
|
||||
func (p *Undirected) AddEdge(n1, n2 NI) {
|
||||
// Similar code in LabeledAdjacencyList.AddEdge.
|
||||
|
||||
// determine max of the two end points
|
||||
max := n1
|
||||
if n2 > max {
|
||||
max = n2
|
||||
}
|
||||
// expand graph if needed, to include both
|
||||
g := p.AdjacencyList
|
||||
if int(max) >= len(g) {
|
||||
p.AdjacencyList = make(AdjacencyList, max+1)
|
||||
copy(p.AdjacencyList, g)
|
||||
g = p.AdjacencyList
|
||||
}
|
||||
// create one half-arc,
|
||||
g[n1] = append(g[n1], n2)
|
||||
// and except for loops, create the reciprocal
|
||||
if n1 != n2 {
|
||||
g[n2] = append(g[n2], n1)
|
||||
}
|
||||
}
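// Illustrative sketch, not from the vendored source: building a small
// undirected graph with AddEdge, letting the zero value expand as needed.
// The name exampleAddEdge is an assumption for this example.
func exampleAddEdge() {
	var g Undirected
	g.AddEdge(0, 1)
	g.AddEdge(1, 2)
	g.AddEdge(2, 2) // a loop gets a single arc, not a reciprocal pair
	_ = g.AdjacencyList // now [[1] [0 2] [1 2]]
}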
|
||||
|
||||
// RemoveEdge removes a single edge between nodes n1 and n2.
|
||||
//
|
||||
// It removes reciprocal arcs in the case of distinct n1 and n2 or removes
|
||||
// a single arc loop in the case of n1 == n2.
|
||||
//
|
||||
// Returns true if the specified edge is found and successfully removed,
|
||||
// false if the edge does not exist.
|
||||
func (g Undirected) RemoveEdge(n1, n2 NI) (ok bool) {
|
||||
ok, x1, x2 := g.HasEdge(n1, n2)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
a := g.AdjacencyList
|
||||
to := a[n1]
|
||||
last := len(to) - 1
|
||||
to[x1] = to[last]
|
||||
a[n1] = to[:last]
|
||||
if n1 == n2 {
|
||||
return
|
||||
}
|
||||
to = a[n2]
|
||||
last = len(to) - 1
|
||||
to[x2] = to[last]
|
||||
a[n2] = to[:last]
|
||||
return
|
||||
}
|
||||
|
||||
// ArcDensity returns density for a simple directed graph.
|
||||
//
|
||||
// Parameter n is order, or number of nodes of a simple directed graph.
|
||||
// Parameter a is the arc size, or number of directed arcs.
|
||||
//
|
||||
// Returned density is the fraction `a` over the total possible number of arcs
|
||||
// or a / (n * (n-1)).
|
||||
//
|
||||
// See also Density for density of a simple undirected graph.
|
||||
//
|
||||
// See also the corresponding methods AdjacencyList.ArcDensity and
|
||||
// LabeledAdjacencyList.ArcDensity.
|
||||
func ArcDensity(n, a int) float64 {
|
||||
return float64(a) / (float64(n) * float64(n-1))
|
||||
}
|
||||
|
||||
// Density returns density for a simple undirected graph.
|
||||
//
|
||||
// Parameter n is order, or number of nodes of a simple undirected graph.
|
||||
// Parameter m is the size, or number of undirected edges.
|
||||
//
|
||||
// Returned density is the fraction m over the total possible number of edges
|
||||
// or m / ((n * (n-1))/2).
|
||||
//
|
||||
// See also ArcDensity for simple directed graphs.
|
||||
//
|
||||
// See also the corresponding methods AdjacencyList.Density and
|
||||
// LabeledAdjacencyList.Density.
|
||||
func Density(n, m int) float64 {
|
||||
return float64(m) * 2 / (float64(n) * float64(n-1))
|
||||
}
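// Illustrative worked example, not from the vendored source: a path graph on
// 4 nodes has 3 of the 6 possible edges, so its density is 3*2/(4*3) = 0.5.
func exampleDensity() float64 {
	return Density(4, 3) // 0.5
}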
|
||||
|
||||
// An EdgeVisitor is an argument to some traversal methods.
|
||||
//
|
||||
// Traversal methods call the visitor function for each edge visited.
|
||||
// Argument e is the edge being visited.
|
||||
type EdgeVisitor func(e Edge)
|
||||
|
||||
// Edges iterates over the edges of an undirected graph.
|
||||
//
|
||||
// Edge visitor v is called for each edge of the graph. That is, it is called
|
||||
// once for each reciprocal arc pair and once for each loop.
|
||||
//
|
||||
// See also LabeledUndirected.Edges for a labeled version.
|
||||
// See also Undirected.SimpleEdges for a version that emits only the simple
|
||||
// subgraph.
|
||||
func (g Undirected) Edges(v EdgeVisitor) {
|
||||
a := g.AdjacencyList
|
||||
unpaired := make(AdjacencyList, len(a))
|
||||
for fr, to := range a {
|
||||
arc: // for each arc in a
|
||||
for _, to := range to {
|
||||
if to == NI(fr) {
|
||||
v(Edge{NI(fr), to}) // output loop
|
||||
continue
|
||||
}
|
||||
// search unpaired arcs
|
||||
ut := unpaired[to]
|
||||
for i, u := range ut {
|
||||
if u == NI(fr) { // found reciprocal
|
||||
v(Edge{u, to}) // output edge
|
||||
last := len(ut) - 1
|
||||
ut[i] = ut[last]
|
||||
unpaired[to] = ut[:last]
|
||||
continue arc
|
||||
}
|
||||
}
|
||||
// reciprocal not found
|
||||
unpaired[fr] = append(unpaired[fr], to)
|
||||
}
|
||||
}
|
||||
// undefined behavior is that unpaired arcs are silently ignored.
|
||||
}
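// Illustrative sketch, not from the vendored source: collecting edges with an
// EdgeVisitor closure. The name exampleEdges is an assumption for this example.
func exampleEdges() []Edge {
	var g Undirected
	g.AddEdge(0, 1)
	g.AddEdge(1, 2)
	var edges []Edge
	g.Edges(func(e Edge) { edges = append(edges, e) })
	return edges // two edges, one per reciprocal arc pair
}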
|
||||
|
||||
// FromList builds a forest with a tree spanning each connected component.
|
||||
//
|
||||
// For each component a root is chosen and spanning is done with the method
|
||||
// Undirected.SpanTree, and so is breadth-first. Returned is a FromList with
|
||||
// all spanned trees, a list of roots chosen, and a bool indicating if the
|
||||
// receiver graph g was found to be a simple graph connected as a forest.
|
||||
// Any cycles, loops, or parallel edges in any component will cause
|
||||
// simpleForest to be false, but FromList f will still be populated with
|
||||
// a valid and complete spanning forest.
|
||||
func (g Undirected) FromList() (f FromList, roots []NI, simpleForest bool) {
|
||||
p := make([]PathEnd, g.Order())
|
||||
for i := range p {
|
||||
p[i].From = -1
|
||||
}
|
||||
f.Paths = p
|
||||
simpleForest = true
|
||||
ts := 0
|
||||
for n := range g.AdjacencyList {
|
||||
if p[n].From >= 0 {
|
||||
continue
|
||||
}
|
||||
roots = append(roots, NI(n))
|
||||
ns, st := g.SpanTree(NI(n), &f)
|
||||
if !st {
|
||||
simpleForest = false
|
||||
}
|
||||
ts += ns
|
||||
if ts == len(p) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HasEdge returns true if g has any edge between nodes n1 and n2.
|
||||
//
|
||||
// Also returned are indexes x1 and x2 such that g[n1][x1] == n2
|
||||
// and g[n2][x2] == n1. If no edge between n1 and n2 is present HasEdge
// returns `has` == false.
|
||||
//
|
||||
// See also HasArc. If you are interested only in the boolean result and
|
||||
// g is well formed (passes IsUndirected), then HasArc is an adequate test.
|
||||
func (g Undirected) HasEdge(n1, n2 NI) (has bool, x1, x2 int) {
|
||||
if has, x1 = g.HasArc(n1, n2); !has {
|
||||
return has, x1, x1
|
||||
}
|
||||
has, x2 = g.HasArc(n2, n1)
|
||||
return
|
||||
}
|
||||
|
||||
// SimpleEdges iterates over the edges of the simple subgraph of an undirected
|
||||
// graph.
|
||||
//
|
||||
// Edge visitor v is called for each pair of distinct nodes that is connected
|
||||
// with an edge. That is, loops are ignored and parallel edges are reduced to
|
||||
// a single edge.
|
||||
//
|
||||
// See also Undirected.Edges for a version that emits all edges.
|
||||
func (g Undirected) SimpleEdges(v EdgeVisitor) {
|
||||
for fr, to := range g.AdjacencyList {
|
||||
e := bits.New(len(g.AdjacencyList))
|
||||
for _, to := range to {
|
||||
if to > NI(fr) && e.Bit(int(to)) == 0 {
|
||||
e.SetBit(int(to), 1)
|
||||
v(Edge{NI(fr), to})
|
||||
}
|
||||
}
|
||||
}
|
||||
// undefined behavior is that unpaired arcs may or may not be emitted.
|
||||
}
|
||||
|
||||
// SpanTree builds a tree spanning a connected component.
|
||||
//
|
||||
// The component is spanned by breadth-first search from the given root.
|
||||
// The resulting spanning tree is stored in a FromList.
|
||||
//
|
||||
// If FromList.Paths is not the same length as g, it is allocated and
|
||||
// initialized. This allows a zero value FromList to be passed as f.
|
||||
// If FromList.Paths is the same length as g, it is used as is and is not
|
||||
// reinitialized. This allows multiple trees to be spanned in the same
|
||||
// FromList with successive calls.
|
||||
//
|
||||
// For nodes spanned, the Path member of the returned FromList is populated
|
||||
// with both From and Len values. The MaxLen member will be updated but
|
||||
// not Leaves.
|
||||
//
|
||||
// Returned is the number of nodes spanned, which will be the number of nodes
|
||||
// in the component, and a bool indicating if the component was found to be a
|
||||
// simply connected unrooted tree in the receiver graph g. Any cycles, loops,
|
||||
// or parallel edges in the component will cause simpleTree to be false, but
|
||||
// FromList f will still be populated with a valid and complete spanning tree.
|
||||
func (g Undirected) SpanTree(root NI, f *FromList) (nSpanned int, simpleTree bool) {
|
||||
a := g.AdjacencyList
|
||||
p := f.Paths
|
||||
if len(p) != len(a) {
|
||||
p = make([]PathEnd, len(a))
|
||||
for i := range p {
|
||||
p[i].From = -1
|
||||
}
|
||||
f.Paths = p
|
||||
}
|
||||
simpleTree = true
|
||||
p[root] = PathEnd{From: -1, Len: 1}
|
||||
type arc struct {
|
||||
from NI
|
||||
half NI
|
||||
}
|
||||
var next []arc
|
||||
frontier := []arc{{-1, root}}
|
||||
for len(frontier) > 0 {
|
||||
for _, fa := range frontier { // fa frontier arc
|
||||
nSpanned++
|
||||
l := p[fa.half].Len + 1
|
||||
for _, to := range a[fa.half] {
|
||||
if to == fa.from {
|
||||
continue
|
||||
}
|
||||
if p[to].Len > 0 {
|
||||
simpleTree = false
|
||||
continue
|
||||
}
|
||||
p[to] = PathEnd{From: fa.half, Len: l}
|
||||
if l > f.MaxLen {
|
||||
f.MaxLen = l
|
||||
}
|
||||
next = append(next, arc{fa.half, to})
|
||||
}
|
||||
}
|
||||
frontier, next = next, frontier[:0]
|
||||
}
|
||||
return
|
||||
}
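// Illustrative sketch, not from the vendored source: spanning two components
// into one FromList with successive SpanTree calls, as described above. The
// graph and the name exampleSpanTree are assumptions for this example.
func exampleSpanTree() {
	var g Undirected
	g.AddEdge(0, 1)
	g.AddEdge(1, 2)
	g.AddEdge(3, 4) // a second component
	var f FromList
	n0, simple0 := g.SpanTree(0, &f) // allocates f.Paths, spans nodes 0, 1, 2
	n1, simple1 := g.SpanTree(3, &f) // reuses f.Paths, spans nodes 3, 4
	_, _, _, _ = n0, simple0, n1, simple1 // 3, true, 2, true
}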
|
||||
|
||||
// TarjanBiconnectedComponents decomposes a graph into maximal biconnected
|
||||
// components, components for which if any node were removed the component
|
||||
// would remain connected.
|
||||
//
|
||||
// The receiver g must be a simple graph. The method calls the emit argument
|
||||
// for each component identified, as long as emit returns true. If emit
|
||||
// returns false, TarjanBiconnectedComponents returns immediately.
|
||||
//
|
||||
// See also the equivalent labeled TarjanBiconnectedComponents.
|
||||
func (g Undirected) TarjanBiconnectedComponents(emit func([]Edge) bool) {
|
||||
// Implemented closely to pseudocode in "Depth-first search and linear
|
||||
// graph algorithms", Robert Tarjan, SIAM J. Comput. Vol. 1, No. 2,
|
||||
// June 1972.
|
||||
//
|
||||
// Note Tarjan's "adjacency structure" is graph.AdjacencyList,
|
||||
// His "adjacency list" is an element of a graph.AdjacencyList, also
|
||||
// termed a "to-list", "neighbor list", or "child list."
|
||||
a := g.AdjacencyList
|
||||
number := make([]int, len(a))
|
||||
lowpt := make([]int, len(a))
|
||||
var stack []Edge
|
||||
var i int
|
||||
var biconnect func(NI, NI) bool
|
||||
biconnect = func(v, u NI) bool {
|
||||
i++
|
||||
number[v] = i
|
||||
lowpt[v] = i
|
||||
for _, w := range a[v] {
|
||||
if number[w] == 0 {
|
||||
stack = append(stack, Edge{v, w})
|
||||
if !biconnect(w, v) {
|
||||
return false
|
||||
}
|
||||
if lowpt[w] < lowpt[v] {
|
||||
lowpt[v] = lowpt[w]
|
||||
}
|
||||
if lowpt[w] >= number[v] {
|
||||
var bcc []Edge
|
||||
top := len(stack) - 1
|
||||
for number[stack[top].N1] >= number[w] {
|
||||
bcc = append(bcc, stack[top])
|
||||
stack = stack[:top]
|
||||
top--
|
||||
}
|
||||
bcc = append(bcc, stack[top])
|
||||
stack = stack[:top]
|
||||
top--
|
||||
if !emit(bcc) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
} else if number[w] < number[v] && w != u {
|
||||
stack = append(stack, Edge{v, w})
|
||||
if number[w] < lowpt[v] {
|
||||
lowpt[v] = number[w]
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
for w := range a {
|
||||
if number[w] == 0 && !biconnect(NI(w), -1) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
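// BlockCut identifies biconnected components, cut vertices, and isolated
// nodes of the receiver graph, calling the argument functions as features are
// identified: block for each biconnected component (as a list of edges), cut
// for cut vertices as they are identified, and isolated for each isolated
// node. Traversal stops early if any callback returns false.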
func (g Undirected) BlockCut(block func([]Edge) bool, cut func(NI) bool, isolated func(NI) bool) {
|
||||
a := g.AdjacencyList
|
||||
number := make([]int, len(a))
|
||||
lowpt := make([]int, len(a))
|
||||
var stack []Edge
|
||||
var i, rc int
|
||||
var biconnect func(NI, NI) bool
|
||||
biconnect = func(v, u NI) bool {
|
||||
i++
|
||||
number[v] = i
|
||||
lowpt[v] = i
|
||||
for _, w := range a[v] {
|
||||
if number[w] == 0 {
|
||||
if u < 0 {
|
||||
rc++
|
||||
}
|
||||
stack = append(stack, Edge{v, w})
|
||||
if !biconnect(w, v) {
|
||||
return false
|
||||
}
|
||||
if lowpt[w] < lowpt[v] {
|
||||
lowpt[v] = lowpt[w]
|
||||
}
|
||||
if lowpt[w] >= number[v] {
|
||||
if u >= 0 && !cut(v) {
|
||||
return false
|
||||
}
|
||||
var bcc []Edge
|
||||
top := len(stack) - 1
|
||||
for number[stack[top].N1] >= number[w] {
|
||||
bcc = append(bcc, stack[top])
|
||||
stack = stack[:top]
|
||||
top--
|
||||
}
|
||||
bcc = append(bcc, stack[top])
|
||||
stack = stack[:top]
|
||||
top--
|
||||
if !block(bcc) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
} else if number[w] < number[v] && w != u {
|
||||
stack = append(stack, Edge{v, w})
|
||||
if number[w] < lowpt[v] {
|
||||
lowpt[v] = number[w]
|
||||
}
|
||||
}
|
||||
}
|
||||
if u < 0 && rc > 1 {
|
||||
return cut(v)
|
||||
}
|
||||
return true
|
||||
}
|
||||
for w := range a {
|
||||
if number[w] > 0 {
|
||||
continue
|
||||
}
|
||||
if len(a[w]) == 0 {
|
||||
if !isolated(NI(w)) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
rc = 0
|
||||
if !biconnect(NI(w), -1) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AddEdge adds an edge to a labeled graph.
|
||||
//
|
||||
// It can be useful for constructing undirected graphs.
|
||||
//
|
||||
// When n1 and n2 are distinct, it adds the arc n1->n2 and the reciprocal
|
||||
// n2->n1. When n1 and n2 are the same, it adds a single arc loop.
|
||||
//
|
||||
// If the edge already exists in *p, a parallel edge is added.
|
||||
//
|
||||
// The pointer receiver allows the method to expand the graph as needed
|
||||
// to include the values n1 and n2. If n1 or n2 happen to be greater than
|
||||
// len(*p) the method does not panic, but simply expands the graph.
|
||||
func (p *LabeledUndirected) AddEdge(e Edge, l LI) {
|
||||
// Similar code in AdjacencyList.AddEdge.
|
||||
|
||||
// determine max of the two end points
|
||||
max := e.N1
|
||||
if e.N2 > max {
|
||||
max = e.N2
|
||||
}
|
||||
// expand graph if needed, to include both
|
||||
g := p.LabeledAdjacencyList
|
||||
if max >= NI(len(g)) {
|
||||
p.LabeledAdjacencyList = make(LabeledAdjacencyList, max+1)
|
||||
copy(p.LabeledAdjacencyList, g)
|
||||
g = p.LabeledAdjacencyList
|
||||
}
|
||||
// create one half-arc,
|
||||
g[e.N1] = append(g[e.N1], Half{To: e.N2, Label: l})
|
||||
// and except for loops, create the reciprocal
|
||||
if e.N1 != e.N2 {
|
||||
g[e.N2] = append(g[e.N2], Half{To: e.N1, Label: l})
|
||||
}
|
||||
}
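// Illustrative sketch, not from the vendored source: building a small labeled
// undirected graph where labels can later be interpreted as weights. The name
// exampleLabeledAddEdge is an assumption for this example.
func exampleLabeledAddEdge() {
	var g LabeledUndirected
	g.AddEdge(Edge{0, 1}, 7)
	g.AddEdge(Edge{1, 2}, 3)
	_ = g.LabeledAdjacencyList // node 1's to-list is now [{0 7} {2 3}]
}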
|
||||
|
||||
// A LabeledEdgeVisitor is an argument to some traversal methods.
|
||||
//
|
||||
// Traversal methods call the visitor function for each edge visited.
|
||||
// Argument e is the edge being visited.
|
||||
type LabeledEdgeVisitor func(e LabeledEdge)
|
||||
|
||||
// Edges iterates over the edges of a labeled undirected graph.
|
||||
//
|
||||
// Edge visitor v is called for each edge of the graph. That is, it is called
|
||||
// once for each reciprocal arc pair and once for each loop.
|
||||
//
|
||||
// See also Undirected.Edges for an unlabeled version.
|
||||
// See also the more simplistic LabeledAdjacencyList.ArcsAsEdges.
|
||||
func (g LabeledUndirected) Edges(v LabeledEdgeVisitor) {
|
||||
// similar code in LabeledAdjacencyList.InUndirected
|
||||
a := g.LabeledAdjacencyList
|
||||
unpaired := make(LabeledAdjacencyList, len(a))
|
||||
for fr, to := range a {
|
||||
arc: // for each arc in a
|
||||
for _, to := range to {
|
||||
if to.To == NI(fr) {
|
||||
v(LabeledEdge{Edge{NI(fr), to.To}, to.Label}) // output loop
|
||||
continue
|
||||
}
|
||||
// search unpaired arcs
|
||||
ut := unpaired[to.To]
|
||||
for i, u := range ut {
|
||||
if u.To == NI(fr) && u.Label == to.Label { // found reciprocal
|
||||
v(LabeledEdge{Edge{NI(fr), to.To}, to.Label}) // output edge
|
||||
last := len(ut) - 1
|
||||
ut[i] = ut[last]
|
||||
unpaired[to.To] = ut[:last]
|
||||
continue arc
|
||||
}
|
||||
}
|
||||
// reciprocal not found
|
||||
unpaired[fr] = append(unpaired[fr], to)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FromList builds a forest with a tree spanning each connected component.
|
||||
//
|
||||
// For each component a root is chosen and spanning is done with the method
|
||||
// Undirected.SpanTree, and so is breadth-first. Returned is a FromList with
|
||||
// all spanned trees, labels corresponding to arcs in f, a list of roots
|
||||
// chosen, and a bool indicating if the receiver graph g was found to be a
|
||||
// simple graph connected as a forest. Any cycles, loops, or parallel edges
|
||||
// in any component will cause simpleForest to be false, but FromList f will
|
||||
// still be populated with a valid and complete spanning forest.
|
||||
func (g LabeledUndirected) FromList() (f FromList, labels []LI, roots []NI, simpleForest bool) {
|
||||
p := make([]PathEnd, g.Order())
|
||||
for i := range p {
|
||||
p[i].From = -1
|
||||
}
|
||||
f.Paths = p
|
||||
labels = make([]LI, len(p))
|
||||
simpleForest = true
|
||||
ts := 0
|
||||
for n := range g.LabeledAdjacencyList {
|
||||
if p[n].From >= 0 {
|
||||
continue
|
||||
}
|
||||
roots = append(roots, NI(n))
|
||||
ns, st := g.SpanTree(NI(n), &f, labels)
|
||||
if !st {
|
||||
simpleForest = false
|
||||
}
|
||||
ts += ns
|
||||
if ts == len(p) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SpanTree builds a tree spanning a connected component.
|
||||
//
|
||||
// The component is spanned by breadth-first search from the given root.
|
||||
// The resulting spanning tree is stored in a FromList, and arc labels optionally
|
||||
// stored in a slice.
|
||||
//
|
||||
// If FromList.Paths is not the same length as g, it is allocated and
|
||||
// initialized. This allows a zero value FromList to be passed as f.
|
||||
// If FromList.Paths is the same length as g, it is used as is and is not
|
||||
// reinitialized. This allows multiple trees to be spanned in the same
|
||||
// FromList with successive calls.
|
||||
//
|
||||
// For nodes spanned, the Path member of returned FromList f is
|
||||
// populated with both From and Len values. The MaxLen member will be
|
||||
// updated but not Leaves.
|
||||
//
|
||||
// The labels slice will be populated only if it is same length as g.
|
||||
// Nil can be passed for example if labels are not needed.
|
||||
//
|
||||
// Returned is the number of nodes spanned, which will be the number of nodes
|
||||
// in the component, and a bool indicating if the component was found to be a
|
||||
// simply connected unrooted tree in the receiver graph g. Any cycles, loops,
|
||||
// or parallel edges in the component will cause simpleTree to be false, but
|
||||
// FromList f will still be populated with a valid and complete spanning tree.
|
||||
func (g LabeledUndirected) SpanTree(root NI, f *FromList, labels []LI) (nSpanned int, simple bool) {
|
||||
a := g.LabeledAdjacencyList
|
||||
p := f.Paths
|
||||
if len(p) != len(a) {
|
||||
p = make([]PathEnd, len(a))
|
||||
for i := range p {
|
||||
p[i].From = -1
|
||||
}
|
||||
f.Paths = p
|
||||
}
|
||||
simple = true
|
||||
p[root].Len = 1
|
||||
type arc struct {
|
||||
from NI
|
||||
half Half
|
||||
}
|
||||
var next []arc
|
||||
frontier := []arc{{-1, Half{root, -1}}}
|
||||
for len(frontier) > 0 {
|
||||
for _, fa := range frontier { // fa frontier arc
|
||||
nSpanned++
|
||||
l := p[fa.half.To].Len + 1
|
||||
for _, to := range a[fa.half.To] {
|
||||
if to.To == fa.from && to.Label == fa.half.Label {
|
||||
continue
|
||||
}
|
||||
if p[to.To].Len > 0 {
|
||||
simple = false
|
||||
continue
|
||||
}
|
||||
p[to.To] = PathEnd{From: fa.half.To, Len: l}
|
||||
if len(labels) == len(p) {
|
||||
labels[to.To] = to.Label
|
||||
}
|
||||
if l > f.MaxLen {
|
||||
f.MaxLen = l
|
||||
}
|
||||
next = append(next, arc{fa.half.To, to})
|
||||
}
|
||||
}
|
||||
frontier, next = next, frontier[:0]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HasEdge returns true if g has any edge between nodes n1 and n2.
|
||||
//
|
||||
// Also returned are indexes x1 and x2 such that g[n1][x1] == Half{n2, l}
|
||||
// and g[n2][x2] == {n1, l} for some label l. If no edge between n1 and n2
|
||||
// exists, HasEdge returns `has` == false.
|
||||
//
|
||||
// See also HasArc. If you are only interested in the boolean result then
|
||||
// HasArc is an adequate test.
|
||||
func (g LabeledUndirected) HasEdge(n1, n2 NI) (has bool, x1, x2 int) {
|
||||
if has, x1 = g.HasArc(n1, n2); !has {
|
||||
return has, x1, x1
|
||||
}
|
||||
has, x2 = g.HasArcLabel(n2, n1, g.LabeledAdjacencyList[n1][x1].Label)
|
||||
return
|
||||
}
|
||||
|
||||
// HasEdgeLabel returns true if g has any edge between nodes n1 and n2 with
|
||||
// label l.
|
||||
//
|
||||
// Also returned are indexes x1 and x2 such that g[n1][x1] == Half{n2, l}
|
||||
// and g[n2][x2] == Half{n1, l}. If no edge between n1 and n2 with label l
|
||||
// is present HasEdgeLabel returns `has` == false.
|
||||
func (g LabeledUndirected) HasEdgeLabel(n1, n2 NI, l LI) (has bool, x1, x2 int) {
|
||||
if has, x1 = g.HasArcLabel(n1, n2, l); !has {
|
||||
return has, x1, x1
|
||||
}
|
||||
has, x2 = g.HasArcLabel(n2, n1, l)
|
||||
return
|
||||
}
|
||||
|
||||
// RemoveEdge removes a single edge between nodes n1 and n2.
|
||||
//
|
||||
// It removes reciprocal arcs in the case of distinct n1 and n2 or removes
|
||||
// a single arc loop in the case of n1 == n2.
|
||||
//
|
||||
// If the specified edge is found and successfully removed, RemoveEdge returns
|
||||
// true and the label of the edge removed. If no edge exists between n1 and n2,
|
||||
// RemoveEdge returns false, 0.
|
||||
func (g LabeledUndirected) RemoveEdge(n1, n2 NI) (ok bool, label LI) {
|
||||
ok, x1, x2 := g.HasEdge(n1, n2)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
a := g.LabeledAdjacencyList
|
||||
to := a[n1]
|
||||
label = to[x1].Label // return value
|
||||
last := len(to) - 1
|
||||
to[x1] = to[last]
|
||||
a[n1] = to[:last]
|
||||
if n1 == n2 {
|
||||
return
|
||||
}
|
||||
to = a[n2]
|
||||
last = len(to) - 1
|
||||
to[x2] = to[last]
|
||||
a[n2] = to[:last]
|
||||
return
|
||||
}
|
||||
|
||||
// RemoveEdgeLabel removes a single edge between nodes n1 and n2 with label l.
|
||||
//
|
||||
// It removes reciprocal arcs in the case of distinct n1 and n2 or removes
|
||||
// a single arc loop in the case of n1 == n2.
|
||||
//
|
||||
// Returns true if the specified edge is found and successfully removed,
|
||||
// false if the edge does not exist.
|
||||
func (g LabeledUndirected) RemoveEdgeLabel(n1, n2 NI, l LI) (ok bool) {
|
||||
ok, x1, x2 := g.HasEdgeLabel(n1, n2, l)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
a := g.LabeledAdjacencyList
|
||||
to := a[n1]
|
||||
last := len(to) - 1
|
||||
to[x1] = to[last]
|
||||
a[n1] = to[:last]
|
||||
if n1 == n2 {
|
||||
return
|
||||
}
|
||||
to = a[n2]
|
||||
last = len(to) - 1
|
||||
to[x2] = to[last]
|
||||
a[n2] = to[:last]
|
||||
return
|
||||
}
|
||||
|
||||
// TarjanBiconnectedComponents decomposes a graph into maximal biconnected
|
||||
// components, components for which if any node were removed the component
|
||||
// would remain connected.
|
||||
//
|
||||
// The receiver g must be a simple graph. The method calls the emit argument
|
||||
// for each component identified, as long as emit returns true. If emit
|
||||
// returns false, TarjanBiconnectedComponents returns immediately.
|
||||
//
|
||||
// See also the equivalent unlabeled TarjanBiconnectedComponents.
|
||||
func (g LabeledUndirected) TarjanBiconnectedComponents(emit func([]LabeledEdge) bool) {
|
||||
// Code nearly identical to the unlabeled version.
|
||||
number := make([]int, g.Order())
|
||||
lowpt := make([]int, g.Order())
|
||||
var stack []LabeledEdge
|
||||
var i int
|
||||
var biconnect func(NI, NI) bool
|
||||
biconnect = func(v, u NI) bool {
|
||||
i++
|
||||
number[v] = i
|
||||
lowpt[v] = i
|
||||
for _, w := range g.LabeledAdjacencyList[v] {
|
||||
if number[w.To] == 0 {
|
||||
stack = append(stack, LabeledEdge{Edge{v, w.To}, w.Label})
|
||||
if !biconnect(w.To, v) {
|
||||
return false
|
||||
}
|
||||
if lowpt[w.To] < lowpt[v] {
|
||||
lowpt[v] = lowpt[w.To]
|
||||
}
|
||||
if lowpt[w.To] >= number[v] {
|
||||
var bcc []LabeledEdge
|
||||
top := len(stack) - 1
|
||||
for number[stack[top].N1] >= number[w.To] {
|
||||
bcc = append(bcc, stack[top])
|
||||
stack = stack[:top]
|
||||
top--
|
||||
}
|
||||
bcc = append(bcc, stack[top])
|
||||
stack = stack[:top]
|
||||
top--
|
||||
if !emit(bcc) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
} else if number[w.To] < number[v] && w.To != u {
|
||||
stack = append(stack, LabeledEdge{Edge{v, w.To}, w.Label})
|
||||
if number[w.To] < lowpt[v] {
|
||||
lowpt[v] = number[w.To]
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
for w := range g.LabeledAdjacencyList {
|
||||
if number[w] == 0 && !biconnect(NI(w), -1) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *eulerian) pushUndir() error {
|
||||
for u := e.top(); ; {
|
||||
e.uv.SetBit(int(u), 0)
|
||||
arcs := e.g[u]
|
||||
if len(arcs) == 0 {
|
||||
return nil
|
||||
}
|
||||
w := arcs[0]
|
||||
e.s++
|
||||
e.p[e.s] = w
|
||||
e.g[u] = arcs[1:] // consume arc
|
||||
// difference from directed counterpart in dir.go:
|
||||
// as long as it's not a loop, consume reciprocal arc as well
|
||||
if w != u {
|
||||
a2 := e.g[w]
|
||||
for x, rx := range a2 {
|
||||
if rx == u { // here it is
|
||||
last := len(a2) - 1
|
||||
a2[x] = a2[last] // someone else gets the seat
|
||||
e.g[w] = a2[:last] // and it's gone.
|
||||
goto l
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("graph not undirected. %d -> %d reciprocal not found", u, w)
|
||||
}
|
||||
l:
|
||||
u = w
|
||||
}
|
||||
}
|
||||
|
||||
func (e *labEulerian) pushUndir() error {
|
||||
for u := e.top(); ; {
|
||||
e.uv.SetBit(int(u.To), 0)
|
||||
arcs := e.g[u.To]
|
||||
if len(arcs) == 0 {
|
||||
return nil
|
||||
}
|
||||
w := arcs[0]
|
||||
e.s++
|
||||
e.p[e.s] = w
|
||||
e.g[u.To] = arcs[1:] // consume arc
|
||||
// difference from directed counterpart in dir.go:
|
||||
// as long as it's not a loop, consume reciprocal arc as well
|
||||
if w.To != u.To {
|
||||
a2 := e.g[w.To]
|
||||
for x, rx := range a2 {
|
||||
if rx.To == u.To && rx.Label == w.Label { // here it is
|
||||
last := len(a2) - 1
|
||||
a2[x] = a2[last] // someone else can have the seat
|
||||
e.g[w.To] = a2[:last] // and it's gone.
|
||||
goto l
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("graph not undirected. %d -> %v reciprocal not found", u.To, w)
|
||||
}
|
||||
l:
|
||||
u = w
|
||||
}
|
||||
}
|
||||
1138
vendor/github.com/soniakeys/graph/undir_RO.go
generated
vendored
1138
vendor/github.com/soniakeys/graph/undir_RO.go
generated
vendored
File diff suppressed because it is too large
1138
vendor/github.com/soniakeys/graph/undir_cg.go
generated
vendored
1138
vendor/github.com/soniakeys/graph/undir_cg.go
generated
vendored
File diff suppressed because it is too large
11
vendor/golang.org/x/crypto/cast5/cast5.go
generated
vendored
11
vendor/golang.org/x/crypto/cast5/cast5.go
generated
vendored
@@ -2,8 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
// OpenPGP cipher.
// Package cast5 implements CAST5, as defined in RFC 2144.
//
// CAST5 is a legacy cipher and its short block size makes it vulnerable to
// birthday bound attacks (see https://sweet32.info). It should only be used
// where compatibility with legacy systems, not security, is the goal.
//
// Deprecated: any new system should use AES (from crypto/aes, if necessary in
// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
// golang.org/x/crypto/chacha20poly1305).
package cast5 // import "golang.org/x/crypto/cast5"

import "errors"
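The new deprecation note above points callers at AES-GCM or XChaCha20-Poly1305. A minimal, hedged sketch of the suggested replacement path using only the standard library (key and nonce handling here are illustrative; in real use the nonce must never repeat for a given key):

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 32) // AES-256 key; generate and store securely in practice
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, aead.NonceSize()) // 12 bytes; must be unique per key
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}
	ct := aead.Seal(nil, nonce, []byte("hello"), nil)
	pt, err := aead.Open(nil, nonce, ct, nil)
	fmt.Println(string(pt), err)
}
```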
17 vendor/golang.org/x/crypto/chacha20/chacha_arm64.go generated vendored (new file)
@@ -0,0 +1,17 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.11
// +build !gccgo,!appengine

package chacha20

const bufSize = 256

//go:noescape
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)

func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) {
	xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter)
}
308 vendor/golang.org/x/crypto/chacha20/chacha_arm64.s generated vendored (new file)
@@ -0,0 +1,308 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.11
|
||||
// +build !gccgo,!appengine
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
#define NUM_ROUNDS 10
|
||||
|
||||
// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
|
||||
TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
|
||||
MOVD dst+0(FP), R1
|
||||
MOVD src+24(FP), R2
|
||||
MOVD src_len+32(FP), R3
|
||||
MOVD key+48(FP), R4
|
||||
MOVD nonce+56(FP), R6
|
||||
MOVD counter+64(FP), R7
|
||||
|
||||
MOVD $·constants(SB), R10
|
||||
MOVD $·incRotMatrix(SB), R11
|
||||
|
||||
MOVW (R7), R20
|
||||
|
||||
AND $~255, R3, R13
|
||||
ADD R2, R13, R12 // R12 for block end
|
||||
AND $255, R3, R13
|
||||
loop:
|
||||
MOVD $NUM_ROUNDS, R21
|
||||
VLD1 (R11), [V30.S4, V31.S4]
|
||||
|
||||
// load contants
|
||||
// VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
|
||||
WORD $0x4D60E940
|
||||
|
||||
// load keys
|
||||
// VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4]
|
||||
WORD $0x4DFFE884
|
||||
// VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4]
|
||||
WORD $0x4DFFE888
|
||||
SUB $32, R4
|
||||
|
||||
// load counter + nonce
|
||||
// VLD1R (R7), [V12.S4]
|
||||
WORD $0x4D40C8EC
|
||||
|
||||
// VLD3R (R6), [V13.S4, V14.S4, V15.S4]
|
||||
WORD $0x4D40E8CD
|
||||
|
||||
// update counter
|
||||
VADD V30.S4, V12.S4, V12.S4
|
||||
|
||||
chacha:
|
||||
// V0..V3 += V4..V7
|
||||
// V12..V15 <<<= ((V12..V15 XOR V0..V3), 16)
|
||||
VADD V0.S4, V4.S4, V0.S4
|
||||
VADD V1.S4, V5.S4, V1.S4
|
||||
VADD V2.S4, V6.S4, V2.S4
|
||||
VADD V3.S4, V7.S4, V3.S4
|
||||
VEOR V12.B16, V0.B16, V12.B16
|
||||
VEOR V13.B16, V1.B16, V13.B16
|
||||
VEOR V14.B16, V2.B16, V14.B16
|
||||
VEOR V15.B16, V3.B16, V15.B16
|
||||
VREV32 V12.H8, V12.H8
|
||||
VREV32 V13.H8, V13.H8
|
||||
VREV32 V14.H8, V14.H8
|
||||
VREV32 V15.H8, V15.H8
|
||||
// V8..V11 += V12..V15
|
||||
// V4..V7 <<<= ((V4..V7 XOR V8..V11), 12)
|
||||
VADD V8.S4, V12.S4, V8.S4
|
||||
VADD V9.S4, V13.S4, V9.S4
|
||||
VADD V10.S4, V14.S4, V10.S4
|
||||
VADD V11.S4, V15.S4, V11.S4
|
||||
VEOR V8.B16, V4.B16, V16.B16
|
||||
VEOR V9.B16, V5.B16, V17.B16
|
||||
VEOR V10.B16, V6.B16, V18.B16
|
||||
VEOR V11.B16, V7.B16, V19.B16
|
||||
VSHL $12, V16.S4, V4.S4
|
||||
VSHL $12, V17.S4, V5.S4
|
||||
VSHL $12, V18.S4, V6.S4
|
||||
VSHL $12, V19.S4, V7.S4
|
||||
VSRI $20, V16.S4, V4.S4
|
||||
VSRI $20, V17.S4, V5.S4
|
||||
VSRI $20, V18.S4, V6.S4
|
||||
VSRI $20, V19.S4, V7.S4
|
||||
|
||||
// V0..V3 += V4..V7
|
||||
// V12..V15 <<<= ((V12..V15 XOR V0..V3), 8)
|
||||
VADD V0.S4, V4.S4, V0.S4
|
||||
VADD V1.S4, V5.S4, V1.S4
|
||||
VADD V2.S4, V6.S4, V2.S4
|
||||
VADD V3.S4, V7.S4, V3.S4
|
||||
VEOR V12.B16, V0.B16, V12.B16
|
||||
VEOR V13.B16, V1.B16, V13.B16
|
||||
VEOR V14.B16, V2.B16, V14.B16
|
||||
VEOR V15.B16, V3.B16, V15.B16
|
||||
VTBL V31.B16, [V12.B16], V12.B16
|
||||
VTBL V31.B16, [V13.B16], V13.B16
|
||||
VTBL V31.B16, [V14.B16], V14.B16
|
||||
VTBL V31.B16, [V15.B16], V15.B16
|
||||
|
||||
// V8..V11 += V12..V15
|
||||
// V4..V7 <<<= ((V4..V7 XOR V8..V11), 7)
|
||||
VADD V12.S4, V8.S4, V8.S4
|
||||
VADD V13.S4, V9.S4, V9.S4
|
||||
VADD V14.S4, V10.S4, V10.S4
|
||||
VADD V15.S4, V11.S4, V11.S4
|
||||
VEOR V8.B16, V4.B16, V16.B16
|
||||
VEOR V9.B16, V5.B16, V17.B16
|
||||
VEOR V10.B16, V6.B16, V18.B16
|
||||
VEOR V11.B16, V7.B16, V19.B16
|
||||
VSHL $7, V16.S4, V4.S4
|
||||
VSHL $7, V17.S4, V5.S4
|
||||
VSHL $7, V18.S4, V6.S4
|
||||
VSHL $7, V19.S4, V7.S4
|
||||
VSRI $25, V16.S4, V4.S4
|
||||
VSRI $25, V17.S4, V5.S4
|
||||
VSRI $25, V18.S4, V6.S4
|
||||
VSRI $25, V19.S4, V7.S4
|
||||
|
||||
// V0..V3 += V5..V7, V4
|
||||
// V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16)
|
||||
VADD V0.S4, V5.S4, V0.S4
|
||||
VADD V1.S4, V6.S4, V1.S4
|
||||
VADD V2.S4, V7.S4, V2.S4
|
||||
VADD V3.S4, V4.S4, V3.S4
|
||||
VEOR V15.B16, V0.B16, V15.B16
|
||||
VEOR V12.B16, V1.B16, V12.B16
|
||||
VEOR V13.B16, V2.B16, V13.B16
|
||||
VEOR V14.B16, V3.B16, V14.B16
|
||||
VREV32 V12.H8, V12.H8
|
||||
VREV32 V13.H8, V13.H8
|
||||
VREV32 V14.H8, V14.H8
|
||||
VREV32 V15.H8, V15.H8
|
||||
|
||||
// V10 += V15; V5 <<<= ((V10 XOR V5), 12)
|
||||
// ...
|
||||
VADD V15.S4, V10.S4, V10.S4
|
||||
VADD V12.S4, V11.S4, V11.S4
|
||||
VADD V13.S4, V8.S4, V8.S4
|
||||
VADD V14.S4, V9.S4, V9.S4
|
||||
VEOR V10.B16, V5.B16, V16.B16
|
||||
VEOR V11.B16, V6.B16, V17.B16
|
||||
VEOR V8.B16, V7.B16, V18.B16
|
||||
VEOR V9.B16, V4.B16, V19.B16
|
||||
VSHL $12, V16.S4, V5.S4
|
||||
VSHL $12, V17.S4, V6.S4
|
||||
VSHL $12, V18.S4, V7.S4
|
||||
VSHL $12, V19.S4, V4.S4
|
||||
VSRI $20, V16.S4, V5.S4
|
||||
VSRI $20, V17.S4, V6.S4
|
||||
VSRI $20, V18.S4, V7.S4
|
||||
VSRI $20, V19.S4, V4.S4
|
||||
|
||||
// V0 += V5; V15 <<<= ((V0 XOR V15), 8)
|
||||
// ...
|
||||
VADD V5.S4, V0.S4, V0.S4
|
||||
VADD V6.S4, V1.S4, V1.S4
|
||||
VADD V7.S4, V2.S4, V2.S4
|
||||
VADD V4.S4, V3.S4, V3.S4
|
||||
VEOR V0.B16, V15.B16, V15.B16
|
||||
VEOR V1.B16, V12.B16, V12.B16
|
||||
VEOR V2.B16, V13.B16, V13.B16
|
||||
VEOR V3.B16, V14.B16, V14.B16
|
||||
VTBL V31.B16, [V12.B16], V12.B16
|
||||
VTBL V31.B16, [V13.B16], V13.B16
|
||||
VTBL V31.B16, [V14.B16], V14.B16
|
||||
VTBL V31.B16, [V15.B16], V15.B16
|
||||
|
||||
// V10 += V15; V5 <<<= ((V10 XOR V5), 7)
|
||||
// ...
|
||||
VADD V15.S4, V10.S4, V10.S4
|
||||
VADD V12.S4, V11.S4, V11.S4
|
||||
VADD V13.S4, V8.S4, V8.S4
|
||||
VADD V14.S4, V9.S4, V9.S4
|
||||
VEOR V10.B16, V5.B16, V16.B16
|
||||
VEOR V11.B16, V6.B16, V17.B16
|
||||
VEOR V8.B16, V7.B16, V18.B16
|
||||
VEOR V9.B16, V4.B16, V19.B16
|
||||
VSHL $7, V16.S4, V5.S4
|
||||
VSHL $7, V17.S4, V6.S4
|
||||
VSHL $7, V18.S4, V7.S4
|
||||
VSHL $7, V19.S4, V4.S4
|
||||
VSRI $25, V16.S4, V5.S4
|
||||
VSRI $25, V17.S4, V6.S4
|
||||
VSRI $25, V18.S4, V7.S4
|
||||
VSRI $25, V19.S4, V4.S4
|
||||
|
||||
SUB $1, R21
|
||||
CBNZ R21, chacha
|
||||
|
||||
// VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4]
|
||||
WORD $0x4D60E950
|
||||
|
||||
// VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4]
|
||||
WORD $0x4DFFE894
|
||||
VADD V30.S4, V12.S4, V12.S4
|
||||
VADD V16.S4, V0.S4, V0.S4
|
||||
VADD V17.S4, V1.S4, V1.S4
|
||||
VADD V18.S4, V2.S4, V2.S4
|
||||
VADD V19.S4, V3.S4, V3.S4
|
||||
// VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4]
|
||||
WORD $0x4DFFE898
|
||||
// restore R4
|
||||
SUB $32, R4
|
||||
|
||||
// load counter + nonce
|
||||
// VLD1R (R7), [V28.S4]
|
||||
WORD $0x4D40C8FC
|
||||
// VLD3R (R6), [V29.S4, V30.S4, V31.S4]
|
||||
WORD $0x4D40E8DD
|
||||
|
||||
VADD V20.S4, V4.S4, V4.S4
|
||||
VADD V21.S4, V5.S4, V5.S4
|
||||
VADD V22.S4, V6.S4, V6.S4
|
||||
VADD V23.S4, V7.S4, V7.S4
|
||||
VADD V24.S4, V8.S4, V8.S4
|
||||
VADD V25.S4, V9.S4, V9.S4
|
||||
VADD V26.S4, V10.S4, V10.S4
|
||||
VADD V27.S4, V11.S4, V11.S4
|
||||
VADD V28.S4, V12.S4, V12.S4
|
||||
VADD V29.S4, V13.S4, V13.S4
|
||||
VADD V30.S4, V14.S4, V14.S4
|
||||
VADD V31.S4, V15.S4, V15.S4
|
||||
|
||||
VZIP1 V1.S4, V0.S4, V16.S4
|
||||
VZIP2 V1.S4, V0.S4, V17.S4
|
||||
VZIP1 V3.S4, V2.S4, V18.S4
|
||||
VZIP2 V3.S4, V2.S4, V19.S4
|
||||
VZIP1 V5.S4, V4.S4, V20.S4
|
||||
VZIP2 V5.S4, V4.S4, V21.S4
|
||||
VZIP1 V7.S4, V6.S4, V22.S4
|
||||
VZIP2 V7.S4, V6.S4, V23.S4
|
||||
VZIP1 V9.S4, V8.S4, V24.S4
|
||||
VZIP2 V9.S4, V8.S4, V25.S4
|
||||
VZIP1 V11.S4, V10.S4, V26.S4
|
||||
VZIP2 V11.S4, V10.S4, V27.S4
|
||||
VZIP1 V13.S4, V12.S4, V28.S4
|
||||
VZIP2 V13.S4, V12.S4, V29.S4
|
||||
VZIP1 V15.S4, V14.S4, V30.S4
|
||||
VZIP2 V15.S4, V14.S4, V31.S4
|
||||
VZIP1 V18.D2, V16.D2, V0.D2
|
||||
VZIP2 V18.D2, V16.D2, V4.D2
|
||||
VZIP1 V19.D2, V17.D2, V8.D2
|
||||
VZIP2 V19.D2, V17.D2, V12.D2
|
||||
VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16]
|
||||
|
||||
VZIP1 V22.D2, V20.D2, V1.D2
|
||||
VZIP2 V22.D2, V20.D2, V5.D2
|
||||
VZIP1 V23.D2, V21.D2, V9.D2
|
||||
VZIP2 V23.D2, V21.D2, V13.D2
|
||||
VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16]
|
||||
VZIP1 V26.D2, V24.D2, V2.D2
|
||||
VZIP2 V26.D2, V24.D2, V6.D2
|
||||
VZIP1 V27.D2, V25.D2, V10.D2
|
||||
VZIP2 V27.D2, V25.D2, V14.D2
|
||||
VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16]
|
||||
VZIP1 V30.D2, V28.D2, V3.D2
|
||||
VZIP2 V30.D2, V28.D2, V7.D2
|
||||
VZIP1 V31.D2, V29.D2, V11.D2
|
||||
VZIP2 V31.D2, V29.D2, V15.D2
|
||||
VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16]
|
||||
VEOR V0.B16, V16.B16, V16.B16
|
||||
VEOR V1.B16, V17.B16, V17.B16
|
||||
VEOR V2.B16, V18.B16, V18.B16
|
||||
VEOR V3.B16, V19.B16, V19.B16
|
||||
VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1)
|
||||
VEOR V4.B16, V20.B16, V20.B16
|
||||
VEOR V5.B16, V21.B16, V21.B16
|
||||
VEOR V6.B16, V22.B16, V22.B16
|
||||
VEOR V7.B16, V23.B16, V23.B16
|
||||
VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1)
|
||||
VEOR V8.B16, V24.B16, V24.B16
|
||||
VEOR V9.B16, V25.B16, V25.B16
|
||||
VEOR V10.B16, V26.B16, V26.B16
|
||||
VEOR V11.B16, V27.B16, V27.B16
|
||||
VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1)
|
||||
VEOR V12.B16, V28.B16, V28.B16
|
||||
VEOR V13.B16, V29.B16, V29.B16
|
||||
VEOR V14.B16, V30.B16, V30.B16
|
||||
VEOR V15.B16, V31.B16, V31.B16
|
||||
VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1)
|
||||
|
||||
ADD $4, R20
|
||||
MOVW R20, (R7) // update counter
|
||||
|
||||
CMP R2, R12
|
||||
BGT loop
|
||||
|
||||
RET
|
||||
|
||||
|
||||
DATA ·constants+0x00(SB)/4, $0x61707865
|
||||
DATA ·constants+0x04(SB)/4, $0x3320646e
|
||||
DATA ·constants+0x08(SB)/4, $0x79622d32
|
||||
DATA ·constants+0x0c(SB)/4, $0x6b206574
|
||||
GLOBL ·constants(SB), NOPTR|RODATA, $32
|
||||
|
||||
DATA ·incRotMatrix+0x00(SB)/4, $0x00000000
|
||||
DATA ·incRotMatrix+0x04(SB)/4, $0x00000001
|
||||
DATA ·incRotMatrix+0x08(SB)/4, $0x00000002
|
||||
DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003
|
||||
DATA ·incRotMatrix+0x10(SB)/4, $0x02010003
|
||||
DATA ·incRotMatrix+0x14(SB)/4, $0x06050407
|
||||
DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B
|
||||
DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F
|
||||
GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32
|
||||
364 vendor/golang.org/x/crypto/chacha20/chacha_generic.go generated vendored (new file)
@@ -0,0 +1,364 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms
|
||||
// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01.
|
||||
package chacha20
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/bits"
|
||||
|
||||
"golang.org/x/crypto/internal/subtle"
|
||||
)
|
||||
|
||||
const (
|
||||
// KeySize is the size of the key used by this cipher, in bytes.
|
||||
KeySize = 32
|
||||
|
||||
// NonceSize is the size of the nonce used with the standard variant of this
|
||||
// cipher, in bytes.
|
||||
//
|
||||
// Note that this is too short to be safely generated at random if the same
|
||||
// key is reused more than 2³² times.
|
||||
NonceSize = 12
|
||||
|
||||
// NonceSizeX is the size of the nonce used with the XChaCha20 variant of
|
||||
// this cipher, in bytes.
|
||||
NonceSizeX = 24
|
||||
)
|
||||
|
||||
// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key
|
||||
// and nonce. A *Cipher implements the cipher.Stream interface.
|
||||
type Cipher struct {
|
||||
// The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter
|
||||
// (incremented after each block), and 3 of nonce.
|
||||
key [8]uint32
|
||||
counter uint32
|
||||
nonce [3]uint32
|
||||
|
||||
// The last len bytes of buf are leftover key stream bytes from the previous
|
||||
// XORKeyStream invocation. The size of buf depends on how many blocks are
|
||||
// computed at a time.
|
||||
buf [bufSize]byte
|
||||
len int
|
||||
|
||||
// The counter-independent results of the first round are cached after they
|
||||
// are computed the first time.
|
||||
precompDone bool
|
||||
p1, p5, p9, p13 uint32
|
||||
p2, p6, p10, p14 uint32
|
||||
p3, p7, p11, p15 uint32
|
||||
}
|
||||
|
||||
var _ cipher.Stream = (*Cipher)(nil)
|
||||
|
||||
// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given
|
||||
// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided,
|
||||
// the XChaCha20 construction will be used. It returns an error if key or nonce
|
||||
// have any other length.
|
||||
//
|
||||
// Note that ChaCha20, like all stream ciphers, is not authenticated and allows
|
||||
// attackers to silently tamper with the plaintext. For this reason, it is more
|
||||
// appropriate as a building block than as a standalone encryption mechanism.
|
||||
// Instead, consider using package golang.org/x/crypto/chacha20poly1305.
|
||||
func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) {
|
||||
// This function is split into a wrapper so that the Cipher allocation will
|
||||
// be inlined, and depending on how the caller uses the return value, won't
|
||||
// escape to the heap.
|
||||
c := &Cipher{}
|
||||
return newUnauthenticatedCipher(c, key, nonce)
|
||||
}
|
||||
|
||||
func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) {
|
||||
if len(key) != KeySize {
|
||||
return nil, errors.New("chacha20: wrong key size")
|
||||
}
|
||||
if len(nonce) == NonceSizeX {
|
||||
// XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a
|
||||
// derived key, allowing it to operate on a nonce of 24 bytes. See
|
||||
// draft-irtf-cfrg-xchacha-01, Section 2.3.
|
||||
key, _ = HChaCha20(key, nonce[0:16])
|
||||
cNonce := make([]byte, NonceSize)
|
||||
copy(cNonce[4:12], nonce[16:24])
|
||||
nonce = cNonce
|
||||
} else if len(nonce) != NonceSize {
|
||||
return nil, errors.New("chacha20: wrong nonce size")
|
||||
}
|
||||
|
||||
c.key = [8]uint32{
|
||||
binary.LittleEndian.Uint32(key[0:4]),
|
||||
binary.LittleEndian.Uint32(key[4:8]),
|
||||
binary.LittleEndian.Uint32(key[8:12]),
|
||||
binary.LittleEndian.Uint32(key[12:16]),
|
||||
binary.LittleEndian.Uint32(key[16:20]),
|
||||
binary.LittleEndian.Uint32(key[20:24]),
|
||||
binary.LittleEndian.Uint32(key[24:28]),
|
||||
binary.LittleEndian.Uint32(key[28:32]),
|
||||
}
|
||||
c.nonce = [3]uint32{
|
||||
binary.LittleEndian.Uint32(nonce[0:4]),
|
||||
binary.LittleEndian.Uint32(nonce[4:8]),
|
||||
binary.LittleEndian.Uint32(nonce[8:12]),
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// The constant first 4 words of the ChaCha20 state.
|
||||
const (
|
||||
j0 uint32 = 0x61707865 // expa
|
||||
j1 uint32 = 0x3320646e // nd 3
|
||||
j2 uint32 = 0x79622d32 // 2-by
|
||||
j3 uint32 = 0x6b206574 // te k
|
||||
)
|
||||
|
||||
const blockSize = 64
|
||||
|
||||
// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words.
|
||||
// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16
|
||||
// words each round, in columnar or diagonal groups of 4 at a time.
|
||||
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
|
||||
a += b
|
||||
d ^= a
|
||||
d = bits.RotateLeft32(d, 16)
|
||||
c += d
|
||||
b ^= c
|
||||
b = bits.RotateLeft32(b, 12)
|
||||
a += b
|
||||
d ^= a
|
||||
d = bits.RotateLeft32(d, 8)
|
||||
c += d
|
||||
b ^= c
|
||||
b = bits.RotateLeft32(b, 7)
|
||||
return a, b, c, d
|
||||
}
|
||||
|
||||
// XORKeyStream XORs each byte in the given slice with a byte from the
|
||||
// cipher's key stream. Dst and src must overlap entirely or not at all.
|
||||
//
|
||||
// If len(dst) < len(src), XORKeyStream will panic. It is acceptable
|
||||
// to pass a dst bigger than src, and in that case, XORKeyStream will
|
||||
// only update dst[:len(src)] and will not touch the rest of dst.
|
||||
//
|
||||
// Multiple calls to XORKeyStream behave as if the concatenation of
|
||||
// the src buffers was passed in a single run. That is, Cipher
|
||||
// maintains state and does not reset at each XORKeyStream call.
|
||||
func (s *Cipher) XORKeyStream(dst, src []byte) {
|
||||
if len(src) == 0 {
|
||||
return
|
||||
}
|
||||
if len(dst) < len(src) {
|
||||
panic("chacha20: output smaller than input")
|
||||
}
|
||||
dst = dst[:len(src)]
|
||||
if subtle.InexactOverlap(dst, src) {
|
||||
panic("chacha20: invalid buffer overlap")
|
||||
}
|
||||
|
||||
// First, drain any remaining key stream from a previous XORKeyStream.
|
||||
if s.len != 0 {
|
||||
keyStream := s.buf[bufSize-s.len:]
|
||||
if len(src) < len(keyStream) {
|
||||
keyStream = keyStream[:len(src)]
|
||||
}
|
||||
_ = src[len(keyStream)-1] // bounds check elimination hint
|
||||
for i, b := range keyStream {
|
||||
dst[i] = src[i] ^ b
|
||||
}
|
||||
s.len -= len(keyStream)
|
||||
src = src[len(keyStream):]
|
||||
dst = dst[len(keyStream):]
|
||||
}
|
||||
|
||||
const blocksPerBuf = bufSize / blockSize
|
||||
numBufs := (uint64(len(src)) + bufSize - 1) / bufSize
|
||||
if uint64(s.counter)+numBufs*blocksPerBuf >= 1<<32 {
|
||||
panic("chacha20: counter overflow")
|
||||
}
|
||||
|
||||
// xorKeyStreamBlocks implementations expect input lengths that are a
|
||||
// multiple of bufSize. Platform-specific ones process multiple blocks at a
|
||||
// time, so have bufSizes that are a multiple of blockSize.
|
||||
|
||||
rem := len(src) % bufSize
|
||||
full := len(src) - rem
|
||||
|
||||
if full > 0 {
|
||||
s.xorKeyStreamBlocks(dst[:full], src[:full])
|
||||
}
|
||||
|
||||
// If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and
|
||||
// keep the leftover keystream for the next XORKeyStream invocation.
|
||||
if rem > 0 {
|
||||
s.buf = [bufSize]byte{}
|
||||
copy(s.buf[:], src[full:])
|
||||
s.xorKeyStreamBlocks(s.buf[:], s.buf[:])
|
||||
s.len = bufSize - copy(dst[full:], s.buf[:])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) {
|
||||
if len(dst) != len(src) || len(dst)%blockSize != 0 {
|
||||
panic("chacha20: internal error: wrong dst and/or src length")
|
||||
}
|
||||
|
||||
// To generate each block of key stream, the initial cipher state
|
||||
// (represented below) is passed through 20 rounds of shuffling,
|
||||
// alternatively applying quarterRounds by columns (like 1, 5, 9, 13)
|
||||
// or by diagonals (like 1, 6, 11, 12).
|
||||
//
|
||||
// 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc
|
||||
// 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk
|
||||
// 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk
|
||||
// 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn
|
||||
//
|
||||
// c=constant k=key b=blockcount n=nonce
|
||||
var (
|
||||
c0, c1, c2, c3 = j0, j1, j2, j3
|
||||
c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3]
|
||||
c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7]
|
||||
_, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2]
|
||||
)
|
||||
|
||||
// Three quarters of the first round don't depend on the counter, so we can
|
||||
// calculate them here, and reuse them for multiple blocks in the loop, and
|
||||
// for future XORKeyStream invocations.
|
||||
if !s.precompDone {
|
||||
s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13)
|
||||
s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14)
|
||||
s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15)
|
||||
s.precompDone = true
|
||||
}
|
||||
|
||||
for i := 0; i < len(src); i += blockSize {
|
||||
// The remainder of the first column round.
|
||||
fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter)
|
||||
|
||||
// The second diagonal round.
|
||||
x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15)
|
||||
x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12)
|
||||
x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13)
|
||||
x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14)
|
||||
|
||||
// The remaining 18 rounds.
|
||||
for i := 0; i < 9; i++ {
|
||||
// Column round.
|
||||
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
|
||||
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
|
||||
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
|
||||
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
|
||||
|
||||
// Diagonal round.
|
||||
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
|
||||
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
|
||||
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
|
||||
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
|
||||
}
|
||||
|
||||
// Finally, add back the initial state to generate the key stream.
|
||||
x0 += c0
|
||||
x1 += c1
|
||||
x2 += c2
|
||||
x3 += c3
|
||||
x4 += c4
|
||||
x5 += c5
|
||||
x6 += c6
|
||||
x7 += c7
|
||||
x8 += c8
|
||||
x9 += c9
|
||||
x10 += c10
|
||||
x11 += c11
|
||||
x12 += s.counter
|
||||
x13 += c13
|
||||
x14 += c14
|
||||
x15 += c15
|
||||
|
||||
s.counter += 1
|
||||
if s.counter == 0 {
|
||||
panic("chacha20: internal error: counter overflow")
|
||||
}
|
||||
|
||||
in, out := src[i:], dst[i:]
|
||||
in, out = in[:blockSize], out[:blockSize] // bounds check elimination hint
|
||||
|
||||
// XOR the key stream with the source and write out the result.
|
||||
xor(out[0:], in[0:], x0)
|
||||
xor(out[4:], in[4:], x1)
|
||||
xor(out[8:], in[8:], x2)
|
||||
xor(out[12:], in[12:], x3)
|
||||
xor(out[16:], in[16:], x4)
|
||||
xor(out[20:], in[20:], x5)
|
||||
xor(out[24:], in[24:], x6)
|
||||
xor(out[28:], in[28:], x7)
|
||||
xor(out[32:], in[32:], x8)
|
||||
xor(out[36:], in[36:], x9)
|
||||
xor(out[40:], in[40:], x10)
|
||||
xor(out[44:], in[44:], x11)
|
||||
xor(out[48:], in[48:], x12)
|
||||
xor(out[52:], in[52:], x13)
|
||||
xor(out[56:], in[56:], x14)
|
||||
xor(out[60:], in[60:], x15)
|
||||
}
|
||||
}
|
||||
|
||||
// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes
|
||||
// key and a 16 bytes nonce. It returns an error if key or nonce have any other
|
||||
// length. It is used as part of the XChaCha20 construction.
|
||||
func HChaCha20(key, nonce []byte) ([]byte, error) {
|
||||
// This function is split into a wrapper so that the slice allocation will
|
||||
// be inlined, and depending on how the caller uses the return value, won't
|
||||
// escape to the heap.
|
||||
out := make([]byte, 32)
|
||||
return hChaCha20(out, key, nonce)
|
||||
}
|
||||
|
||||
func hChaCha20(out, key, nonce []byte) ([]byte, error) {
|
||||
if len(key) != KeySize {
|
||||
return nil, errors.New("chacha20: wrong HChaCha20 key size")
|
||||
}
|
||||
if len(nonce) != 16 {
|
||||
return nil, errors.New("chacha20: wrong HChaCha20 nonce size")
|
||||
}
|
||||
|
||||
x0, x1, x2, x3 := j0, j1, j2, j3
|
||||
x4 := binary.LittleEndian.Uint32(key[0:4])
|
||||
x5 := binary.LittleEndian.Uint32(key[4:8])
|
||||
x6 := binary.LittleEndian.Uint32(key[8:12])
|
||||
x7 := binary.LittleEndian.Uint32(key[12:16])
|
||||
x8 := binary.LittleEndian.Uint32(key[16:20])
|
||||
x9 := binary.LittleEndian.Uint32(key[20:24])
|
||||
x10 := binary.LittleEndian.Uint32(key[24:28])
|
||||
x11 := binary.LittleEndian.Uint32(key[28:32])
|
||||
x12 := binary.LittleEndian.Uint32(nonce[0:4])
|
||||
x13 := binary.LittleEndian.Uint32(nonce[4:8])
|
||||
x14 := binary.LittleEndian.Uint32(nonce[8:12])
|
||||
x15 := binary.LittleEndian.Uint32(nonce[12:16])
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
// Diagonal round.
|
||||
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
|
||||
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
|
||||
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
|
||||
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
|
||||
|
||||
// Column round.
|
||||
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
|
||||
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
|
||||
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
|
||||
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
|
||||
}
|
||||
|
||||
_ = out[31] // bounds check elimination hint
|
||||
binary.LittleEndian.PutUint32(out[0:4], x0)
|
||||
binary.LittleEndian.PutUint32(out[4:8], x1)
|
||||
binary.LittleEndian.PutUint32(out[8:12], x2)
|
||||
binary.LittleEndian.PutUint32(out[12:16], x3)
|
||||
binary.LittleEndian.PutUint32(out[16:20], x12)
|
||||
binary.LittleEndian.PutUint32(out[20:24], x13)
|
||||
binary.LittleEndian.PutUint32(out[24:28], x14)
|
||||
binary.LittleEndian.PutUint32(out[28:32], x15)
|
||||
return out, nil
|
||||
}
|
||||
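The generic file above defines the package's public surface (NewUnauthenticatedCipher, XORKeyStream, HChaCha20, and the size constants). A minimal, hedged usage sketch of the stream cipher with a 24-byte nonce, which takes the XChaCha20 derivation path described in the comments (values are illustrative; a bare stream cipher provides no authentication, as the doc comment warns):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20"
)

func main() {
	key := make([]byte, chacha20.KeySize)      // 32 bytes
	nonce := make([]byte, chacha20.NonceSizeX) // 24 bytes selects XChaCha20
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	enc, err := chacha20.NewUnauthenticatedCipher(key, nonce)
	if err != nil {
		panic(err)
	}
	plaintext := []byte("attack at dawn")
	ciphertext := make([]byte, len(plaintext))
	enc.XORKeyStream(ciphertext, plaintext)

	// Decryption is the same operation with a fresh cipher instance.
	dec, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
	recovered := make([]byte, len(ciphertext))
	dec.XORKeyStream(recovered, ciphertext)
	fmt.Println(bytes.Equal(plaintext, recovered)) // true
}
```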
13 vendor/golang.org/x/crypto/chacha20/chacha_noasm.go generated vendored (new file)
@@ -0,0 +1,13 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !arm64,!s390x,!ppc64le arm64,!go1.11 gccgo appengine

package chacha20

const bufSize = blockSize

func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) {
	s.xorKeyStreamBlocksGeneric(dst, src)
}
16 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go generated vendored (new file)
@@ -0,0 +1,16 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo,!appengine

package chacha20

const bufSize = 256

//go:noescape
func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)

func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) {
	chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter)
}
449 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s generated vendored (new file)
@@ -0,0 +1,449 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Based on CRYPTOGAMS code with the following comment:
|
||||
// # ====================================================================
|
||||
// # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
|
||||
// # project. The module is, however, dual licensed under OpenSSL and
|
||||
// # CRYPTOGAMS licenses depending on where you obtain it. For further
|
||||
// # details see http://www.openssl.org/~appro/cryptogams/.
|
||||
// # ====================================================================
|
||||
|
||||
// Code for the perl script that generates the ppc64 assembler
|
||||
// can be found in the cryptogams repository at the link below. It is based on
|
||||
// the original from openssl.
|
||||
|
||||
// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91
|
||||
|
||||
// The differences in this and the original implementation are
|
||||
// due to the calling conventions and initialization of constants.
|
||||
|
||||
// +build !gccgo,!appengine
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
#define OUT R3
|
||||
#define INP R4
|
||||
#define LEN R5
|
||||
#define KEY R6
|
||||
#define CNT R7
|
||||
#define TMP R15
|
||||
|
||||
#define CONSTBASE R16
|
||||
#define BLOCKS R17
|
||||
|
||||
DATA consts<>+0x00(SB)/8, $0x3320646e61707865
|
||||
DATA consts<>+0x08(SB)/8, $0x6b20657479622d32
|
||||
DATA consts<>+0x10(SB)/8, $0x0000000000000001
|
||||
DATA consts<>+0x18(SB)/8, $0x0000000000000000
|
||||
DATA consts<>+0x20(SB)/8, $0x0000000000000004
|
||||
DATA consts<>+0x28(SB)/8, $0x0000000000000000
|
||||
DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d
|
||||
DATA consts<>+0x38(SB)/8, $0x0203000106070405
|
||||
DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c
|
||||
DATA consts<>+0x48(SB)/8, $0x0102030005060704
|
||||
DATA consts<>+0x50(SB)/8, $0x6170786561707865
|
||||
DATA consts<>+0x58(SB)/8, $0x6170786561707865
|
||||
DATA consts<>+0x60(SB)/8, $0x3320646e3320646e
|
||||
DATA consts<>+0x68(SB)/8, $0x3320646e3320646e
|
||||
DATA consts<>+0x70(SB)/8, $0x79622d3279622d32
|
||||
DATA consts<>+0x78(SB)/8, $0x79622d3279622d32
|
||||
DATA consts<>+0x80(SB)/8, $0x6b2065746b206574
|
||||
DATA consts<>+0x88(SB)/8, $0x6b2065746b206574
|
||||
DATA consts<>+0x90(SB)/8, $0x0000000100000000
|
||||
DATA consts<>+0x98(SB)/8, $0x0000000300000002
|
||||
GLOBL consts<>(SB), RODATA, $0xa0
|
||||
|
||||
//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
|
||||
TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
|
||||
MOVD out+0(FP), OUT
|
||||
MOVD inp+8(FP), INP
|
||||
MOVD len+16(FP), LEN
|
||||
MOVD key+24(FP), KEY
|
||||
MOVD counter+32(FP), CNT
|
||||
|
||||
// Addressing for constants
|
||||
MOVD $consts<>+0x00(SB), CONSTBASE
|
||||
MOVD $16, R8
|
||||
MOVD $32, R9
|
||||
MOVD $48, R10
|
||||
MOVD $64, R11
|
||||
SRD $6, LEN, BLOCKS
|
||||
// V16
|
||||
LXVW4X (CONSTBASE)(R0), VS48
|
||||
ADD $80,CONSTBASE
|
||||
|
||||
// Load key into V17,V18
|
||||
LXVW4X (KEY)(R0), VS49
|
||||
LXVW4X (KEY)(R8), VS50
|
||||
|
||||
// Load CNT, NONCE into V19
|
||||
LXVW4X (CNT)(R0), VS51
|
||||
|
||||
// Clear V27
|
||||
VXOR V27, V27, V27
|
||||
|
||||
// V28
|
||||
LXVW4X (CONSTBASE)(R11), VS60
|
||||
|
||||
// splat slot from V19 -> V26
|
||||
VSPLTW $0, V19, V26
|
||||
|
||||
VSLDOI $4, V19, V27, V19
|
||||
VSLDOI $12, V27, V19, V19
|
||||
|
||||
VADDUWM V26, V28, V26
|
||||
|
||||
MOVD $10, R14
|
||||
MOVD R14, CTR
|
||||
|
||||
loop_outer_vsx:
|
||||
// V0, V1, V2, V3
|
||||
LXVW4X (R0)(CONSTBASE), VS32
|
||||
LXVW4X (R8)(CONSTBASE), VS33
|
||||
LXVW4X (R9)(CONSTBASE), VS34
|
||||
LXVW4X (R10)(CONSTBASE), VS35
|
||||
|
||||
// splat values from V17, V18 into V4-V11
|
||||
VSPLTW $0, V17, V4
|
||||
VSPLTW $1, V17, V5
|
||||
VSPLTW $2, V17, V6
|
||||
VSPLTW $3, V17, V7
|
||||
VSPLTW $0, V18, V8
|
||||
VSPLTW $1, V18, V9
|
||||
VSPLTW $2, V18, V10
|
||||
VSPLTW $3, V18, V11
|
||||
|
||||
// VOR
|
||||
VOR V26, V26, V12
|
||||
|
||||
// splat values from V19 -> V13, V14, V15
|
||||
VSPLTW $1, V19, V13
|
||||
VSPLTW $2, V19, V14
|
||||
VSPLTW $3, V19, V15
|
||||
|
||||
// splat const values
|
||||
VSPLTISW $-16, V27
|
||||
VSPLTISW $12, V28
|
||||
VSPLTISW $8, V29
|
||||
VSPLTISW $7, V30
|
||||
|
||||
loop_vsx:
|
||||
VADDUWM V0, V4, V0
|
||||
VADDUWM V1, V5, V1
|
||||
VADDUWM V2, V6, V2
|
||||
VADDUWM V3, V7, V3
|
||||
|
||||
VXOR V12, V0, V12
|
||||
VXOR V13, V1, V13
|
||||
VXOR V14, V2, V14
|
||||
VXOR V15, V3, V15
|
||||
|
||||
VRLW V12, V27, V12
|
||||
VRLW V13, V27, V13
|
||||
VRLW V14, V27, V14
|
||||
VRLW V15, V27, V15
|
||||
|
||||
VADDUWM V8, V12, V8
|
||||
VADDUWM V9, V13, V9
|
||||
VADDUWM V10, V14, V10
|
||||
VADDUWM V11, V15, V11
|
||||
|
||||
VXOR V4, V8, V4
|
||||
VXOR V5, V9, V5
|
||||
VXOR V6, V10, V6
|
||||
VXOR V7, V11, V7
|
||||
|
||||
VRLW V4, V28, V4
|
||||
VRLW V5, V28, V5
|
||||
VRLW V6, V28, V6
|
||||
VRLW V7, V28, V7
|
||||
|
||||
VADDUWM V0, V4, V0
|
||||
VADDUWM V1, V5, V1
|
||||
VADDUWM V2, V6, V2
|
||||
VADDUWM V3, V7, V3
|
||||
|
||||
VXOR V12, V0, V12
|
||||
VXOR V13, V1, V13
|
||||
VXOR V14, V2, V14
|
||||
VXOR V15, V3, V15
|
||||
|
||||
VRLW V12, V29, V12
|
||||
VRLW V13, V29, V13
|
||||
VRLW V14, V29, V14
|
||||
VRLW V15, V29, V15
|
||||
|
||||
VADDUWM V8, V12, V8
|
||||
VADDUWM V9, V13, V9
|
||||
VADDUWM V10, V14, V10
|
||||
VADDUWM V11, V15, V11
|
||||
|
||||
VXOR V4, V8, V4
|
||||
VXOR V5, V9, V5
|
||||
VXOR V6, V10, V6
|
||||
VXOR V7, V11, V7
|
||||
|
||||
VRLW V4, V30, V4
|
||||
VRLW V5, V30, V5
|
||||
VRLW V6, V30, V6
|
||||
VRLW V7, V30, V7
|
||||
|
||||
VADDUWM V0, V5, V0
|
||||
VADDUWM V1, V6, V1
|
||||
VADDUWM V2, V7, V2
|
||||
VADDUWM V3, V4, V3
|
||||
|
||||
VXOR V15, V0, V15
|
||||
VXOR V12, V1, V12
|
||||
VXOR V13, V2, V13
|
||||
VXOR V14, V3, V14
|
||||
|
||||
VRLW V15, V27, V15
|
||||
VRLW V12, V27, V12
|
||||
VRLW V13, V27, V13
|
||||
VRLW V14, V27, V14
|
||||
|
||||
VADDUWM V10, V15, V10
|
||||
VADDUWM V11, V12, V11
|
||||
VADDUWM V8, V13, V8
|
||||
VADDUWM V9, V14, V9
|
||||
|
||||
VXOR V5, V10, V5
|
||||
VXOR V6, V11, V6
|
||||
VXOR V7, V8, V7
|
||||
VXOR V4, V9, V4
|
||||
|
||||
VRLW V5, V28, V5
|
||||
VRLW V6, V28, V6
|
||||
VRLW V7, V28, V7
|
||||
VRLW V4, V28, V4
|
||||
|
||||
VADDUWM V0, V5, V0
|
||||
VADDUWM V1, V6, V1
|
||||
VADDUWM V2, V7, V2
|
||||
VADDUWM V3, V4, V3
|
||||
|
||||
VXOR V15, V0, V15
|
||||
VXOR V12, V1, V12
|
||||
VXOR V13, V2, V13
|
||||
VXOR V14, V3, V14
|
||||
|
||||
VRLW V15, V29, V15
|
||||
VRLW V12, V29, V12
|
||||
VRLW V13, V29, V13
|
||||
VRLW V14, V29, V14
|
||||
|
||||
VADDUWM V10, V15, V10
|
||||
VADDUWM V11, V12, V11
|
||||
VADDUWM V8, V13, V8
|
||||
VADDUWM V9, V14, V9
|
||||
|
||||
VXOR V5, V10, V5
|
||||
VXOR V6, V11, V6
|
||||
VXOR V7, V8, V7
|
||||
VXOR V4, V9, V4
|
||||
|
||||
VRLW V5, V30, V5
|
||||
VRLW V6, V30, V6
|
||||
VRLW V7, V30, V7
|
||||
VRLW V4, V30, V4
|
||||
BC 16, LT, loop_vsx
|
||||
|
||||
VADDUWM V12, V26, V12
|
||||
|
||||
WORD $0x13600F8C // VMRGEW V0, V1, V27
|
||||
WORD $0x13821F8C // VMRGEW V2, V3, V28
|
||||
|
||||
WORD $0x10000E8C // VMRGOW V0, V1, V0
|
||||
WORD $0x10421E8C // VMRGOW V2, V3, V2
|
||||
|
||||
WORD $0x13A42F8C // VMRGEW V4, V5, V29
|
||||
WORD $0x13C63F8C // VMRGEW V6, V7, V30
|
||||
|
||||
XXPERMDI VS32, VS34, $0, VS33
|
||||
XXPERMDI VS32, VS34, $3, VS35
|
||||
XXPERMDI VS59, VS60, $0, VS32
|
||||
XXPERMDI VS59, VS60, $3, VS34
|
||||
|
||||
WORD $0x10842E8C // VMRGOW V4, V5, V4
|
||||
WORD $0x10C63E8C // VMRGOW V6, V7, V6
|
||||
|
||||
WORD $0x13684F8C // VMRGEW V8, V9, V27
|
||||
WORD $0x138A5F8C // VMRGEW V10, V11, V28
|
||||
|
||||
XXPERMDI VS36, VS38, $0, VS37
|
||||
XXPERMDI VS36, VS38, $3, VS39
|
||||
XXPERMDI VS61, VS62, $0, VS36
|
||||
XXPERMDI VS61, VS62, $3, VS38
|
||||
|
||||
WORD $0x11084E8C // VMRGOW V8, V9, V8
|
||||
WORD $0x114A5E8C // VMRGOW V10, V11, V10
|
||||
|
||||
WORD $0x13AC6F8C // VMRGEW V12, V13, V29
|
||||
WORD $0x13CE7F8C // VMRGEW V14, V15, V30
|
||||
|
||||
XXPERMDI VS40, VS42, $0, VS41
|
||||
XXPERMDI VS40, VS42, $3, VS43
|
||||
XXPERMDI VS59, VS60, $0, VS40
|
||||
XXPERMDI VS59, VS60, $3, VS42
|
||||
|
||||
WORD $0x118C6E8C // VMRGOW V12, V13, V12
|
||||
WORD $0x11CE7E8C // VMRGOW V14, V15, V14
|
||||
|
||||
VSPLTISW $4, V27
|
||||
VADDUWM V26, V27, V26
|
||||
|
||||
XXPERMDI VS44, VS46, $0, VS45
|
||||
XXPERMDI VS44, VS46, $3, VS47
|
||||
XXPERMDI VS61, VS62, $0, VS44
|
||||
XXPERMDI VS61, VS62, $3, VS46
|
||||
|
||||
VADDUWM V0, V16, V0
|
||||
VADDUWM V4, V17, V4
|
||||
VADDUWM V8, V18, V8
|
||||
VADDUWM V12, V19, V12
|
||||
|
||||
CMPU LEN, $64
|
||||
BLT tail_vsx
|
||||
|
||||
// Bottom of loop
|
||||
LXVW4X (INP)(R0), VS59
|
||||
LXVW4X (INP)(R8), VS60
|
||||
LXVW4X (INP)(R9), VS61
|
||||
LXVW4X (INP)(R10), VS62
|
||||
|
||||
VXOR V27, V0, V27
|
||||
VXOR V28, V4, V28
|
||||
VXOR V29, V8, V29
|
||||
VXOR V30, V12, V30
|
||||
|
||||
STXVW4X VS59, (OUT)(R0)
|
||||
STXVW4X VS60, (OUT)(R8)
|
||||
ADD $64, INP
|
||||
STXVW4X VS61, (OUT)(R9)
|
||||
ADD $-64, LEN
|
||||
STXVW4X VS62, (OUT)(R10)
|
||||
ADD $64, OUT
|
||||
BEQ done_vsx
|
||||
|
||||
VADDUWM V1, V16, V0
|
||||
VADDUWM V5, V17, V4
|
||||
VADDUWM V9, V18, V8
|
||||
VADDUWM V13, V19, V12
|
||||
|
||||
CMPU LEN, $64
|
||||
BLT tail_vsx
|
||||
|
||||
LXVW4X (INP)(R0), VS59
|
||||
LXVW4X (INP)(R8), VS60
|
||||
LXVW4X (INP)(R9), VS61
|
||||
LXVW4X (INP)(R10), VS62
|
||||
VXOR V27, V0, V27
|
||||
|
||||
VXOR V28, V4, V28
|
||||
VXOR V29, V8, V29
|
||||
VXOR V30, V12, V30
|
||||
|
||||
STXVW4X VS59, (OUT)(R0)
|
||||
STXVW4X VS60, (OUT)(R8)
|
||||
ADD $64, INP
|
||||
STXVW4X VS61, (OUT)(R9)
|
||||
ADD $-64, LEN
|
||||
STXVW4X VS62, (OUT)(V10)
|
||||
ADD $64, OUT
|
||||
BEQ done_vsx
|
||||
|
||||
VADDUWM V2, V16, V0
|
||||
VADDUWM V6, V17, V4
|
||||
VADDUWM V10, V18, V8
|
||||
VADDUWM V14, V19, V12
|
||||
|
||||
CMPU LEN, $64
|
||||
BLT tail_vsx
|
||||
|
||||
LXVW4X (INP)(R0), VS59
|
||||
LXVW4X (INP)(R8), VS60
|
||||
LXVW4X (INP)(R9), VS61
|
||||
LXVW4X (INP)(R10), VS62
|
||||
|
||||
VXOR V27, V0, V27
|
||||
VXOR V28, V4, V28
|
||||
VXOR V29, V8, V29
|
||||
VXOR V30, V12, V30
|
||||
|
||||
STXVW4X VS59, (OUT)(R0)
|
||||
STXVW4X VS60, (OUT)(R8)
|
||||
ADD $64, INP
|
||||
STXVW4X VS61, (OUT)(R9)
|
||||
ADD $-64, LEN
|
||||
STXVW4X VS62, (OUT)(R10)
|
||||
ADD $64, OUT
|
||||
BEQ done_vsx
|
||||
|
||||
VADDUWM V3, V16, V0
|
||||
VADDUWM V7, V17, V4
|
||||
VADDUWM V11, V18, V8
|
||||
VADDUWM V15, V19, V12
|
||||
|
||||
CMPU LEN, $64
|
||||
BLT tail_vsx
|
||||
|
||||
LXVW4X (INP)(R0), VS59
|
||||
LXVW4X (INP)(R8), VS60
|
||||
LXVW4X (INP)(R9), VS61
|
||||
LXVW4X (INP)(R10), VS62
|
||||
|
||||
VXOR V27, V0, V27
|
||||
VXOR V28, V4, V28
|
||||
VXOR V29, V8, V29
|
||||
VXOR V30, V12, V30
|
||||
|
||||
STXVW4X VS59, (OUT)(R0)
|
||||
STXVW4X VS60, (OUT)(R8)
|
||||
ADD $64, INP
|
||||
STXVW4X VS61, (OUT)(R9)
|
||||
ADD $-64, LEN
|
||||
STXVW4X VS62, (OUT)(R10)
|
||||
ADD $64, OUT
|
||||
|
||||
MOVD $10, R14
|
||||
MOVD R14, CTR
|
||||
BNE loop_outer_vsx
|
||||
|
||||
done_vsx:
|
||||
// Increment counter by number of 64 byte blocks
|
||||
MOVD (CNT), R14
|
||||
ADD BLOCKS, R14
|
||||
MOVD R14, (CNT)
|
||||
RET
|
||||
|
||||
tail_vsx:
|
||||
ADD $32, R1, R11
|
||||
MOVD LEN, CTR
|
||||
|
||||
// Save values on stack to copy from
|
||||
STXVW4X VS32, (R11)(R0)
|
||||
STXVW4X VS36, (R11)(R8)
|
||||
STXVW4X VS40, (R11)(R9)
|
||||
STXVW4X VS44, (R11)(R10)
|
||||
ADD $-1, R11, R12
|
||||
ADD $-1, INP
|
||||
ADD $-1, OUT
|
||||
|
||||
looptail_vsx:
|
||||
// Copying the result to OUT
|
||||
// in bytes.
|
||||
MOVBZU 1(R12), KEY
|
||||
MOVBZU 1(INP), TMP
|
||||
XOR KEY, TMP, KEY
|
||||
MOVBU KEY, 1(OUT)
|
||||
BC 16, LT, looptail_vsx
|
||||
|
||||
// Clear the stack values
|
||||
STXVW4X VS48, (R11)(R0)
|
||||
STXVW4X VS48, (R11)(R8)
|
||||
STXVW4X VS48, (R11)(R9)
|
||||
STXVW4X VS48, (R11)(R10)
|
||||
BR done_vsx
|
||||
26 vendor/golang.org/x/crypto/chacha20/chacha_s390x.go generated vendored (new file)
@@ -0,0 +1,26 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo,!appengine

package chacha20

import "golang.org/x/sys/cpu"

var haveAsm = cpu.S390X.HasVX

const bufSize = 256

// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
// be called when the vector facility is available. Implementation in asm_s390x.s.
//go:noescape
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)

func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) {
	if cpu.S390X.HasVX {
		xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter)
	} else {
		c.xorKeyStreamBlocksGeneric(dst, src)
	}
}
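The s390x wrapper above dispatches at run time on cpu.S390X.HasVX rather than at build time. A hedged sketch of the same run-time dispatch pattern on another architecture using golang.org/x/sys/cpu (sumFast and sumSlow are placeholders, not part of any real package):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// sumFast and sumSlow stand in for an assembly kernel and its generic fallback.
func sumFast(xs []int) int { return sumSlow(xs) } // placeholder for a SIMD path
func sumSlow(xs []int) int {
	s := 0
	for _, x := range xs {
		s += x
	}
	return s
}

// sum picks an implementation based on a CPU feature flag, mirroring the
// HasVX check in xorKeyStreamBlocks above.
func sum(xs []int) int {
	if cpu.X86.HasAVX2 { // false on non-x86 builds; the fallback is always safe
		return sumFast(xs)
	}
	return sumSlow(xs)
}

func main() {
	fmt.Println(sum([]int{1, 2, 3}))
}
```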
@@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build s390x,!gccgo,!appengine
|
||||
// +build !gccgo,!appengine
|
||||
|
||||
#include "go_asm.h"
|
||||
#include "textflag.h"
|
||||
@@ -24,15 +24,6 @@ DATA ·constants<>+0x14(SB)/4, $0x3320646e
|
||||
DATA ·constants<>+0x18(SB)/4, $0x79622d32
|
||||
DATA ·constants<>+0x1c(SB)/4, $0x6b206574
|
||||
|
||||
// EXRL targets:
|
||||
TEXT ·mvcSrcToBuf(SB), NOFRAME|NOSPLIT, $0
|
||||
MVC $1, (R1), (R8)
|
||||
RET
|
||||
|
||||
TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0
|
||||
MVC $1, (R8), (R9)
|
||||
RET
|
||||
|
||||
#define BSWAP V5
|
||||
#define J0 V6
|
||||
#define KEY0 V7
|
||||
@@ -144,7 +135,7 @@ TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0
|
||||
VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]}
|
||||
VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]}
|
||||
|
||||
// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
|
||||
// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
|
||||
TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
|
||||
MOVD $·constants<>(SB), R1
|
||||
MOVD dst+0(FP), R2 // R2=&dst[0]
|
||||
@@ -152,25 +143,10 @@ TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
|
||||
MOVD key+48(FP), R5 // R5=key
|
||||
MOVD nonce+56(FP), R6 // R6=nonce
|
||||
MOVD counter+64(FP), R7 // R7=counter
|
||||
MOVD buf+72(FP), R8 // R8=buf
|
||||
MOVD len+80(FP), R9 // R9=len
|
||||
|
||||
// load BSWAP and J0
|
||||
VLM (R1), BSWAP, J0
|
||||
|
||||
// set up tail buffer
|
||||
ADD $-1, R4, R12
|
||||
MOVBZ R12, R12
|
||||
CMPUBEQ R12, $255, aligned
|
||||
MOVD R4, R1
|
||||
AND $~255, R1
|
||||
MOVD $(R3)(R1*1), R1
|
||||
EXRL $·mvcSrcToBuf(SB), R12
|
||||
MOVD $255, R0
|
||||
SUB R12, R0
|
||||
MOVD R0, (R9) // update len
|
||||
|
||||
aligned:
|
||||
// setup
|
||||
MOVD $95, R0
|
||||
VLM (R5), KEY0, KEY1
|
||||
@@ -217,9 +193,7 @@ loop:
|
||||
|
||||
// decrement length
|
||||
ADD $-256, R4
|
||||
BLT tail
|
||||
|
||||
continue:
|
||||
// rearrange vectors
|
||||
SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3)
|
||||
ADDV(J0, X0, X1, X2, X3)
|
||||
@@ -245,39 +219,6 @@ continue:
|
||||
MOVD $256(R3), R3
|
||||
|
||||
CMPBNE R4, $0, chacha
|
||||
CMPUBEQ R12, $255, return
|
||||
EXRL $·mvcBufToDst(SB), R12 // len was updated during setup
|
||||
|
||||
return:
|
||||
VSTEF $0, CTR, (R7)
|
||||
RET
|
||||
|
||||
tail:
|
||||
MOVD R2, R9
|
||||
MOVD R8, R2
|
||||
MOVD R8, R3
|
||||
MOVD $0, R4
|
||||
JMP continue
|
||||
|
||||
// func hasVectorFacility() bool
|
||||
TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
|
||||
MOVD $x-24(SP), R1
|
||||
XC $24, 0(R1), 0(R1) // clear the storage
|
||||
MOVD $2, R0 // R0 is the number of double words stored -1
|
||||
WORD $0xB2B01000 // STFLE 0(R1)
|
||||
XOR R0, R0 // reset the value of R0
|
||||
MOVBZ z-8(SP), R1
|
||||
AND $0x40, R1
|
||||
BEQ novector
|
||||
|
||||
vectorinstalled:
|
||||
// check if the vector instruction has been enabled
|
||||
VLEIB $0, $0xF, V16
|
||||
VLGVB $0, V16, R1
|
||||
CMPBNE R1, $0xF, novector
|
||||
MOVB $1, ret+0(FP) // have vx
|
||||
RET
|
||||
|
||||
novector:
|
||||
MOVB $0, ret+0(FP) // no vx
|
||||
RET
|
||||
@@ -4,9 +4,7 @@
|
||||
|
||||
package chacha20
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
)
|
||||
import "runtime"
|
||||
|
||||
// Platforms that have fast unaligned 32-bit little endian accesses.
|
||||
const unaligned = runtime.GOARCH == "386" ||
|
||||
8 vendor/golang.org/x/crypto/curve25519/const_amd64.h generated vendored
@@ -1,8 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
#define REDMASK51 0x0007FFFFFFFFFFFF
|
||||
20 vendor/golang.org/x/crypto/curve25519/const_amd64.s generated vendored
@@ -1,20 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
// These constants cannot be encoded in non-MOVQ immediates.
|
||||
// We access them directly from memory instead.
|
||||
|
||||
DATA ·_121666_213(SB)/8, $996687872
|
||||
GLOBL ·_121666_213(SB), 8, $8
|
||||
|
||||
DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
|
||||
GLOBL ·_2P0(SB), 8, $8
|
||||
|
||||
DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
|
||||
GLOBL ·_2P1234(SB), 8, $8
|
||||
65 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s generated vendored
@@ -1,65 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
// func cswap(inout *[4][5]uint64, v uint64)
|
||||
TEXT ·cswap(SB),7,$0
|
||||
MOVQ inout+0(FP),DI
|
||||
MOVQ v+8(FP),SI
|
||||
|
||||
SUBQ $1, SI
|
||||
NOTQ SI
|
||||
MOVQ SI, X15
|
||||
PSHUFD $0x44, X15, X15
|
||||
|
||||
MOVOU 0(DI), X0
|
||||
MOVOU 16(DI), X2
|
||||
MOVOU 32(DI), X4
|
||||
MOVOU 48(DI), X6
|
||||
MOVOU 64(DI), X8
|
||||
MOVOU 80(DI), X1
|
||||
MOVOU 96(DI), X3
|
||||
MOVOU 112(DI), X5
|
||||
MOVOU 128(DI), X7
|
||||
MOVOU 144(DI), X9
|
||||
|
||||
MOVO X1, X10
|
||||
MOVO X3, X11
|
||||
MOVO X5, X12
|
||||
MOVO X7, X13
|
||||
MOVO X9, X14
|
||||
|
||||
PXOR X0, X10
|
||||
PXOR X2, X11
|
||||
PXOR X4, X12
|
||||
PXOR X6, X13
|
||||
PXOR X8, X14
|
||||
PAND X15, X10
|
||||
PAND X15, X11
|
||||
PAND X15, X12
|
||||
PAND X15, X13
|
||||
PAND X15, X14
|
||||
PXOR X10, X0
|
||||
PXOR X10, X1
|
||||
PXOR X11, X2
|
||||
PXOR X11, X3
|
||||
PXOR X12, X4
|
||||
PXOR X12, X5
|
||||
PXOR X13, X6
|
||||
PXOR X13, X7
|
||||
PXOR X14, X8
|
||||
PXOR X14, X9
|
||||
|
||||
MOVOU X0, 0(DI)
|
||||
MOVOU X2, 16(DI)
|
||||
MOVOU X4, 32(DI)
|
||||
MOVOU X6, 48(DI)
|
||||
MOVOU X8, 64(DI)
|
||||
MOVOU X1, 80(DI)
|
||||
MOVOU X3, 96(DI)
|
||||
MOVOU X5, 112(DI)
|
||||
MOVOU X7, 128(DI)
|
||||
MOVOU X9, 144(DI)
|
||||
RET
|
||||
897 vendor/golang.org/x/crypto/curve25519/curve25519.go generated vendored
@@ -1,834 +1,95 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// We have an implementation in amd64 assembly so this code is only run on
|
||||
// non-amd64 platforms. The amd64 assembly does not support gccgo.
|
||||
// +build !amd64 gccgo appengine
|
||||
|
||||
package curve25519
|
||||
// Package curve25519 provides an implementation of the X25519 function, which
|
||||
// performs scalar multiplication on the elliptic curve known as Curve25519.
|
||||
// See RFC 7748.
|
||||
package curve25519 // import "golang.org/x/crypto/curve25519"
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// This code is a port of the public domain, "ref10" implementation of
|
||||
// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
|
||||
// ScalarMult sets dst to the product scalar * point.
|
||||
//
|
||||
// Deprecated: when provided a low-order point, ScalarMult will set dst to all
|
||||
// zeroes, irrespective of the scalar. Instead, use the X25519 function, which
|
||||
// will return an error.
|
||||
func ScalarMult(dst, scalar, point *[32]byte) {
|
||||
scalarMult(dst, scalar, point)
|
||||
}
|
||||
|
||||
// fieldElement represents an element of the field GF(2^255 - 19). An element
|
||||
// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
|
||||
// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
|
||||
// context.
|
||||
type fieldElement [10]int32
|
||||
// ScalarBaseMult sets dst to the product scalar * base where base is the
|
||||
// standard generator.
|
||||
//
|
||||
// It is recommended to use the X25519 function with Basepoint instead, as
|
||||
// copying into fixed size arrays can lead to unexpected bugs.
|
||||
func ScalarBaseMult(dst, scalar *[32]byte) {
|
||||
ScalarMult(dst, scalar, &basePoint)
|
||||
}
|
||||
|
||||
func feZero(fe *fieldElement) {
|
||||
for i := range fe {
|
||||
fe[i] = 0
|
||||
const (
|
||||
// ScalarSize is the size of the scalar input to X25519.
|
||||
ScalarSize = 32
|
||||
// PointSize is the size of the point input to X25519.
|
||||
PointSize = 32
|
||||
)
|
||||
|
||||
// Basepoint is the canonical Curve25519 generator.
|
||||
var Basepoint []byte
|
||||
|
||||
var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
||||
|
||||
func init() { Basepoint = basePoint[:] }
|
||||
|
||||
func checkBasepoint() {
|
||||
if subtle.ConstantTimeCompare(Basepoint, []byte{
|
||||
0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
}) != 1 {
|
||||
panic("curve25519: global Basepoint value was modified")
|
||||
}
|
||||
}
|
||||
|
||||
func feOne(fe *fieldElement) {
|
||||
feZero(fe)
|
||||
fe[0] = 1
|
||||
// X25519 returns the result of the scalar multiplication (scalar * point),
|
||||
// according to RFC 7748, Section 5. scalar, point and the return value are
|
||||
// slices of 32 bytes.
|
||||
//
|
||||
// scalar can be generated at random, for example with crypto/rand. point should
|
||||
// be either Basepoint or the output of another X25519 call.
|
||||
//
|
||||
// If point is Basepoint (but not if it's a different slice with the same
|
||||
// contents) a precomputed implementation might be used for performance.
|
||||
func X25519(scalar, point []byte) ([]byte, error) {
|
||||
// Outline the body of function, to let the allocation be inlined in the
|
||||
// caller, and possibly avoid escaping to the heap.
|
||||
var dst [32]byte
|
||||
return x25519(&dst, scalar, point)
|
||||
}
|
||||
|
||||
func feAdd(dst, a, b *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = a[i] + b[i]
|
||||
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
|
||||
var in [32]byte
|
||||
if l := len(scalar); l != 32 {
|
||||
return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32)
|
||||
}
|
||||
}
|
||||
|
||||
func feSub(dst, a, b *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = a[i] - b[i]
|
||||
}
|
||||
}
|
||||
|
||||
func feCopy(dst, src *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = src[i]
|
||||
}
|
||||
}
|
||||
|
||||
// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
|
||||
//
|
||||
// Preconditions: b in {0,1}.
|
||||
func feCSwap(f, g *fieldElement, b int32) {
|
||||
b = -b
|
||||
for i := range f {
|
||||
t := b & (f[i] ^ g[i])
|
||||
f[i] ^= t
|
||||
g[i] ^= t
|
||||
}
|
||||
}
|
||||
|
||||
// load3 reads a 24-bit, little-endian value from in.
|
||||
func load3(in []byte) int64 {
|
||||
var r int64
|
||||
r = int64(in[0])
|
||||
r |= int64(in[1]) << 8
|
||||
r |= int64(in[2]) << 16
|
||||
return r
|
||||
}
|
||||
|
||||
// load4 reads a 32-bit, little-endian value from in.
|
||||
func load4(in []byte) int64 {
|
||||
return int64(binary.LittleEndian.Uint32(in))
|
||||
}
|
||||
|
||||
func feFromBytes(dst *fieldElement, src *[32]byte) {
|
||||
h0 := load4(src[:])
|
||||
h1 := load3(src[4:]) << 6
|
||||
h2 := load3(src[7:]) << 5
|
||||
h3 := load3(src[10:]) << 3
|
||||
h4 := load3(src[13:]) << 2
|
||||
h5 := load4(src[16:])
|
||||
h6 := load3(src[20:]) << 7
|
||||
h7 := load3(src[23:]) << 5
|
||||
h8 := load3(src[26:]) << 4
|
||||
h9 := load3(src[29:]) << 2
|
||||
|
||||
var carry [10]int64
|
||||
carry[9] = (h9 + 1<<24) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
carry[1] = (h1 + 1<<24) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[3] = (h3 + 1<<24) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[5] = (h5 + 1<<24) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
carry[7] = (h7 + 1<<24) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[0] = (h0 + 1<<25) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[2] = (h2 + 1<<25) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[4] = (h4 + 1<<25) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[6] = (h6 + 1<<25) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
carry[8] = (h8 + 1<<25) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
dst[0] = int32(h0)
|
||||
dst[1] = int32(h1)
|
||||
dst[2] = int32(h2)
|
||||
dst[3] = int32(h3)
|
||||
dst[4] = int32(h4)
|
||||
dst[5] = int32(h5)
|
||||
dst[6] = int32(h6)
|
||||
dst[7] = int32(h7)
|
||||
dst[8] = int32(h8)
|
||||
dst[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feToBytes marshals h to s.
|
||||
// Preconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
//
|
||||
// Write p=2^255-19; q=floor(h/p).
|
||||
// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
|
||||
//
|
||||
// Proof:
|
||||
// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
|
||||
// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
|
||||
//
|
||||
// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
|
||||
// Then 0<y<1.
|
||||
//
|
||||
// Write r=h-pq.
|
||||
// Have 0<=r<=p-1=2^255-20.
|
||||
// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
|
||||
//
|
||||
// Write x=r+19(2^-255)r+y.
|
||||
// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
|
||||
//
|
||||
// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
|
||||
// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
|
||||
func feToBytes(s *[32]byte, h *fieldElement) {
|
||||
var carry [10]int32
|
||||
|
||||
q := (19*h[9] + (1 << 24)) >> 25
|
||||
q = (h[0] + q) >> 26
|
||||
q = (h[1] + q) >> 25
|
||||
q = (h[2] + q) >> 26
|
||||
q = (h[3] + q) >> 25
|
||||
q = (h[4] + q) >> 26
|
||||
q = (h[5] + q) >> 25
|
||||
q = (h[6] + q) >> 26
|
||||
q = (h[7] + q) >> 25
|
||||
q = (h[8] + q) >> 26
|
||||
q = (h[9] + q) >> 25
|
||||
|
||||
// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
|
||||
h[0] += 19 * q
|
||||
// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
|
||||
|
||||
carry[0] = h[0] >> 26
|
||||
h[1] += carry[0]
|
||||
h[0] -= carry[0] << 26
|
||||
carry[1] = h[1] >> 25
|
||||
h[2] += carry[1]
|
||||
h[1] -= carry[1] << 25
|
||||
carry[2] = h[2] >> 26
|
||||
h[3] += carry[2]
|
||||
h[2] -= carry[2] << 26
|
||||
carry[3] = h[3] >> 25
|
||||
h[4] += carry[3]
|
||||
h[3] -= carry[3] << 25
|
||||
carry[4] = h[4] >> 26
|
||||
h[5] += carry[4]
|
||||
h[4] -= carry[4] << 26
|
||||
carry[5] = h[5] >> 25
|
||||
h[6] += carry[5]
|
||||
h[5] -= carry[5] << 25
|
||||
carry[6] = h[6] >> 26
|
||||
h[7] += carry[6]
|
||||
h[6] -= carry[6] << 26
|
||||
carry[7] = h[7] >> 25
|
||||
h[8] += carry[7]
|
||||
h[7] -= carry[7] << 25
|
||||
carry[8] = h[8] >> 26
|
||||
h[9] += carry[8]
|
||||
h[8] -= carry[8] << 26
|
||||
carry[9] = h[9] >> 25
|
||||
h[9] -= carry[9] << 25
|
||||
// h10 = carry9
|
||||
|
||||
// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
|
||||
// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
|
||||
// evidently 2^255 h10-2^255 q = 0.
|
||||
// Goal: Output h[0]+...+2^230 h[9].
|
||||
|
||||
s[0] = byte(h[0] >> 0)
|
||||
s[1] = byte(h[0] >> 8)
|
||||
s[2] = byte(h[0] >> 16)
|
||||
s[3] = byte((h[0] >> 24) | (h[1] << 2))
|
||||
s[4] = byte(h[1] >> 6)
|
||||
s[5] = byte(h[1] >> 14)
|
||||
s[6] = byte((h[1] >> 22) | (h[2] << 3))
|
||||
s[7] = byte(h[2] >> 5)
|
||||
s[8] = byte(h[2] >> 13)
|
||||
s[9] = byte((h[2] >> 21) | (h[3] << 5))
|
||||
s[10] = byte(h[3] >> 3)
|
||||
s[11] = byte(h[3] >> 11)
|
||||
s[12] = byte((h[3] >> 19) | (h[4] << 6))
|
||||
s[13] = byte(h[4] >> 2)
|
||||
s[14] = byte(h[4] >> 10)
|
||||
s[15] = byte(h[4] >> 18)
|
||||
s[16] = byte(h[5] >> 0)
|
||||
s[17] = byte(h[5] >> 8)
|
||||
s[18] = byte(h[5] >> 16)
|
||||
s[19] = byte((h[5] >> 24) | (h[6] << 1))
|
||||
s[20] = byte(h[6] >> 7)
|
||||
s[21] = byte(h[6] >> 15)
|
||||
s[22] = byte((h[6] >> 23) | (h[7] << 3))
|
||||
s[23] = byte(h[7] >> 5)
|
||||
s[24] = byte(h[7] >> 13)
|
||||
s[25] = byte((h[7] >> 21) | (h[8] << 4))
|
||||
s[26] = byte(h[8] >> 4)
|
||||
s[27] = byte(h[8] >> 12)
|
||||
s[28] = byte((h[8] >> 20) | (h[9] << 6))
|
||||
s[29] = byte(h[9] >> 2)
|
||||
s[30] = byte(h[9] >> 10)
|
||||
s[31] = byte(h[9] >> 18)
|
||||
}
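// Aside: the shift-and-or packing above follows directly from the limb
// exponents 0,26,51,77,102,128,153,179,204,230. A tiny standalone sketch
// (separate from the vendored file) that prints where each limb starts in
// the 32-byte little-endian output, matching e.g. h[1]<<2 landing in s[3]:

package main

import "fmt"

func main() {
	exps := []int{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
	for i, e := range exps {
		// limb i begins at bit e of the serialized value
		fmt.Printf("limb %d: byte %d, bit %d\n", i, e/8, e%8)
	}
}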
|
||||
|
||||
// feMul calculates h = f * g
// Can overlap h with f or g.
//
// Preconditions:
//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//    |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Notes on implementation strategy:
//
// Using schoolbook multiplication.
// Karatsuba would save a little in some cost models.
//
// Most multiplications by 2 and 19 are 32-bit precomputations;
// cheaper than 64-bit postcomputations.
//
// There is one remaining multiplication by 19 in the carry chain;
// one *19 precomputation can be merged into this,
// but the resulting data flow is considerably less clean.
//
// There are 12 carries below.
// 10 of them are 2-way parallelizable and vectorizable.
// Can get away with 11 carries, but then data flow is much deeper.
//
// With tighter constraints on inputs can squeeze carries into int32.
func feMul(h, f, g *fieldElement) {
|
||||
f0 := f[0]
|
||||
f1 := f[1]
|
||||
f2 := f[2]
|
||||
f3 := f[3]
|
||||
f4 := f[4]
|
||||
f5 := f[5]
|
||||
f6 := f[6]
|
||||
f7 := f[7]
|
||||
f8 := f[8]
|
||||
f9 := f[9]
|
||||
g0 := g[0]
|
||||
g1 := g[1]
|
||||
g2 := g[2]
|
||||
g3 := g[3]
|
||||
g4 := g[4]
|
||||
g5 := g[5]
|
||||
g6 := g[6]
|
||||
g7 := g[7]
|
||||
g8 := g[8]
|
||||
g9 := g[9]
|
||||
g1_19 := 19 * g1 // 1.4*2^29
|
||||
g2_19 := 19 * g2 // 1.4*2^30; still ok
|
||||
g3_19 := 19 * g3
|
||||
g4_19 := 19 * g4
|
||||
g5_19 := 19 * g5
|
||||
g6_19 := 19 * g6
|
||||
g7_19 := 19 * g7
|
||||
g8_19 := 19 * g8
|
||||
g9_19 := 19 * g9
|
||||
f1_2 := 2 * f1
|
||||
f3_2 := 2 * f3
|
||||
f5_2 := 2 * f5
|
||||
f7_2 := 2 * f7
|
||||
f9_2 := 2 * f9
|
||||
f0g0 := int64(f0) * int64(g0)
|
||||
f0g1 := int64(f0) * int64(g1)
|
||||
f0g2 := int64(f0) * int64(g2)
|
||||
f0g3 := int64(f0) * int64(g3)
|
||||
f0g4 := int64(f0) * int64(g4)
|
||||
f0g5 := int64(f0) * int64(g5)
|
||||
f0g6 := int64(f0) * int64(g6)
|
||||
f0g7 := int64(f0) * int64(g7)
|
||||
f0g8 := int64(f0) * int64(g8)
|
||||
f0g9 := int64(f0) * int64(g9)
|
||||
f1g0 := int64(f1) * int64(g0)
|
||||
f1g1_2 := int64(f1_2) * int64(g1)
|
||||
f1g2 := int64(f1) * int64(g2)
|
||||
f1g3_2 := int64(f1_2) * int64(g3)
|
||||
f1g4 := int64(f1) * int64(g4)
|
||||
f1g5_2 := int64(f1_2) * int64(g5)
|
||||
f1g6 := int64(f1) * int64(g6)
|
||||
f1g7_2 := int64(f1_2) * int64(g7)
|
||||
f1g8 := int64(f1) * int64(g8)
|
||||
f1g9_38 := int64(f1_2) * int64(g9_19)
|
||||
f2g0 := int64(f2) * int64(g0)
|
||||
f2g1 := int64(f2) * int64(g1)
|
||||
f2g2 := int64(f2) * int64(g2)
|
||||
f2g3 := int64(f2) * int64(g3)
|
||||
f2g4 := int64(f2) * int64(g4)
|
||||
f2g5 := int64(f2) * int64(g5)
|
||||
f2g6 := int64(f2) * int64(g6)
|
||||
f2g7 := int64(f2) * int64(g7)
|
||||
f2g8_19 := int64(f2) * int64(g8_19)
|
||||
f2g9_19 := int64(f2) * int64(g9_19)
|
||||
f3g0 := int64(f3) * int64(g0)
|
||||
f3g1_2 := int64(f3_2) * int64(g1)
|
||||
f3g2 := int64(f3) * int64(g2)
|
||||
f3g3_2 := int64(f3_2) * int64(g3)
|
||||
f3g4 := int64(f3) * int64(g4)
|
||||
f3g5_2 := int64(f3_2) * int64(g5)
|
||||
f3g6 := int64(f3) * int64(g6)
|
||||
f3g7_38 := int64(f3_2) * int64(g7_19)
|
||||
f3g8_19 := int64(f3) * int64(g8_19)
|
||||
f3g9_38 := int64(f3_2) * int64(g9_19)
|
||||
f4g0 := int64(f4) * int64(g0)
|
||||
f4g1 := int64(f4) * int64(g1)
|
||||
f4g2 := int64(f4) * int64(g2)
|
||||
f4g3 := int64(f4) * int64(g3)
|
||||
f4g4 := int64(f4) * int64(g4)
|
||||
f4g5 := int64(f4) * int64(g5)
|
||||
f4g6_19 := int64(f4) * int64(g6_19)
|
||||
f4g7_19 := int64(f4) * int64(g7_19)
|
||||
f4g8_19 := int64(f4) * int64(g8_19)
|
||||
f4g9_19 := int64(f4) * int64(g9_19)
|
||||
f5g0 := int64(f5) * int64(g0)
|
||||
f5g1_2 := int64(f5_2) * int64(g1)
|
||||
f5g2 := int64(f5) * int64(g2)
|
||||
f5g3_2 := int64(f5_2) * int64(g3)
|
||||
f5g4 := int64(f5) * int64(g4)
|
||||
f5g5_38 := int64(f5_2) * int64(g5_19)
|
||||
f5g6_19 := int64(f5) * int64(g6_19)
|
||||
f5g7_38 := int64(f5_2) * int64(g7_19)
|
||||
f5g8_19 := int64(f5) * int64(g8_19)
|
||||
f5g9_38 := int64(f5_2) * int64(g9_19)
|
||||
f6g0 := int64(f6) * int64(g0)
|
||||
f6g1 := int64(f6) * int64(g1)
|
||||
f6g2 := int64(f6) * int64(g2)
|
||||
f6g3 := int64(f6) * int64(g3)
|
||||
f6g4_19 := int64(f6) * int64(g4_19)
|
||||
f6g5_19 := int64(f6) * int64(g5_19)
|
||||
f6g6_19 := int64(f6) * int64(g6_19)
|
||||
f6g7_19 := int64(f6) * int64(g7_19)
|
||||
f6g8_19 := int64(f6) * int64(g8_19)
|
||||
f6g9_19 := int64(f6) * int64(g9_19)
|
||||
f7g0 := int64(f7) * int64(g0)
|
||||
f7g1_2 := int64(f7_2) * int64(g1)
|
||||
f7g2 := int64(f7) * int64(g2)
|
||||
f7g3_38 := int64(f7_2) * int64(g3_19)
|
||||
f7g4_19 := int64(f7) * int64(g4_19)
|
||||
f7g5_38 := int64(f7_2) * int64(g5_19)
|
||||
f7g6_19 := int64(f7) * int64(g6_19)
|
||||
f7g7_38 := int64(f7_2) * int64(g7_19)
|
||||
f7g8_19 := int64(f7) * int64(g8_19)
|
||||
f7g9_38 := int64(f7_2) * int64(g9_19)
|
||||
f8g0 := int64(f8) * int64(g0)
|
||||
f8g1 := int64(f8) * int64(g1)
|
||||
f8g2_19 := int64(f8) * int64(g2_19)
|
||||
f8g3_19 := int64(f8) * int64(g3_19)
|
||||
f8g4_19 := int64(f8) * int64(g4_19)
|
||||
f8g5_19 := int64(f8) * int64(g5_19)
|
||||
f8g6_19 := int64(f8) * int64(g6_19)
|
||||
f8g7_19 := int64(f8) * int64(g7_19)
|
||||
f8g8_19 := int64(f8) * int64(g8_19)
|
||||
f8g9_19 := int64(f8) * int64(g9_19)
|
||||
f9g0 := int64(f9) * int64(g0)
|
||||
f9g1_38 := int64(f9_2) * int64(g1_19)
|
||||
f9g2_19 := int64(f9) * int64(g2_19)
|
||||
f9g3_38 := int64(f9_2) * int64(g3_19)
|
||||
f9g4_19 := int64(f9) * int64(g4_19)
|
||||
f9g5_38 := int64(f9_2) * int64(g5_19)
|
||||
f9g6_19 := int64(f9) * int64(g6_19)
|
||||
f9g7_38 := int64(f9_2) * int64(g7_19)
|
||||
f9g8_19 := int64(f9) * int64(g8_19)
|
||||
f9g9_38 := int64(f9_2) * int64(g9_19)
|
||||
h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
|
||||
h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
|
||||
h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
|
||||
h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
|
||||
h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
|
||||
h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
|
||||
h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
|
||||
h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
|
||||
h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
|
||||
h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
|
||||
var carry [10]int64
|
||||
|
||||
// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
|
||||
// i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
|
||||
// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
|
||||
// i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
// |h0| <= 2^25
|
||||
// |h4| <= 2^25
|
||||
// |h1| <= 1.51*2^58
|
||||
// |h5| <= 1.51*2^58
|
||||
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
// |h1| <= 2^24; from now on fits into int32
|
||||
// |h5| <= 2^24; from now on fits into int32
|
||||
// |h2| <= 1.21*2^59
|
||||
// |h6| <= 1.21*2^59
|
||||
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
// |h2| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h6| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h3| <= 1.51*2^58
|
||||
// |h7| <= 1.51*2^58
|
||||
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
// |h3| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h7| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h4| <= 1.52*2^33
|
||||
// |h8| <= 1.52*2^33
|
||||
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
// |h4| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h8| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h5| <= 1.01*2^24
|
||||
// |h9| <= 1.51*2^58
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
// |h9| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h0| <= 1.8*2^37
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
// |h0| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h1| <= 1.01*2^24
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
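// Aside: the *19 and *38 factors above come from reducing modulo
// p = 2^255-19, where 2^255 ≡ 19 (mod p); partial products that cross the
// 255-bit boundary are folded back scaled by 19 (with an extra factor of 2
// when both limbs involved are odd-indexed, giving 38). A standalone check
// of that identity (separate from the vendored file):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19)) // 2^255 - 19
	r := new(big.Int).Mod(new(big.Int).Lsh(one, 255), p)              // 2^255 mod p
	fmt.Println(r) // 19
}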
|
||||
|
||||
// feSquare calculates h = f*f. Can overlap h with f.
//
// Preconditions:
//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func feSquare(h, f *fieldElement) {
|
||||
f0 := f[0]
|
||||
f1 := f[1]
|
||||
f2 := f[2]
|
||||
f3 := f[3]
|
||||
f4 := f[4]
|
||||
f5 := f[5]
|
||||
f6 := f[6]
|
||||
f7 := f[7]
|
||||
f8 := f[8]
|
||||
f9 := f[9]
|
||||
f0_2 := 2 * f0
|
||||
f1_2 := 2 * f1
|
||||
f2_2 := 2 * f2
|
||||
f3_2 := 2 * f3
|
||||
f4_2 := 2 * f4
|
||||
f5_2 := 2 * f5
|
||||
f6_2 := 2 * f6
|
||||
f7_2 := 2 * f7
|
||||
f5_38 := 38 * f5 // 1.31*2^30
|
||||
f6_19 := 19 * f6 // 1.31*2^30
|
||||
f7_38 := 38 * f7 // 1.31*2^30
|
||||
f8_19 := 19 * f8 // 1.31*2^30
|
||||
f9_38 := 38 * f9 // 1.31*2^30
|
||||
f0f0 := int64(f0) * int64(f0)
|
||||
f0f1_2 := int64(f0_2) * int64(f1)
|
||||
f0f2_2 := int64(f0_2) * int64(f2)
|
||||
f0f3_2 := int64(f0_2) * int64(f3)
|
||||
f0f4_2 := int64(f0_2) * int64(f4)
|
||||
f0f5_2 := int64(f0_2) * int64(f5)
|
||||
f0f6_2 := int64(f0_2) * int64(f6)
|
||||
f0f7_2 := int64(f0_2) * int64(f7)
|
||||
f0f8_2 := int64(f0_2) * int64(f8)
|
||||
f0f9_2 := int64(f0_2) * int64(f9)
|
||||
f1f1_2 := int64(f1_2) * int64(f1)
|
||||
f1f2_2 := int64(f1_2) * int64(f2)
|
||||
f1f3_4 := int64(f1_2) * int64(f3_2)
|
||||
f1f4_2 := int64(f1_2) * int64(f4)
|
||||
f1f5_4 := int64(f1_2) * int64(f5_2)
|
||||
f1f6_2 := int64(f1_2) * int64(f6)
|
||||
f1f7_4 := int64(f1_2) * int64(f7_2)
|
||||
f1f8_2 := int64(f1_2) * int64(f8)
|
||||
f1f9_76 := int64(f1_2) * int64(f9_38)
|
||||
f2f2 := int64(f2) * int64(f2)
|
||||
f2f3_2 := int64(f2_2) * int64(f3)
|
||||
f2f4_2 := int64(f2_2) * int64(f4)
|
||||
f2f5_2 := int64(f2_2) * int64(f5)
|
||||
f2f6_2 := int64(f2_2) * int64(f6)
|
||||
f2f7_2 := int64(f2_2) * int64(f7)
|
||||
f2f8_38 := int64(f2_2) * int64(f8_19)
|
||||
f2f9_38 := int64(f2) * int64(f9_38)
|
||||
f3f3_2 := int64(f3_2) * int64(f3)
|
||||
f3f4_2 := int64(f3_2) * int64(f4)
|
||||
f3f5_4 := int64(f3_2) * int64(f5_2)
|
||||
f3f6_2 := int64(f3_2) * int64(f6)
|
||||
f3f7_76 := int64(f3_2) * int64(f7_38)
|
||||
f3f8_38 := int64(f3_2) * int64(f8_19)
|
||||
f3f9_76 := int64(f3_2) * int64(f9_38)
|
||||
f4f4 := int64(f4) * int64(f4)
|
||||
f4f5_2 := int64(f4_2) * int64(f5)
|
||||
f4f6_38 := int64(f4_2) * int64(f6_19)
|
||||
f4f7_38 := int64(f4) * int64(f7_38)
|
||||
f4f8_38 := int64(f4_2) * int64(f8_19)
|
||||
f4f9_38 := int64(f4) * int64(f9_38)
|
||||
f5f5_38 := int64(f5) * int64(f5_38)
|
||||
f5f6_38 := int64(f5_2) * int64(f6_19)
|
||||
f5f7_76 := int64(f5_2) * int64(f7_38)
|
||||
f5f8_38 := int64(f5_2) * int64(f8_19)
|
||||
f5f9_76 := int64(f5_2) * int64(f9_38)
|
||||
f6f6_19 := int64(f6) * int64(f6_19)
|
||||
f6f7_38 := int64(f6) * int64(f7_38)
|
||||
f6f8_38 := int64(f6_2) * int64(f8_19)
|
||||
f6f9_38 := int64(f6) * int64(f9_38)
|
||||
f7f7_38 := int64(f7) * int64(f7_38)
|
||||
f7f8_38 := int64(f7_2) * int64(f8_19)
|
||||
f7f9_76 := int64(f7_2) * int64(f9_38)
|
||||
f8f8_19 := int64(f8) * int64(f8_19)
|
||||
f8f9_38 := int64(f8) * int64(f9_38)
|
||||
f9f9_38 := int64(f9) * int64(f9_38)
|
||||
h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
|
||||
h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
|
||||
h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
|
||||
h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
|
||||
h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
|
||||
h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
|
||||
h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
|
||||
h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
|
||||
h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
|
||||
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
|
||||
var carry [10]int64
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feMul121666 calculates h = f * 121666. Can overlap h with f.
//
// Preconditions:
//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func feMul121666(h, f *fieldElement) {
|
||||
h0 := int64(f[0]) * 121666
|
||||
h1 := int64(f[1]) * 121666
|
||||
h2 := int64(f[2]) * 121666
|
||||
h3 := int64(f[3]) * 121666
|
||||
h4 := int64(f[4]) * 121666
|
||||
h5 := int64(f[5]) * 121666
|
||||
h6 := int64(f[6]) * 121666
|
||||
h7 := int64(f[7]) * 121666
|
||||
h8 := int64(f[8]) * 121666
|
||||
h9 := int64(f[9]) * 121666
|
||||
var carry [10]int64
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
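// Aside (separate from the vendored file): 121666 = (A+2)/4 for the
// Curve25519 Montgomery coefficient A = 486662; it is the a24 constant
// used in the Montgomery ladder step. A trivial standalone check:

package main

import "fmt"

func main() {
	const A = 486662         // Montgomery curve coefficient of Curve25519
	fmt.Println((A + 2) / 4) // 121666
}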
|
||||
|
||||
// feInvert sets out = z^-1.
|
||||
func feInvert(out, z *fieldElement) {
|
||||
var t0, t1, t2, t3 fieldElement
|
||||
var i int
|
||||
|
||||
feSquare(&t0, z)
|
||||
for i = 1; i < 1; i++ {
|
||||
feSquare(&t0, &t0)
|
||||
}
|
||||
feSquare(&t1, &t0)
|
||||
for i = 1; i < 2; i++ {
|
||||
feSquare(&t1, &t1)
|
||||
}
|
||||
feMul(&t1, z, &t1)
|
||||
feMul(&t0, &t0, &t1)
|
||||
feSquare(&t2, &t0)
|
||||
for i = 1; i < 1; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t1, &t2)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 5; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 10; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t2, &t2, &t1)
|
||||
feSquare(&t3, &t2)
|
||||
for i = 1; i < 20; i++ {
|
||||
feSquare(&t3, &t3)
|
||||
}
|
||||
feMul(&t2, &t3, &t2)
|
||||
feSquare(&t2, &t2)
|
||||
for i = 1; i < 10; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 50; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t2, &t2, &t1)
|
||||
feSquare(&t3, &t2)
|
||||
for i = 1; i < 100; i++ {
|
||||
feSquare(&t3, &t3)
|
||||
}
|
||||
feMul(&t2, &t3, &t2)
|
||||
feSquare(&t2, &t2)
|
||||
for i = 1; i < 50; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t1, &t1)
|
||||
for i = 1; i < 5; i++ {
|
||||
feSquare(&t1, &t1)
|
||||
}
|
||||
feMul(out, &t1, &t0)
|
||||
}
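// Aside: feInvert is a fixed square-and-multiply addition chain for
// z^(p-2) with p = 2^255-19, which equals z^-1 by Fermat's little theorem.
// A standalone cross-check of that equivalence using math/big (separate
// from the vendored file):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	z := big.NewInt(123456789)

	byFermat := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)
	byEuclid := new(big.Int).ModInverse(z, p)

	fmt.Println(byFermat.Cmp(byEuclid) == 0) // true
}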
|
||||
|
||||
func scalarMult(out, in, base *[32]byte) {
|
||||
var e [32]byte
|
||||
|
||||
copy(e[:], in[:])
|
||||
e[0] &= 248
|
||||
e[31] &= 127
|
||||
e[31] |= 64
|
||||
|
||||
var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
|
||||
feFromBytes(&x1, base)
|
||||
feOne(&x2)
|
||||
feCopy(&x3, &x1)
|
||||
feOne(&z3)
|
||||
|
||||
swap := int32(0)
|
||||
for pos := 254; pos >= 0; pos-- {
|
||||
b := e[pos/8] >> uint(pos&7)
|
||||
b &= 1
|
||||
swap ^= int32(b)
|
||||
feCSwap(&x2, &x3, swap)
|
||||
feCSwap(&z2, &z3, swap)
|
||||
swap = int32(b)
|
||||
|
||||
feSub(&tmp0, &x3, &z3)
|
||||
feSub(&tmp1, &x2, &z2)
|
||||
feAdd(&x2, &x2, &z2)
|
||||
feAdd(&z2, &x3, &z3)
|
||||
feMul(&z3, &tmp0, &x2)
|
||||
feMul(&z2, &z2, &tmp1)
|
||||
feSquare(&tmp0, &tmp1)
|
||||
feSquare(&tmp1, &x2)
|
||||
feAdd(&x3, &z3, &z2)
|
||||
feSub(&z2, &z3, &z2)
|
||||
feMul(&x2, &tmp1, &tmp0)
|
||||
feSub(&tmp1, &tmp1, &tmp0)
|
||||
feSquare(&z2, &z2)
|
||||
feMul121666(&z3, &tmp1)
|
||||
feSquare(&x3, &x3)
|
||||
feAdd(&tmp0, &tmp0, &z3)
|
||||
feMul(&z3, &x1, &z2)
|
||||
feMul(&z2, &tmp1, &tmp0)
|
||||
}
|
||||
|
||||
feCSwap(&x2, &x3, swap)
|
||||
feCSwap(&z2, &z3, swap)
|
||||
|
||||
feInvert(&z2, &z2)
|
||||
feMul(&x2, &x2, &z2)
|
||||
	feToBytes(out, &x2)
}
|
||||
if l := len(point); l != 32 {
|
||||
return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32)
|
||||
}
|
||||
copy(in[:], scalar)
|
||||
if &point[0] == &Basepoint[0] {
|
||||
checkBasepoint()
|
||||
ScalarBaseMult(dst, &in)
|
||||
} else {
|
||||
var base, zero [32]byte
|
||||
copy(base[:], point)
|
||||
ScalarMult(dst, &in, &base)
|
||||
if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
|
||||
return nil, fmt.Errorf("bad input point: low order point")
|
||||
}
|
||||
}
|
||||
return dst[:], nil
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
// +build amd64,!gccgo,!appengine,!purego
|
||||
|
||||
package curve25519
|
||||
|
||||
@@ -5,9 +5,84 @@
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
// +build amd64,!gccgo,!appengine,!purego
|
||||
|
||||
#include "const_amd64.h"
|
||||
#define REDMASK51 0x0007FFFFFFFFFFFF
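// REDMASK51 is 2^51 - 1, the mask that keeps the low 51 bits of a limb in
// the radix-2^51 representation ([5]uint64) used by this assembly.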
|
||||
|
||||
// These constants cannot be encoded in non-MOVQ immediates.
|
||||
// We access them directly from memory instead.
|
||||
|
||||
DATA ·_121666_213(SB)/8, $996687872
|
||||
GLOBL ·_121666_213(SB), 8, $8
|
||||
|
||||
DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
|
||||
GLOBL ·_2P0(SB), 8, $8
|
||||
|
||||
DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
|
||||
GLOBL ·_2P1234(SB), 8, $8
|
||||
|
||||
// func freeze(inout *[5]uint64)
|
||||
TEXT ·freeze(SB),7,$0-8
|
||||
MOVQ inout+0(FP), DI
|
||||
|
||||
MOVQ 0(DI),SI
|
||||
MOVQ 8(DI),DX
|
||||
MOVQ 16(DI),CX
|
||||
MOVQ 24(DI),R8
|
||||
MOVQ 32(DI),R9
|
||||
MOVQ $REDMASK51,AX
|
||||
MOVQ AX,R10
|
||||
SUBQ $18,R10
|
||||
MOVQ $3,R11
|
||||
REDUCELOOP:
|
||||
MOVQ SI,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,SI
|
||||
ADDQ R12,DX
|
||||
MOVQ DX,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,DX
|
||||
ADDQ R12,CX
|
||||
MOVQ CX,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,CX
|
||||
ADDQ R12,R8
|
||||
MOVQ R8,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,R8
|
||||
ADDQ R12,R9
|
||||
MOVQ R9,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,R9
|
||||
IMUL3Q $19,R12,R12
|
||||
ADDQ R12,SI
|
||||
SUBQ $1,R11
|
||||
JA REDUCELOOP
|
||||
MOVQ $1,R12
|
||||
CMPQ R10,SI
|
||||
CMOVQLT R11,R12
|
||||
CMPQ AX,DX
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,CX
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,R8
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,R9
|
||||
CMOVQNE R11,R12
|
||||
NEGQ R12
|
||||
ANDQ R12,AX
|
||||
ANDQ R12,R10
|
||||
SUBQ R10,SI
|
||||
SUBQ AX,DX
|
||||
SUBQ AX,CX
|
||||
SUBQ AX,R8
|
||||
SUBQ AX,R9
|
||||
MOVQ SI,0(DI)
|
||||
MOVQ DX,8(DI)
|
||||
MOVQ CX,16(DI)
|
||||
MOVQ R8,24(DI)
|
||||
MOVQ R9,32(DI)
|
||||
RET
|
||||
|
||||
// func ladderstep(inout *[5][5]uint64)
|
||||
TEXT ·ladderstep(SB),0,$296-8
|
||||
@@ -121,18 +196,18 @@ TEXT ·ladderstep(SB),0,$296-8
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ $REDMASK51,DX
|
||||
SHLQ $13,CX:SI
|
||||
SHLQ $13,SI,CX
|
||||
ANDQ DX,SI
|
||||
SHLQ $13,R9:R8
|
||||
SHLQ $13,R8,R9
|
||||
ANDQ DX,R8
|
||||
ADDQ CX,R8
|
||||
SHLQ $13,R11:R10
|
||||
SHLQ $13,R10,R11
|
||||
ANDQ DX,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
SHLQ $13,R12,R13
|
||||
ANDQ DX,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
SHLQ $13,R14,R15
|
||||
ANDQ DX,R14
|
||||
ADDQ R13,R14
|
||||
IMUL3Q $19,R15,CX
|
||||
@@ -236,18 +311,18 @@ TEXT ·ladderstep(SB),0,$296-8
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ $REDMASK51,DX
|
||||
SHLQ $13,CX:SI
|
||||
SHLQ $13,SI,CX
|
||||
ANDQ DX,SI
|
||||
SHLQ $13,R9:R8
|
||||
SHLQ $13,R8,R9
|
||||
ANDQ DX,R8
|
||||
ADDQ CX,R8
|
||||
SHLQ $13,R11:R10
|
||||
SHLQ $13,R10,R11
|
||||
ANDQ DX,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
SHLQ $13,R12,R13
|
||||
ANDQ DX,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
SHLQ $13,R14,R15
|
||||
ANDQ DX,R14
|
||||
ADDQ R13,R14
|
||||
IMUL3Q $19,R15,CX
|
||||
@@ -441,18 +516,18 @@ TEXT ·ladderstep(SB),0,$296-8
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ $REDMASK51,DX
|
||||
SHLQ $13,CX:SI
|
||||
SHLQ $13,SI,CX
|
||||
ANDQ DX,SI
|
||||
SHLQ $13,R9:R8
|
||||
SHLQ $13,R8,R9
|
||||
ANDQ DX,R8
|
||||
ADDQ CX,R8
|
||||
SHLQ $13,R11:R10
|
||||
SHLQ $13,R10,R11
|
||||
ANDQ DX,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
SHLQ $13,R12,R13
|
||||
ANDQ DX,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
SHLQ $13,R14,R15
|
||||
ANDQ DX,R14
|
||||
ADDQ R13,R14
|
||||
IMUL3Q $19,R15,CX
|
||||
@@ -591,18 +666,18 @@ TEXT ·ladderstep(SB),0,$296-8
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ $REDMASK51,DX
|
||||
SHLQ $13,CX:SI
|
||||
SHLQ $13,SI,CX
|
||||
ANDQ DX,SI
|
||||
SHLQ $13,R9:R8
|
||||
SHLQ $13,R8,R9
|
||||
ANDQ DX,R8
|
||||
ADDQ CX,R8
|
||||
SHLQ $13,R11:R10
|
||||
SHLQ $13,R10,R11
|
||||
ANDQ DX,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
SHLQ $13,R12,R13
|
||||
ANDQ DX,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
SHLQ $13,R14,R15
|
||||
ANDQ DX,R14
|
||||
ADDQ R13,R14
|
||||
IMUL3Q $19,R15,CX
|
||||
@@ -731,18 +806,18 @@ TEXT ·ladderstep(SB),0,$296-8
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ $REDMASK51,DX
|
||||
SHLQ $13,CX:SI
|
||||
SHLQ $13,SI,CX
|
||||
ANDQ DX,SI
|
||||
SHLQ $13,R9:R8
|
||||
SHLQ $13,R8,R9
|
||||
ANDQ DX,R8
|
||||
ADDQ CX,R8
|
||||
SHLQ $13,R11:R10
|
||||
SHLQ $13,R10,R11
|
||||
ANDQ DX,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
SHLQ $13,R12,R13
|
||||
ANDQ DX,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
SHLQ $13,R14,R15
|
||||
ANDQ DX,R14
|
||||
ADDQ R13,R14
|
||||
IMUL3Q $19,R15,CX
|
||||
@@ -846,18 +921,18 @@ TEXT ·ladderstep(SB),0,$296-8
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ $REDMASK51,DX
|
||||
SHLQ $13,CX:SI
|
||||
SHLQ $13,SI,CX
|
||||
ANDQ DX,SI
|
||||
SHLQ $13,R9:R8
|
||||
SHLQ $13,R8,R9
|
||||
ANDQ DX,R8
|
||||
ADDQ CX,R8
|
||||
SHLQ $13,R11:R10
|
||||
SHLQ $13,R10,R11
|
||||
ANDQ DX,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
SHLQ $13,R12,R13
|
||||
ANDQ DX,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
SHLQ $13,R14,R15
|
||||
ANDQ DX,R14
|
||||
ADDQ R13,R14
|
||||
IMUL3Q $19,R15,CX
|
||||
@@ -996,18 +1071,18 @@ TEXT ·ladderstep(SB),0,$296-8
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ $REDMASK51,DX
|
||||
SHLQ $13,CX:SI
|
||||
SHLQ $13,SI,CX
|
||||
ANDQ DX,SI
|
||||
SHLQ $13,R9:R8
|
||||
SHLQ $13,R8,R9
|
||||
ANDQ DX,R8
|
||||
ADDQ CX,R8
|
||||
SHLQ $13,R11:R10
|
||||
SHLQ $13,R10,R11
|
||||
ANDQ DX,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
SHLQ $13,R12,R13
|
||||
ANDQ DX,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
SHLQ $13,R14,R15
|
||||
ANDQ DX,R14
|
||||
ADDQ R13,R14
|
||||
IMUL3Q $19,R15,CX
|
||||
@@ -1146,18 +1221,18 @@ TEXT ·ladderstep(SB),0,$296-8
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ $REDMASK51,DX
|
||||
SHLQ $13,CX:SI
|
||||
SHLQ $13,SI,CX
|
||||
ANDQ DX,SI
|
||||
SHLQ $13,R9:R8
|
||||
SHLQ $13,R8,R9
|
||||
ANDQ DX,R8
|
||||
ADDQ CX,R8
|
||||
SHLQ $13,R11:R10
|
||||
SHLQ $13,R10,R11
|
||||
ANDQ DX,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
SHLQ $13,R12,R13
|
||||
ANDQ DX,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
SHLQ $13,R14,R15
|
||||
ANDQ DX,R14
|
||||
ADDQ R13,R14
|
||||
IMUL3Q $19,R15,CX
|
||||
@@ -1332,18 +1407,18 @@ TEXT ·ladderstep(SB),0,$296-8
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ $REDMASK51,DX
|
||||
SHLQ $13,CX:SI
|
||||
SHLQ $13,SI,CX
|
||||
ANDQ DX,SI
|
||||
SHLQ $13,R9:R8
|
||||
SHLQ $13,R8,R9
|
||||
ANDQ DX,R8
|
||||
ADDQ CX,R8
|
||||
SHLQ $13,R11:R10
|
||||
SHLQ $13,R10,R11
|
||||
ANDQ DX,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
SHLQ $13,R12,R13
|
||||
ANDQ DX,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
SHLQ $13,R14,R15
|
||||
ANDQ DX,R14
|
||||
ADDQ R13,R14
|
||||
IMUL3Q $19,R15,CX
|
||||
@@ -1375,3 +1450,344 @@ TEXT ·ladderstep(SB),0,$296-8
|
||||
MOVQ AX,104(DI)
|
||||
MOVQ R10,112(DI)
|
||||
RET
|
||||
|
||||
// func cswap(inout *[4][5]uint64, v uint64)
|
||||
TEXT ·cswap(SB),7,$0
|
||||
MOVQ inout+0(FP),DI
|
||||
MOVQ v+8(FP),SI
|
||||
|
||||
SUBQ $1, SI
|
||||
NOTQ SI
|
||||
MOVQ SI, X15
|
||||
PSHUFD $0x44, X15, X15
|
||||
|
||||
MOVOU 0(DI), X0
|
||||
MOVOU 16(DI), X2
|
||||
MOVOU 32(DI), X4
|
||||
MOVOU 48(DI), X6
|
||||
MOVOU 64(DI), X8
|
||||
MOVOU 80(DI), X1
|
||||
MOVOU 96(DI), X3
|
||||
MOVOU 112(DI), X5
|
||||
MOVOU 128(DI), X7
|
||||
MOVOU 144(DI), X9
|
||||
|
||||
MOVO X1, X10
|
||||
MOVO X3, X11
|
||||
MOVO X5, X12
|
||||
MOVO X7, X13
|
||||
MOVO X9, X14
|
||||
|
||||
PXOR X0, X10
|
||||
PXOR X2, X11
|
||||
PXOR X4, X12
|
||||
PXOR X6, X13
|
||||
PXOR X8, X14
|
||||
PAND X15, X10
|
||||
PAND X15, X11
|
||||
PAND X15, X12
|
||||
PAND X15, X13
|
||||
PAND X15, X14
|
||||
PXOR X10, X0
|
||||
PXOR X10, X1
|
||||
PXOR X11, X2
|
||||
PXOR X11, X3
|
||||
PXOR X12, X4
|
||||
PXOR X12, X5
|
||||
PXOR X13, X6
|
||||
PXOR X13, X7
|
||||
PXOR X14, X8
|
||||
PXOR X14, X9
|
||||
|
||||
MOVOU X0, 0(DI)
|
||||
MOVOU X2, 16(DI)
|
||||
MOVOU X4, 32(DI)
|
||||
MOVOU X6, 48(DI)
|
||||
MOVOU X8, 64(DI)
|
||||
MOVOU X1, 80(DI)
|
||||
MOVOU X3, 96(DI)
|
||||
MOVOU X5, 112(DI)
|
||||
MOVOU X7, 128(DI)
|
||||
MOVOU X9, 144(DI)
|
||||
RET
|
||||
|
||||
// func mul(dest, a, b *[5]uint64)
|
||||
TEXT ·mul(SB),0,$16-24
|
||||
MOVQ dest+0(FP), DI
|
||||
MOVQ a+8(FP), SI
|
||||
MOVQ b+16(FP), DX
|
||||
|
||||
MOVQ DX,CX
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MOVQ AX,0(SP)
|
||||
MULQ 16(CX)
|
||||
MOVQ AX,R8
|
||||
MOVQ DX,R9
|
||||
MOVQ 32(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MOVQ AX,8(SP)
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 8(CX)
|
||||
MOVQ AX,R10
|
||||
MOVQ DX,R11
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 16(CX)
|
||||
MOVQ AX,R12
|
||||
MOVQ DX,R13
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 24(CX)
|
||||
MOVQ AX,R14
|
||||
MOVQ DX,R15
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 32(CX)
|
||||
MOVQ AX,BX
|
||||
MOVQ DX,BP
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 8(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 24(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 24(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 0(SP),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 0(SP),AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 32(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ $REDMASK51,SI
|
||||
SHLQ $13,R8,R9
|
||||
ANDQ SI,R8
|
||||
SHLQ $13,R10,R11
|
||||
ANDQ SI,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R12,R13
|
||||
ANDQ SI,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R14,R15
|
||||
ANDQ SI,R14
|
||||
ADDQ R13,R14
|
||||
SHLQ $13,BX,BP
|
||||
ANDQ SI,BX
|
||||
ADDQ R15,BX
|
||||
IMUL3Q $19,BP,DX
|
||||
ADDQ DX,R8
|
||||
MOVQ R8,DX
|
||||
SHRQ $51,DX
|
||||
ADDQ R10,DX
|
||||
MOVQ DX,CX
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,R8
|
||||
ADDQ R12,DX
|
||||
MOVQ DX,R9
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,CX
|
||||
ADDQ R14,DX
|
||||
MOVQ DX,AX
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,R9
|
||||
ADDQ BX,DX
|
||||
MOVQ DX,R10
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,AX
|
||||
IMUL3Q $19,DX,DX
|
||||
ADDQ DX,R8
|
||||
ANDQ SI,R10
|
||||
MOVQ R8,0(DI)
|
||||
MOVQ CX,8(DI)
|
||||
MOVQ R9,16(DI)
|
||||
MOVQ AX,24(DI)
|
||||
MOVQ R10,32(DI)
|
||||
RET
|
||||
|
||||
// func square(out, in *[5]uint64)
|
||||
TEXT ·square(SB),7,$0-16
|
||||
MOVQ out+0(FP), DI
|
||||
MOVQ in+8(FP), SI
|
||||
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 0(SI)
|
||||
MOVQ AX,CX
|
||||
MOVQ DX,R8
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 8(SI)
|
||||
MOVQ AX,R9
|
||||
MOVQ DX,R10
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 16(SI)
|
||||
MOVQ AX,R11
|
||||
MOVQ DX,R12
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 24(SI)
|
||||
MOVQ AX,R13
|
||||
MOVQ DX,R14
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 32(SI)
|
||||
MOVQ AX,R15
|
||||
MOVQ DX,BX
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 8(SI)
|
||||
ADDQ AX,R11
|
||||
ADCQ DX,R12
|
||||
MOVQ 8(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 16(SI)
|
||||
ADDQ AX,R13
|
||||
ADCQ DX,R14
|
||||
MOVQ 8(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,R15
|
||||
ADCQ DX,BX
|
||||
MOVQ 8(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,CX
|
||||
ADCQ DX,R8
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 16(SI)
|
||||
ADDQ AX,R15
|
||||
ADCQ DX,BX
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,CX
|
||||
ADCQ DX,R8
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R9
|
||||
ADCQ DX,R10
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,R9
|
||||
ADCQ DX,R10
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R11
|
||||
ADCQ DX,R12
|
||||
MOVQ 32(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R13
|
||||
ADCQ DX,R14
|
||||
MOVQ $REDMASK51,SI
|
||||
SHLQ $13,CX,R8
|
||||
ANDQ SI,CX
|
||||
SHLQ $13,R9,R10
|
||||
ANDQ SI,R9
|
||||
ADDQ R8,R9
|
||||
SHLQ $13,R11,R12
|
||||
ANDQ SI,R11
|
||||
ADDQ R10,R11
|
||||
SHLQ $13,R13,R14
|
||||
ANDQ SI,R13
|
||||
ADDQ R12,R13
|
||||
SHLQ $13,R15,BX
|
||||
ANDQ SI,R15
|
||||
ADDQ R14,R15
|
||||
IMUL3Q $19,BX,DX
|
||||
ADDQ DX,CX
|
||||
MOVQ CX,DX
|
||||
SHRQ $51,DX
|
||||
ADDQ R9,DX
|
||||
ANDQ SI,CX
|
||||
MOVQ DX,R8
|
||||
SHRQ $51,DX
|
||||
ADDQ R11,DX
|
||||
ANDQ SI,R8
|
||||
MOVQ DX,R9
|
||||
SHRQ $51,DX
|
||||
ADDQ R13,DX
|
||||
ANDQ SI,R9
|
||||
MOVQ DX,AX
|
||||
SHRQ $51,DX
|
||||
ADDQ R15,DX
|
||||
ANDQ SI,AX
|
||||
MOVQ DX,R10
|
||||
SHRQ $51,DX
|
||||
IMUL3Q $19,DX,DX
|
||||
ADDQ DX,CX
|
||||
ANDQ SI,R10
|
||||
MOVQ CX,0(DI)
|
||||
MOVQ R8,8(DI)
|
||||
MOVQ R9,16(DI)
|
||||
MOVQ AX,24(DI)
|
||||
MOVQ R10,32(DI)
|
||||
RET
|
||||
828
vendor/golang.org/x/crypto/curve25519/curve25519_generic.go
generated
vendored
Normal file
@@ -0,0 +1,828 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package curve25519
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// This code is a port of the public domain, "ref10" implementation of
|
||||
// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
|
||||
|
||||
// fieldElement represents an element of the field GF(2^255 - 19). An element
// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
// context.
type fieldElement [10]int32
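// Aside: a standalone sketch (separate from the vendored file; feToBig is a
// made-up helper) that evaluates the ten limbs at their radix positions,
// i.e. t[0] + t[1]*2^26 + t[2]*2^51 + ... + t[9]*2^230:

package main

import (
	"fmt"
	"math/big"
)

func feToBig(t [10]int32) *big.Int {
	exps := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
	sum := new(big.Int)
	for i, e := range exps {
		scale := new(big.Int).Lsh(big.NewInt(1), e) // 2^e
		sum.Add(sum, new(big.Int).Mul(big.NewInt(int64(t[i])), scale))
	}
	return sum
}

func main() {
	var fe [10]int32
	fe[0] = 19
	fe[1] = 1
	fmt.Println(feToBig(fe)) // 67108883 = 19 + 2^26
}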
|
||||
|
||||
func feZero(fe *fieldElement) {
|
||||
for i := range fe {
|
||||
fe[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
func feOne(fe *fieldElement) {
|
||||
feZero(fe)
|
||||
fe[0] = 1
|
||||
}
|
||||
|
||||
func feAdd(dst, a, b *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = a[i] + b[i]
|
||||
}
|
||||
}
|
||||
|
||||
func feSub(dst, a, b *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = a[i] - b[i]
|
||||
}
|
||||
}
|
||||
|
||||
func feCopy(dst, src *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = src[i]
|
||||
}
|
||||
}
|
||||
|
||||
// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
//
// Preconditions: b in {0,1}.
func feCSwap(f, g *fieldElement, b int32) {
|
||||
b = -b
|
||||
for i := range f {
|
||||
t := b & (f[i] ^ g[i])
|
||||
f[i] ^= t
|
||||
g[i] ^= t
|
||||
}
|
||||
}
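// Aside: the mask trick above works because -b is all-zero or all-one bits
// for b in {0,1}. A standalone sketch on a single pair of values (separate
// from the vendored file; cswap32 is a made-up name):

package main

import "fmt"

func cswap32(x, y, b int32) (int32, int32) {
	mask := -b          // 0 when b==0, all ones when b==1
	t := mask & (x ^ y) // 0 or x^y
	return x ^ t, y ^ t // unchanged or swapped, with no branch on b
}

func main() {
	fmt.Println(cswap32(7, 9, 0)) // 7 9
	fmt.Println(cswap32(7, 9, 1)) // 9 7
}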
|
||||
|
||||
// load3 reads a 24-bit, little-endian value from in.
|
||||
func load3(in []byte) int64 {
|
||||
var r int64
|
||||
r = int64(in[0])
|
||||
r |= int64(in[1]) << 8
|
||||
r |= int64(in[2]) << 16
|
||||
return r
|
||||
}
|
||||
|
||||
// load4 reads a 32-bit, little-endian value from in.
|
||||
func load4(in []byte) int64 {
|
||||
return int64(binary.LittleEndian.Uint32(in))
|
||||
}
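// Aside: a quick standalone check of the little-endian loads above
// (separate from the vendored file): load3 over the bytes {0x01, 0x02, 0x03}
// assembles the 24-bit value 0x030201.

package main

import "fmt"

func main() {
	in := []byte{0x01, 0x02, 0x03}
	r := int64(in[0]) | int64(in[1])<<8 | int64(in[2])<<16
	fmt.Printf("%#x\n", r) // 0x30201
}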
|
||||
|
||||
func feFromBytes(dst *fieldElement, src *[32]byte) {
|
||||
h0 := load4(src[:])
|
||||
h1 := load3(src[4:]) << 6
|
||||
h2 := load3(src[7:]) << 5
|
||||
h3 := load3(src[10:]) << 3
|
||||
h4 := load3(src[13:]) << 2
|
||||
h5 := load4(src[16:])
|
||||
h6 := load3(src[20:]) << 7
|
||||
h7 := load3(src[23:]) << 5
|
||||
h8 := load3(src[26:]) << 4
|
||||
h9 := (load3(src[29:]) & 0x7fffff) << 2
|
||||
|
||||
var carry [10]int64
|
||||
carry[9] = (h9 + 1<<24) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
carry[1] = (h1 + 1<<24) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[3] = (h3 + 1<<24) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[5] = (h5 + 1<<24) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
carry[7] = (h7 + 1<<24) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[0] = (h0 + 1<<25) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[2] = (h2 + 1<<25) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[4] = (h4 + 1<<25) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[6] = (h6 + 1<<25) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
carry[8] = (h8 + 1<<25) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
dst[0] = int32(h0)
|
||||
dst[1] = int32(h1)
|
||||
dst[2] = int32(h2)
|
||||
dst[3] = int32(h3)
|
||||
dst[4] = int32(h4)
|
||||
dst[5] = int32(h5)
|
||||
dst[6] = int32(h6)
|
||||
dst[7] = int32(h7)
|
||||
dst[8] = int32(h8)
|
||||
dst[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feToBytes marshals h to s.
// Preconditions:
//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Write p=2^255-19; q=floor(h/p).
// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
//
// Proof:
//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
//
//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
//   Then 0<y<1.
//
//   Write r=h-pq.
//   Have 0<=r<=p-1=2^255-20.
//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
//
//   Write x=r+19(2^-255)r+y.
//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
//
//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
func feToBytes(s *[32]byte, h *fieldElement) {
|
||||
var carry [10]int32
|
||||
|
||||
q := (19*h[9] + (1 << 24)) >> 25
|
||||
q = (h[0] + q) >> 26
|
||||
q = (h[1] + q) >> 25
|
||||
q = (h[2] + q) >> 26
|
||||
q = (h[3] + q) >> 25
|
||||
q = (h[4] + q) >> 26
|
||||
q = (h[5] + q) >> 25
|
||||
q = (h[6] + q) >> 26
|
||||
q = (h[7] + q) >> 25
|
||||
q = (h[8] + q) >> 26
|
||||
q = (h[9] + q) >> 25
|
||||
|
||||
// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
|
||||
h[0] += 19 * q
|
||||
// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
|
||||
|
||||
carry[0] = h[0] >> 26
|
||||
h[1] += carry[0]
|
||||
h[0] -= carry[0] << 26
|
||||
carry[1] = h[1] >> 25
|
||||
h[2] += carry[1]
|
||||
h[1] -= carry[1] << 25
|
||||
carry[2] = h[2] >> 26
|
||||
h[3] += carry[2]
|
||||
h[2] -= carry[2] << 26
|
||||
carry[3] = h[3] >> 25
|
||||
h[4] += carry[3]
|
||||
h[3] -= carry[3] << 25
|
||||
carry[4] = h[4] >> 26
|
||||
h[5] += carry[4]
|
||||
h[4] -= carry[4] << 26
|
||||
carry[5] = h[5] >> 25
|
||||
h[6] += carry[5]
|
||||
h[5] -= carry[5] << 25
|
||||
carry[6] = h[6] >> 26
|
||||
h[7] += carry[6]
|
||||
h[6] -= carry[6] << 26
|
||||
carry[7] = h[7] >> 25
|
||||
h[8] += carry[7]
|
||||
h[7] -= carry[7] << 25
|
||||
carry[8] = h[8] >> 26
|
||||
h[9] += carry[8]
|
||||
h[8] -= carry[8] << 26
|
||||
carry[9] = h[9] >> 25
|
||||
h[9] -= carry[9] << 25
|
||||
// h10 = carry9
|
||||
|
||||
// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
|
||||
// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
|
||||
// evidently 2^255 h10-2^255 q = 0.
|
||||
// Goal: Output h[0]+...+2^230 h[9].
|
||||
|
||||
s[0] = byte(h[0] >> 0)
|
||||
s[1] = byte(h[0] >> 8)
|
||||
s[2] = byte(h[0] >> 16)
|
||||
s[3] = byte((h[0] >> 24) | (h[1] << 2))
|
||||
s[4] = byte(h[1] >> 6)
|
||||
s[5] = byte(h[1] >> 14)
|
||||
s[6] = byte((h[1] >> 22) | (h[2] << 3))
|
||||
s[7] = byte(h[2] >> 5)
|
||||
s[8] = byte(h[2] >> 13)
|
||||
s[9] = byte((h[2] >> 21) | (h[3] << 5))
|
||||
s[10] = byte(h[3] >> 3)
|
||||
s[11] = byte(h[3] >> 11)
|
||||
s[12] = byte((h[3] >> 19) | (h[4] << 6))
|
||||
s[13] = byte(h[4] >> 2)
|
||||
s[14] = byte(h[4] >> 10)
|
||||
s[15] = byte(h[4] >> 18)
|
||||
s[16] = byte(h[5] >> 0)
|
||||
s[17] = byte(h[5] >> 8)
|
||||
s[18] = byte(h[5] >> 16)
|
||||
s[19] = byte((h[5] >> 24) | (h[6] << 1))
|
||||
s[20] = byte(h[6] >> 7)
|
||||
s[21] = byte(h[6] >> 15)
|
||||
s[22] = byte((h[6] >> 23) | (h[7] << 3))
|
||||
s[23] = byte(h[7] >> 5)
|
||||
s[24] = byte(h[7] >> 13)
|
||||
s[25] = byte((h[7] >> 21) | (h[8] << 4))
|
||||
s[26] = byte(h[8] >> 4)
|
||||
s[27] = byte(h[8] >> 12)
|
||||
s[28] = byte((h[8] >> 20) | (h[9] << 6))
|
||||
s[29] = byte(h[9] >> 2)
|
||||
s[30] = byte(h[9] >> 10)
|
||||
s[31] = byte(h[9] >> 18)
|
||||
}
|
||||
|
||||
// feMul calculates h = f * g
// Can overlap h with f or g.
//
// Preconditions:
//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//    |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
//
// Notes on implementation strategy:
//
// Using schoolbook multiplication.
// Karatsuba would save a little in some cost models.
//
// Most multiplications by 2 and 19 are 32-bit precomputations;
// cheaper than 64-bit postcomputations.
//
// There is one remaining multiplication by 19 in the carry chain;
// one *19 precomputation can be merged into this,
// but the resulting data flow is considerably less clean.
//
// There are 12 carries below.
// 10 of them are 2-way parallelizable and vectorizable.
// Can get away with 11 carries, but then data flow is much deeper.
//
// With tighter constraints on inputs can squeeze carries into int32.
func feMul(h, f, g *fieldElement) {
|
||||
f0 := f[0]
|
||||
f1 := f[1]
|
||||
f2 := f[2]
|
||||
f3 := f[3]
|
||||
f4 := f[4]
|
||||
f5 := f[5]
|
||||
f6 := f[6]
|
||||
f7 := f[7]
|
||||
f8 := f[8]
|
||||
f9 := f[9]
|
||||
g0 := g[0]
|
||||
g1 := g[1]
|
||||
g2 := g[2]
|
||||
g3 := g[3]
|
||||
g4 := g[4]
|
||||
g5 := g[5]
|
||||
g6 := g[6]
|
||||
g7 := g[7]
|
||||
g8 := g[8]
|
||||
g9 := g[9]
|
||||
g1_19 := 19 * g1 // 1.4*2^29
|
||||
g2_19 := 19 * g2 // 1.4*2^30; still ok
|
||||
g3_19 := 19 * g3
|
||||
g4_19 := 19 * g4
|
||||
g5_19 := 19 * g5
|
||||
g6_19 := 19 * g6
|
||||
g7_19 := 19 * g7
|
||||
g8_19 := 19 * g8
|
||||
g9_19 := 19 * g9
|
||||
f1_2 := 2 * f1
|
||||
f3_2 := 2 * f3
|
||||
f5_2 := 2 * f5
|
||||
f7_2 := 2 * f7
|
||||
f9_2 := 2 * f9
|
||||
f0g0 := int64(f0) * int64(g0)
|
||||
f0g1 := int64(f0) * int64(g1)
|
||||
f0g2 := int64(f0) * int64(g2)
|
||||
f0g3 := int64(f0) * int64(g3)
|
||||
f0g4 := int64(f0) * int64(g4)
|
||||
f0g5 := int64(f0) * int64(g5)
|
||||
f0g6 := int64(f0) * int64(g6)
|
||||
f0g7 := int64(f0) * int64(g7)
|
||||
f0g8 := int64(f0) * int64(g8)
|
||||
f0g9 := int64(f0) * int64(g9)
|
||||
f1g0 := int64(f1) * int64(g0)
|
||||
f1g1_2 := int64(f1_2) * int64(g1)
|
||||
f1g2 := int64(f1) * int64(g2)
|
||||
f1g3_2 := int64(f1_2) * int64(g3)
|
||||
f1g4 := int64(f1) * int64(g4)
|
||||
f1g5_2 := int64(f1_2) * int64(g5)
|
||||
f1g6 := int64(f1) * int64(g6)
|
||||
f1g7_2 := int64(f1_2) * int64(g7)
|
||||
f1g8 := int64(f1) * int64(g8)
|
||||
f1g9_38 := int64(f1_2) * int64(g9_19)
|
||||
f2g0 := int64(f2) * int64(g0)
|
||||
f2g1 := int64(f2) * int64(g1)
|
||||
f2g2 := int64(f2) * int64(g2)
|
||||
f2g3 := int64(f2) * int64(g3)
|
||||
f2g4 := int64(f2) * int64(g4)
|
||||
f2g5 := int64(f2) * int64(g5)
|
||||
f2g6 := int64(f2) * int64(g6)
|
||||
f2g7 := int64(f2) * int64(g7)
|
||||
f2g8_19 := int64(f2) * int64(g8_19)
|
||||
f2g9_19 := int64(f2) * int64(g9_19)
|
||||
f3g0 := int64(f3) * int64(g0)
|
||||
f3g1_2 := int64(f3_2) * int64(g1)
|
||||
f3g2 := int64(f3) * int64(g2)
|
||||
f3g3_2 := int64(f3_2) * int64(g3)
|
||||
f3g4 := int64(f3) * int64(g4)
|
||||
f3g5_2 := int64(f3_2) * int64(g5)
|
||||
f3g6 := int64(f3) * int64(g6)
|
||||
f3g7_38 := int64(f3_2) * int64(g7_19)
|
||||
f3g8_19 := int64(f3) * int64(g8_19)
|
||||
f3g9_38 := int64(f3_2) * int64(g9_19)
|
||||
f4g0 := int64(f4) * int64(g0)
|
||||
f4g1 := int64(f4) * int64(g1)
|
||||
f4g2 := int64(f4) * int64(g2)
|
||||
f4g3 := int64(f4) * int64(g3)
|
||||
f4g4 := int64(f4) * int64(g4)
|
||||
f4g5 := int64(f4) * int64(g5)
|
||||
f4g6_19 := int64(f4) * int64(g6_19)
|
||||
f4g7_19 := int64(f4) * int64(g7_19)
|
||||
f4g8_19 := int64(f4) * int64(g8_19)
|
||||
f4g9_19 := int64(f4) * int64(g9_19)
|
||||
f5g0 := int64(f5) * int64(g0)
|
||||
f5g1_2 := int64(f5_2) * int64(g1)
|
||||
f5g2 := int64(f5) * int64(g2)
|
||||
f5g3_2 := int64(f5_2) * int64(g3)
|
||||
f5g4 := int64(f5) * int64(g4)
|
||||
f5g5_38 := int64(f5_2) * int64(g5_19)
|
||||
f5g6_19 := int64(f5) * int64(g6_19)
|
||||
f5g7_38 := int64(f5_2) * int64(g7_19)
|
||||
f5g8_19 := int64(f5) * int64(g8_19)
|
||||
f5g9_38 := int64(f5_2) * int64(g9_19)
|
||||
f6g0 := int64(f6) * int64(g0)
|
||||
f6g1 := int64(f6) * int64(g1)
|
||||
f6g2 := int64(f6) * int64(g2)
|
||||
f6g3 := int64(f6) * int64(g3)
|
||||
f6g4_19 := int64(f6) * int64(g4_19)
|
||||
f6g5_19 := int64(f6) * int64(g5_19)
|
||||
f6g6_19 := int64(f6) * int64(g6_19)
|
||||
f6g7_19 := int64(f6) * int64(g7_19)
|
||||
f6g8_19 := int64(f6) * int64(g8_19)
|
||||
f6g9_19 := int64(f6) * int64(g9_19)
|
||||
f7g0 := int64(f7) * int64(g0)
|
||||
f7g1_2 := int64(f7_2) * int64(g1)
|
||||
f7g2 := int64(f7) * int64(g2)
|
||||
f7g3_38 := int64(f7_2) * int64(g3_19)
|
||||
f7g4_19 := int64(f7) * int64(g4_19)
|
||||
f7g5_38 := int64(f7_2) * int64(g5_19)
|
||||
f7g6_19 := int64(f7) * int64(g6_19)
|
||||
f7g7_38 := int64(f7_2) * int64(g7_19)
|
||||
f7g8_19 := int64(f7) * int64(g8_19)
|
||||
f7g9_38 := int64(f7_2) * int64(g9_19)
|
||||
f8g0 := int64(f8) * int64(g0)
|
||||
f8g1 := int64(f8) * int64(g1)
|
||||
f8g2_19 := int64(f8) * int64(g2_19)
|
||||
f8g3_19 := int64(f8) * int64(g3_19)
|
||||
f8g4_19 := int64(f8) * int64(g4_19)
|
||||
f8g5_19 := int64(f8) * int64(g5_19)
|
||||
f8g6_19 := int64(f8) * int64(g6_19)
|
||||
f8g7_19 := int64(f8) * int64(g7_19)
|
||||
f8g8_19 := int64(f8) * int64(g8_19)
|
||||
f8g9_19 := int64(f8) * int64(g9_19)
|
||||
f9g0 := int64(f9) * int64(g0)
|
||||
f9g1_38 := int64(f9_2) * int64(g1_19)
|
||||
f9g2_19 := int64(f9) * int64(g2_19)
|
||||
f9g3_38 := int64(f9_2) * int64(g3_19)
|
||||
f9g4_19 := int64(f9) * int64(g4_19)
|
||||
f9g5_38 := int64(f9_2) * int64(g5_19)
|
||||
f9g6_19 := int64(f9) * int64(g6_19)
|
||||
f9g7_38 := int64(f9_2) * int64(g7_19)
|
||||
f9g8_19 := int64(f9) * int64(g8_19)
|
||||
f9g9_38 := int64(f9_2) * int64(g9_19)
|
||||
h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
|
||||
h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
|
||||
h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
|
||||
h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
|
||||
h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
|
||||
h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
|
||||
h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
|
||||
h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
|
||||
h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
|
||||
h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
|
||||
var carry [10]int64
|
||||
|
||||
// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
|
||||
// i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
|
||||
// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
|
||||
// i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
// |h0| <= 2^25
|
||||
// |h4| <= 2^25
|
||||
// |h1| <= 1.51*2^58
|
||||
// |h5| <= 1.51*2^58
|
||||
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
// |h1| <= 2^24; from now on fits into int32
|
||||
// |h5| <= 2^24; from now on fits into int32
|
||||
// |h2| <= 1.21*2^59
|
||||
// |h6| <= 1.21*2^59
|
||||
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
// |h2| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h6| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h3| <= 1.51*2^58
|
||||
// |h7| <= 1.51*2^58
|
||||
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
// |h3| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h7| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h4| <= 1.52*2^33
|
||||
// |h8| <= 1.52*2^33
|
||||
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
// |h4| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h8| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h5| <= 1.01*2^24
|
||||
// |h9| <= 1.51*2^58
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
// |h9| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h0| <= 1.8*2^37
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
// |h0| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h1| <= 1.01*2^24
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feSquare calculates h = f*f. Can overlap h with f.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
func feSquare(h, f *fieldElement) {
|
||||
f0 := f[0]
|
||||
f1 := f[1]
|
||||
f2 := f[2]
|
||||
f3 := f[3]
|
||||
f4 := f[4]
|
||||
f5 := f[5]
|
||||
f6 := f[6]
|
||||
f7 := f[7]
|
||||
f8 := f[8]
|
||||
f9 := f[9]
|
||||
f0_2 := 2 * f0
|
||||
f1_2 := 2 * f1
|
||||
f2_2 := 2 * f2
|
||||
f3_2 := 2 * f3
|
||||
f4_2 := 2 * f4
|
||||
f5_2 := 2 * f5
|
||||
f6_2 := 2 * f6
|
||||
f7_2 := 2 * f7
|
||||
f5_38 := 38 * f5 // 1.31*2^30
|
||||
f6_19 := 19 * f6 // 1.31*2^30
|
||||
f7_38 := 38 * f7 // 1.31*2^30
|
||||
f8_19 := 19 * f8 // 1.31*2^30
|
||||
f9_38 := 38 * f9 // 1.31*2^30
|
||||
f0f0 := int64(f0) * int64(f0)
|
||||
f0f1_2 := int64(f0_2) * int64(f1)
|
||||
f0f2_2 := int64(f0_2) * int64(f2)
|
||||
f0f3_2 := int64(f0_2) * int64(f3)
|
||||
f0f4_2 := int64(f0_2) * int64(f4)
|
||||
f0f5_2 := int64(f0_2) * int64(f5)
|
||||
f0f6_2 := int64(f0_2) * int64(f6)
|
||||
f0f7_2 := int64(f0_2) * int64(f7)
|
||||
f0f8_2 := int64(f0_2) * int64(f8)
|
||||
f0f9_2 := int64(f0_2) * int64(f9)
|
||||
f1f1_2 := int64(f1_2) * int64(f1)
|
||||
	f1f2_2 := int64(f1_2) * int64(f2)
	f1f3_4 := int64(f1_2) * int64(f3_2)
	f1f4_2 := int64(f1_2) * int64(f4)
	f1f5_4 := int64(f1_2) * int64(f5_2)
	f1f6_2 := int64(f1_2) * int64(f6)
	f1f7_4 := int64(f1_2) * int64(f7_2)
	f1f8_2 := int64(f1_2) * int64(f8)
	f1f9_76 := int64(f1_2) * int64(f9_38)
	f2f2 := int64(f2) * int64(f2)
	f2f3_2 := int64(f2_2) * int64(f3)
	f2f4_2 := int64(f2_2) * int64(f4)
	f2f5_2 := int64(f2_2) * int64(f5)
	f2f6_2 := int64(f2_2) * int64(f6)
	f2f7_2 := int64(f2_2) * int64(f7)
	f2f8_38 := int64(f2_2) * int64(f8_19)
	f2f9_38 := int64(f2) * int64(f9_38)
	f3f3_2 := int64(f3_2) * int64(f3)
	f3f4_2 := int64(f3_2) * int64(f4)
	f3f5_4 := int64(f3_2) * int64(f5_2)
	f3f6_2 := int64(f3_2) * int64(f6)
	f3f7_76 := int64(f3_2) * int64(f7_38)
	f3f8_38 := int64(f3_2) * int64(f8_19)
	f3f9_76 := int64(f3_2) * int64(f9_38)
	f4f4 := int64(f4) * int64(f4)
	f4f5_2 := int64(f4_2) * int64(f5)
	f4f6_38 := int64(f4_2) * int64(f6_19)
	f4f7_38 := int64(f4) * int64(f7_38)
	f4f8_38 := int64(f4_2) * int64(f8_19)
	f4f9_38 := int64(f4) * int64(f9_38)
	f5f5_38 := int64(f5) * int64(f5_38)
	f5f6_38 := int64(f5_2) * int64(f6_19)
	f5f7_76 := int64(f5_2) * int64(f7_38)
	f5f8_38 := int64(f5_2) * int64(f8_19)
	f5f9_76 := int64(f5_2) * int64(f9_38)
	f6f6_19 := int64(f6) * int64(f6_19)
	f6f7_38 := int64(f6) * int64(f7_38)
	f6f8_38 := int64(f6_2) * int64(f8_19)
	f6f9_38 := int64(f6) * int64(f9_38)
	f7f7_38 := int64(f7) * int64(f7_38)
	f7f8_38 := int64(f7_2) * int64(f8_19)
	f7f9_76 := int64(f7_2) * int64(f9_38)
	f8f8_19 := int64(f8) * int64(f8_19)
	f8f9_38 := int64(f8) * int64(f9_38)
	f9f9_38 := int64(f9) * int64(f9_38)
	h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
	h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
	h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
	h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
	h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
	h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
	h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
	h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
	h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
	h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
	var carry [10]int64

	carry[0] = (h0 + (1 << 25)) >> 26
	h1 += carry[0]
	h0 -= carry[0] << 26
	carry[4] = (h4 + (1 << 25)) >> 26
	h5 += carry[4]
	h4 -= carry[4] << 26

	carry[1] = (h1 + (1 << 24)) >> 25
	h2 += carry[1]
	h1 -= carry[1] << 25
	carry[5] = (h5 + (1 << 24)) >> 25
	h6 += carry[5]
	h5 -= carry[5] << 25

	carry[2] = (h2 + (1 << 25)) >> 26
	h3 += carry[2]
	h2 -= carry[2] << 26
	carry[6] = (h6 + (1 << 25)) >> 26
	h7 += carry[6]
	h6 -= carry[6] << 26

	carry[3] = (h3 + (1 << 24)) >> 25
	h4 += carry[3]
	h3 -= carry[3] << 25
	carry[7] = (h7 + (1 << 24)) >> 25
	h8 += carry[7]
	h7 -= carry[7] << 25

	carry[4] = (h4 + (1 << 25)) >> 26
	h5 += carry[4]
	h4 -= carry[4] << 26
	carry[8] = (h8 + (1 << 25)) >> 26
	h9 += carry[8]
	h8 -= carry[8] << 26

	carry[9] = (h9 + (1 << 24)) >> 25
	h0 += carry[9] * 19
	h9 -= carry[9] << 25

	carry[0] = (h0 + (1 << 25)) >> 26
	h1 += carry[0]
	h0 -= carry[0] << 26

	h[0] = int32(h0)
	h[1] = int32(h1)
	h[2] = int32(h2)
	h[3] = int32(h3)
	h[4] = int32(h4)
	h[5] = int32(h5)
	h[6] = int32(h6)
	h[7] = int32(h7)
	h[8] = int32(h8)
	h[9] = int32(h9)
}

// feMul121666 calculates h = f * 121666. Can overlap h with f.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
func feMul121666(h, f *fieldElement) {
|
||||
h0 := int64(f[0]) * 121666
|
||||
h1 := int64(f[1]) * 121666
|
||||
h2 := int64(f[2]) * 121666
|
||||
h3 := int64(f[3]) * 121666
|
||||
h4 := int64(f[4]) * 121666
|
||||
h5 := int64(f[5]) * 121666
|
||||
h6 := int64(f[6]) * 121666
|
||||
h7 := int64(f[7]) * 121666
|
||||
h8 := int64(f[8]) * 121666
|
||||
h9 := int64(f[9]) * 121666
|
||||
var carry [10]int64
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feInvert sets out = z^-1.
|
||||
func feInvert(out, z *fieldElement) {
|
||||
var t0, t1, t2, t3 fieldElement
|
||||
var i int
|
||||
|
||||
feSquare(&t0, z)
|
||||
for i = 1; i < 1; i++ {
|
||||
feSquare(&t0, &t0)
|
||||
}
|
||||
feSquare(&t1, &t0)
|
||||
for i = 1; i < 2; i++ {
|
||||
feSquare(&t1, &t1)
|
||||
}
|
||||
feMul(&t1, z, &t1)
|
||||
feMul(&t0, &t0, &t1)
|
||||
feSquare(&t2, &t0)
|
||||
for i = 1; i < 1; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t1, &t2)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 5; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 10; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t2, &t2, &t1)
|
||||
feSquare(&t3, &t2)
|
||||
for i = 1; i < 20; i++ {
|
||||
feSquare(&t3, &t3)
|
||||
}
|
||||
feMul(&t2, &t3, &t2)
|
||||
feSquare(&t2, &t2)
|
||||
for i = 1; i < 10; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 50; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t2, &t2, &t1)
|
||||
feSquare(&t3, &t2)
|
||||
for i = 1; i < 100; i++ {
|
||||
feSquare(&t3, &t3)
|
||||
}
|
||||
feMul(&t2, &t3, &t2)
|
||||
feSquare(&t2, &t2)
|
||||
for i = 1; i < 50; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t1, &t1)
|
||||
for i = 1; i < 5; i++ {
|
||||
feSquare(&t1, &t1)
|
||||
}
|
||||
feMul(out, &t1, &t0)
|
||||
}
|
||||
|
||||
func scalarMultGeneric(out, in, base *[32]byte) {
|
||||
var e [32]byte
|
||||
|
||||
copy(e[:], in[:])
|
||||
e[0] &= 248
|
||||
e[31] &= 127
|
||||
e[31] |= 64
|
||||
|
||||
var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
|
||||
feFromBytes(&x1, base)
|
||||
feOne(&x2)
|
||||
feCopy(&x3, &x1)
|
||||
feOne(&z3)
|
||||
|
||||
swap := int32(0)
|
||||
for pos := 254; pos >= 0; pos-- {
|
||||
b := e[pos/8] >> uint(pos&7)
|
||||
b &= 1
|
||||
swap ^= int32(b)
|
||||
feCSwap(&x2, &x3, swap)
|
||||
feCSwap(&z2, &z3, swap)
|
||||
swap = int32(b)
|
||||
|
||||
feSub(&tmp0, &x3, &z3)
|
||||
feSub(&tmp1, &x2, &z2)
|
||||
feAdd(&x2, &x2, &z2)
|
||||
feAdd(&z2, &x3, &z3)
|
||||
feMul(&z3, &tmp0, &x2)
|
||||
feMul(&z2, &z2, &tmp1)
|
||||
feSquare(&tmp0, &tmp1)
|
||||
feSquare(&tmp1, &x2)
|
||||
feAdd(&x3, &z3, &z2)
|
||||
feSub(&z2, &z3, &z2)
|
||||
feMul(&x2, &tmp1, &tmp0)
|
||||
feSub(&tmp1, &tmp1, &tmp0)
|
||||
feSquare(&z2, &z2)
|
||||
feMul121666(&z3, &tmp1)
|
||||
feSquare(&x3, &x3)
|
||||
feAdd(&tmp0, &tmp0, &z3)
|
||||
feMul(&z3, &x1, &z2)
|
||||
feMul(&z2, &tmp1, &tmp0)
|
||||
}
|
||||
|
||||
feCSwap(&x2, &x3, swap)
|
||||
feCSwap(&z2, &z3, swap)
|
||||
|
||||
feInvert(&z2, &z2)
|
||||
feMul(&x2, &x2, &z2)
|
||||
feToBytes(out, &x2)
|
||||
}
|
||||
11
vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64 gccgo appengine purego

package curve25519

func scalarMult(out, in, base *[32]byte) {
	scalarMultGeneric(out, in, base)
}
23
vendor/golang.org/x/crypto/curve25519/doc.go
generated
vendored
@@ -1,23 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package curve25519 provides an implementation of scalar multiplication on
// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html
package curve25519 // import "golang.org/x/crypto/curve25519"

// basePoint is the x coordinate of the generator of the curve.
var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

// ScalarMult sets dst to the product in*base where dst and base are the x
// coordinates of group points and all values are in little-endian form.
func ScalarMult(dst, in, base *[32]byte) {
	scalarMult(dst, in, base)
}

// ScalarBaseMult sets dst to the product in*base where dst and base are the x
// coordinates of group points, base is the standard generator and all values
// are in little-endian form.
func ScalarBaseMult(dst, in *[32]byte) {
	ScalarMult(dst, in, &basePoint)
}
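The exported surface in doc.go is tiny: ScalarBaseMult derives a public value from a 32-byte scalar, and ScalarMult combines a scalar with a peer's public value. A minimal Diffie-Hellman sketch against this API, assuming the scalars come from crypto/rand (that choice is illustrative, not something the package mandates):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Raw random scalars are fine: the scalar is clamped inside scalarMult.
	var aPriv, bPriv [32]byte
	_, _ = rand.Read(aPriv[:])
	_, _ = rand.Read(bPriv[:])

	// Each party publishes scalar * basePoint.
	var aPub, bPub [32]byte
	curve25519.ScalarBaseMult(&aPub, &aPriv)
	curve25519.ScalarBaseMult(&bPub, &bPriv)

	// Each party combines its own scalar with the peer's public value.
	var aShared, bShared [32]byte
	curve25519.ScalarMult(&aShared, &aPriv, &bPub)
	curve25519.ScalarMult(&bShared, &bPriv, &aPub)

	fmt.Println("shared secrets equal:", bytes.Equal(aShared[:], bShared[:]))
}

Both sides arrive at the same 32-byte secret because a*(b*P) and b*(a*P) are the same point on the curve.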
73
vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
generated
vendored
73
vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
generated
vendored
@@ -1,73 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "const_amd64.h"
|
||||
|
||||
// func freeze(inout *[5]uint64)
|
||||
TEXT ·freeze(SB),7,$0-8
|
||||
MOVQ inout+0(FP), DI
|
||||
|
||||
MOVQ 0(DI),SI
|
||||
MOVQ 8(DI),DX
|
||||
MOVQ 16(DI),CX
|
||||
MOVQ 24(DI),R8
|
||||
MOVQ 32(DI),R9
|
||||
MOVQ $REDMASK51,AX
|
||||
MOVQ AX,R10
|
||||
SUBQ $18,R10
|
||||
MOVQ $3,R11
|
||||
REDUCELOOP:
|
||||
MOVQ SI,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,SI
|
||||
ADDQ R12,DX
|
||||
MOVQ DX,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,DX
|
||||
ADDQ R12,CX
|
||||
MOVQ CX,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,CX
|
||||
ADDQ R12,R8
|
||||
MOVQ R8,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,R8
|
||||
ADDQ R12,R9
|
||||
MOVQ R9,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,R9
|
||||
IMUL3Q $19,R12,R12
|
||||
ADDQ R12,SI
|
||||
SUBQ $1,R11
|
||||
JA REDUCELOOP
|
||||
MOVQ $1,R12
|
||||
CMPQ R10,SI
|
||||
CMOVQLT R11,R12
|
||||
CMPQ AX,DX
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,CX
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,R8
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,R9
|
||||
CMOVQNE R11,R12
|
||||
NEGQ R12
|
||||
ANDQ R12,AX
|
||||
ANDQ R12,R10
|
||||
SUBQ R10,SI
|
||||
SUBQ AX,DX
|
||||
SUBQ AX,CX
|
||||
SUBQ AX,R8
|
||||
SUBQ AX,R9
|
||||
MOVQ SI,0(DI)
|
||||
MOVQ DX,8(DI)
|
||||
MOVQ CX,16(DI)
|
||||
MOVQ R8,24(DI)
|
||||
MOVQ R9,32(DI)
|
||||
RET
|
||||
169
vendor/golang.org/x/crypto/curve25519/mul_amd64.s
generated
vendored
169
vendor/golang.org/x/crypto/curve25519/mul_amd64.s
generated
vendored
@@ -1,169 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "const_amd64.h"
|
||||
|
||||
// func mul(dest, a, b *[5]uint64)
|
||||
TEXT ·mul(SB),0,$16-24
|
||||
MOVQ dest+0(FP), DI
|
||||
MOVQ a+8(FP), SI
|
||||
MOVQ b+16(FP), DX
|
||||
|
||||
MOVQ DX,CX
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MOVQ AX,0(SP)
|
||||
MULQ 16(CX)
|
||||
MOVQ AX,R8
|
||||
MOVQ DX,R9
|
||||
MOVQ 32(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MOVQ AX,8(SP)
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 8(CX)
|
||||
MOVQ AX,R10
|
||||
MOVQ DX,R11
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 16(CX)
|
||||
MOVQ AX,R12
|
||||
MOVQ DX,R13
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 24(CX)
|
||||
MOVQ AX,R14
|
||||
MOVQ DX,R15
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 32(CX)
|
||||
MOVQ AX,BX
|
||||
MOVQ DX,BP
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 8(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 24(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 24(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 0(SP),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 0(SP),AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 32(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 8(SP),AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ $REDMASK51,SI
|
||||
SHLQ $13,R9:R8
|
||||
ANDQ SI,R8
|
||||
SHLQ $13,R11:R10
|
||||
ANDQ SI,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
ANDQ SI,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
ANDQ SI,R14
|
||||
ADDQ R13,R14
|
||||
SHLQ $13,BP:BX
|
||||
ANDQ SI,BX
|
||||
ADDQ R15,BX
|
||||
IMUL3Q $19,BP,DX
|
||||
ADDQ DX,R8
|
||||
MOVQ R8,DX
|
||||
SHRQ $51,DX
|
||||
ADDQ R10,DX
|
||||
MOVQ DX,CX
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,R8
|
||||
ADDQ R12,DX
|
||||
MOVQ DX,R9
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,CX
|
||||
ADDQ R14,DX
|
||||
MOVQ DX,AX
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,R9
|
||||
ADDQ BX,DX
|
||||
MOVQ DX,R10
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,AX
|
||||
IMUL3Q $19,DX,DX
|
||||
ADDQ DX,R8
|
||||
ANDQ SI,R10
|
||||
MOVQ R8,0(DI)
|
||||
MOVQ CX,8(DI)
|
||||
MOVQ R9,16(DI)
|
||||
MOVQ AX,24(DI)
|
||||
MOVQ R10,32(DI)
|
||||
RET
|
||||
132
vendor/golang.org/x/crypto/curve25519/square_amd64.s
generated
vendored
132
vendor/golang.org/x/crypto/curve25519/square_amd64.s
generated
vendored
@@ -1,132 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "const_amd64.h"
|
||||
|
||||
// func square(out, in *[5]uint64)
|
||||
TEXT ·square(SB),7,$0-16
|
||||
MOVQ out+0(FP), DI
|
||||
MOVQ in+8(FP), SI
|
||||
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 0(SI)
|
||||
MOVQ AX,CX
|
||||
MOVQ DX,R8
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 8(SI)
|
||||
MOVQ AX,R9
|
||||
MOVQ DX,R10
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 16(SI)
|
||||
MOVQ AX,R11
|
||||
MOVQ DX,R12
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 24(SI)
|
||||
MOVQ AX,R13
|
||||
MOVQ DX,R14
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 32(SI)
|
||||
MOVQ AX,R15
|
||||
MOVQ DX,BX
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 8(SI)
|
||||
ADDQ AX,R11
|
||||
ADCQ DX,R12
|
||||
MOVQ 8(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 16(SI)
|
||||
ADDQ AX,R13
|
||||
ADCQ DX,R14
|
||||
MOVQ 8(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,R15
|
||||
ADCQ DX,BX
|
||||
MOVQ 8(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,CX
|
||||
ADCQ DX,R8
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 16(SI)
|
||||
ADDQ AX,R15
|
||||
ADCQ DX,BX
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,CX
|
||||
ADCQ DX,R8
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R9
|
||||
ADCQ DX,R10
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,R9
|
||||
ADCQ DX,R10
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R11
|
||||
ADCQ DX,R12
|
||||
MOVQ 32(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R13
|
||||
ADCQ DX,R14
|
||||
MOVQ $REDMASK51,SI
|
||||
SHLQ $13,R8:CX
|
||||
ANDQ SI,CX
|
||||
SHLQ $13,R10:R9
|
||||
ANDQ SI,R9
|
||||
ADDQ R8,R9
|
||||
SHLQ $13,R12:R11
|
||||
ANDQ SI,R11
|
||||
ADDQ R10,R11
|
||||
SHLQ $13,R14:R13
|
||||
ANDQ SI,R13
|
||||
ADDQ R12,R13
|
||||
SHLQ $13,BX:R15
|
||||
ANDQ SI,R15
|
||||
ADDQ R14,R15
|
||||
IMUL3Q $19,BX,DX
|
||||
ADDQ DX,CX
|
||||
MOVQ CX,DX
|
||||
SHRQ $51,DX
|
||||
ADDQ R9,DX
|
||||
ANDQ SI,CX
|
||||
MOVQ DX,R8
|
||||
SHRQ $51,DX
|
||||
ADDQ R11,DX
|
||||
ANDQ SI,R8
|
||||
MOVQ DX,R9
|
||||
SHRQ $51,DX
|
||||
ADDQ R13,DX
|
||||
ANDQ SI,R9
|
||||
MOVQ DX,AX
|
||||
SHRQ $51,DX
|
||||
ADDQ R15,DX
|
||||
ANDQ SI,AX
|
||||
MOVQ DX,R10
|
||||
SHRQ $51,DX
|
||||
IMUL3Q $19,DX,DX
|
||||
ADDQ DX,CX
|
||||
ANDQ SI,R10
|
||||
MOVQ CX,0(DI)
|
||||
MOVQ R8,8(DI)
|
||||
MOVQ R9,16(DI)
|
||||
MOVQ AX,24(DI)
|
||||
MOVQ R10,32(DI)
|
||||
RET
|
||||
5
vendor/golang.org/x/crypto/ed25519/ed25519.go
generated
vendored
5
vendor/golang.org/x/crypto/ed25519/ed25519.go
generated
vendored
@@ -2,6 +2,11 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// In Go 1.13, the ed25519 package was promoted to the standard library as
|
||||
// crypto/ed25519, and this package became a wrapper for the standard library one.
|
||||
//
|
||||
// +build !go1.13
|
||||
|
||||
// Package ed25519 implements the Ed25519 signature algorithm. See
|
||||
// https://ed25519.cr.yp.to/.
|
||||
//
|
||||
|
||||
73
vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
generated
vendored
Normal file
73
vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.13
|
||||
|
||||
// Package ed25519 implements the Ed25519 signature algorithm. See
|
||||
// https://ed25519.cr.yp.to/.
|
||||
//
|
||||
// These functions are also compatible with the “Ed25519” function defined in
|
||||
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
|
||||
// representation includes a public key suffix to make multiple signing
|
||||
// operations with the same key more efficient. This package refers to the RFC
|
||||
// 8032 private key as the “seed”.
|
||||
//
|
||||
// Beginning with Go 1.13, the functionality of this package was moved to the
|
||||
// standard library as crypto/ed25519. This package only acts as a compatibility
|
||||
// wrapper.
|
||||
package ed25519
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
// PublicKeySize is the size, in bytes, of public keys as used in this package.
|
||||
PublicKeySize = 32
|
||||
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
|
||||
PrivateKeySize = 64
|
||||
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
|
||||
SignatureSize = 64
|
||||
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
|
||||
SeedSize = 32
|
||||
)
|
||||
|
||||
// PublicKey is the type of Ed25519 public keys.
|
||||
//
|
||||
// This type is an alias for crypto/ed25519's PublicKey type.
|
||||
// See the crypto/ed25519 package for the methods on this type.
|
||||
type PublicKey = ed25519.PublicKey
|
||||
|
||||
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
|
||||
//
|
||||
// This type is an alias for crypto/ed25519's PrivateKey type.
|
||||
// See the crypto/ed25519 package for the methods on this type.
|
||||
type PrivateKey = ed25519.PrivateKey
|
||||
|
||||
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
	return ed25519.GenerateKey(rand)
}

// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
	return ed25519.NewKeyFromSeed(seed)
}

// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
	return ed25519.Sign(privateKey, message)
}

// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
	return ed25519.Verify(publicKey, message, sig)
}
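Everything exported from this Go 1.13 wrapper forwards straight to crypto/ed25519, so it can be exercised exactly like the standard-library package. A minimal sign-and-verify sketch (the message text and error handling are illustrative only):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("example message")
	sig := ed25519.Sign(priv, msg)

	// Verify reports whether sig is a valid signature of msg by pub.
	fmt.Println("valid:", ed25519.Verify(pub, msg, sig))
}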
264
vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go
generated
vendored
264
vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go
generated
vendored
@@ -1,264 +0,0 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ChaCha20 implements the core ChaCha20 function as specified
|
||||
// in https://tools.ietf.org/html/rfc7539#section-2.3.
|
||||
package chacha20
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"encoding/binary"
|
||||
|
||||
"golang.org/x/crypto/internal/subtle"
|
||||
)
|
||||
|
||||
// assert that *Cipher implements cipher.Stream
|
||||
var _ cipher.Stream = (*Cipher)(nil)
|
||||
|
||||
// Cipher is a stateful instance of ChaCha20 using a particular key
|
||||
// and nonce. A *Cipher implements the cipher.Stream interface.
|
||||
type Cipher struct {
|
||||
key [8]uint32
|
||||
counter uint32 // incremented after each block
|
||||
nonce [3]uint32
|
||||
buf [bufSize]byte // buffer for unused keystream bytes
|
||||
len int // number of unused keystream bytes at end of buf
|
||||
}
|
||||
|
||||
// New creates a new ChaCha20 stream cipher with the given key and nonce.
|
||||
// The initial counter value is set to 0.
|
||||
func New(key [8]uint32, nonce [3]uint32) *Cipher {
|
||||
return &Cipher{key: key, nonce: nonce}
|
||||
}
|
||||
|
||||
// ChaCha20 constants spelling "expand 32-byte k"
|
||||
const (
|
||||
j0 uint32 = 0x61707865
|
||||
j1 uint32 = 0x3320646e
|
||||
j2 uint32 = 0x79622d32
|
||||
j3 uint32 = 0x6b206574
|
||||
)
|
||||
|
||||
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d ^= a
	d = (d << 16) | (d >> 16)
	c += d
	b ^= c
	b = (b << 12) | (b >> 20)
	a += b
	d ^= a
	d = (d << 8) | (d >> 24)
	c += d
	b ^= c
	b = (b << 7) | (b >> 25)
	return a, b, c, d
}
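quarterRound is the cipher's only mixing primitive: four adds, four xors, and four fixed-distance rotations written out as shift pairs. A self-contained restatement using math/bits.RotateLeft32, checked against the RFC 7539 quarter-round test vector (this snippet is illustrative and not part of the package):

package main

import (
	"fmt"
	"math/bits"
)

// qr is the same ChaCha quarter round, with each (x<<n)|(x>>(32-n)) pair
// expressed as a rotation.
func qr(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16)
	c += d
	b = bits.RotateLeft32(b^c, 12)
	a += b
	d = bits.RotateLeft32(d^a, 8)
	c += d
	b = bits.RotateLeft32(b^c, 7)
	return a, b, c, d
}

func main() {
	// Test vector from RFC 7539, section 2.1.1.
	a, b, c, d := qr(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
	// Expect: ea2a92f4 cb1cf8ce 4581472e 5881c4bb
}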
|
||||
// XORKeyStream XORs each byte in the given slice with a byte from the
|
||||
// cipher's key stream. Dst and src must overlap entirely or not at all.
|
||||
//
|
||||
// If len(dst) < len(src), XORKeyStream will panic. It is acceptable
|
||||
// to pass a dst bigger than src, and in that case, XORKeyStream will
|
||||
// only update dst[:len(src)] and will not touch the rest of dst.
|
||||
//
|
||||
// Multiple calls to XORKeyStream behave as if the concatenation of
|
||||
// the src buffers was passed in a single run. That is, Cipher
|
||||
// maintains state and does not reset at each XORKeyStream call.
|
||||
func (s *Cipher) XORKeyStream(dst, src []byte) {
|
||||
if len(dst) < len(src) {
|
||||
panic("chacha20: output smaller than input")
|
||||
}
|
||||
if subtle.InexactOverlap(dst[:len(src)], src) {
|
||||
panic("chacha20: invalid buffer overlap")
|
||||
}
|
||||
|
||||
// xor src with buffered keystream first
|
||||
if s.len != 0 {
|
||||
buf := s.buf[len(s.buf)-s.len:]
|
||||
if len(src) < len(buf) {
|
||||
buf = buf[:len(src)]
|
||||
}
|
||||
td, ts := dst[:len(buf)], src[:len(buf)] // BCE hint
|
||||
for i, b := range buf {
|
||||
td[i] = ts[i] ^ b
|
||||
}
|
||||
s.len -= len(buf)
|
||||
if s.len != 0 {
|
||||
return
|
||||
}
|
||||
s.buf = [len(s.buf)]byte{} // zero the empty buffer
|
||||
src = src[len(buf):]
|
||||
dst = dst[len(buf):]
|
||||
}
|
||||
|
||||
if len(src) == 0 {
|
||||
return
|
||||
}
|
||||
if haveAsm {
|
||||
if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 {
|
||||
panic("chacha20: counter overflow")
|
||||
}
|
||||
s.xorKeyStreamAsm(dst, src)
|
||||
return
|
||||
}
|
||||
|
||||
// set up a 64-byte buffer to pad out the final block if needed
|
||||
// (hoisted out of the main loop to avoid spills)
|
||||
rem := len(src) % 64 // length of final block
|
||||
fin := len(src) - rem // index of final block
|
||||
if rem > 0 {
|
||||
copy(s.buf[len(s.buf)-64:], src[fin:])
|
||||
}
|
||||
|
||||
// pre-calculate most of the first round
|
||||
s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0])
|
||||
s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1])
|
||||
s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2])
|
||||
|
||||
n := len(src)
|
||||
src, dst = src[:n:n], dst[:n:n] // BCE hint
|
||||
for i := 0; i < n; i += 64 {
|
||||
// calculate the remainder of the first round
|
||||
s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter)
|
||||
|
||||
// execute the second round
|
||||
x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15)
|
||||
x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12)
|
||||
x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13)
|
||||
x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14)
|
||||
|
||||
// execute the remaining 18 rounds
|
||||
for i := 0; i < 9; i++ {
|
||||
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
|
||||
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
|
||||
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
|
||||
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
|
||||
|
||||
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
|
||||
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
|
||||
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
|
||||
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
|
||||
}
|
||||
|
||||
x0 += j0
|
||||
x1 += j1
|
||||
x2 += j2
|
||||
x3 += j3
|
||||
|
||||
x4 += s.key[0]
|
||||
x5 += s.key[1]
|
||||
x6 += s.key[2]
|
||||
x7 += s.key[3]
|
||||
x8 += s.key[4]
|
||||
x9 += s.key[5]
|
||||
x10 += s.key[6]
|
||||
x11 += s.key[7]
|
||||
|
||||
x12 += s.counter
|
||||
x13 += s.nonce[0]
|
||||
x14 += s.nonce[1]
|
||||
x15 += s.nonce[2]
|
||||
|
||||
// increment the counter
|
||||
s.counter += 1
|
||||
if s.counter == 0 {
|
||||
panic("chacha20: counter overflow")
|
||||
}
|
||||
|
||||
// pad to 64 bytes if needed
|
||||
in, out := src[i:], dst[i:]
|
||||
if i == fin {
|
||||
// src[fin:] has already been copied into s.buf before
|
||||
// the main loop
|
||||
in, out = s.buf[len(s.buf)-64:], s.buf[len(s.buf)-64:]
|
||||
}
|
||||
in, out = in[:64], out[:64] // BCE hint
|
||||
|
||||
// XOR the key stream with the source and write out the result
|
||||
xor(out[0:], in[0:], x0)
|
||||
xor(out[4:], in[4:], x1)
|
||||
xor(out[8:], in[8:], x2)
|
||||
xor(out[12:], in[12:], x3)
|
||||
xor(out[16:], in[16:], x4)
|
||||
xor(out[20:], in[20:], x5)
|
||||
xor(out[24:], in[24:], x6)
|
||||
xor(out[28:], in[28:], x7)
|
||||
xor(out[32:], in[32:], x8)
|
||||
xor(out[36:], in[36:], x9)
|
||||
xor(out[40:], in[40:], x10)
|
||||
xor(out[44:], in[44:], x11)
|
||||
xor(out[48:], in[48:], x12)
|
||||
xor(out[52:], in[52:], x13)
|
||||
xor(out[56:], in[56:], x14)
|
||||
xor(out[60:], in[60:], x15)
|
||||
}
|
||||
// copy any trailing bytes out of the buffer and into dst
|
||||
if rem != 0 {
|
||||
s.len = 64 - rem
|
||||
copy(dst[fin:], s.buf[len(s.buf)-64:])
|
||||
}
|
||||
}
|
||||
|
||||
// Advance discards bytes in the key stream until the next 64 byte block
|
||||
// boundary is reached and updates the counter accordingly. If the key
|
||||
// stream is already at a block boundary no bytes will be discarded and
|
||||
// the counter will be unchanged.
|
||||
func (s *Cipher) Advance() {
|
||||
s.len -= s.len % 64
|
||||
if s.len == 0 {
|
||||
s.buf = [len(s.buf)]byte{}
|
||||
}
|
||||
}
|
||||
|
||||
// XORKeyStream crypts bytes from in to out using the given key and counters.
|
||||
// In and out must overlap entirely or not at all. Counter contains the raw
|
||||
// ChaCha20 counter bytes (i.e. block counter followed by nonce).
|
||||
func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
|
||||
s := Cipher{
|
||||
key: [8]uint32{
|
||||
binary.LittleEndian.Uint32(key[0:4]),
|
||||
binary.LittleEndian.Uint32(key[4:8]),
|
||||
binary.LittleEndian.Uint32(key[8:12]),
|
||||
binary.LittleEndian.Uint32(key[12:16]),
|
||||
binary.LittleEndian.Uint32(key[16:20]),
|
||||
binary.LittleEndian.Uint32(key[20:24]),
|
||||
binary.LittleEndian.Uint32(key[24:28]),
|
||||
binary.LittleEndian.Uint32(key[28:32]),
|
||||
},
|
||||
nonce: [3]uint32{
|
||||
binary.LittleEndian.Uint32(counter[4:8]),
|
||||
binary.LittleEndian.Uint32(counter[8:12]),
|
||||
binary.LittleEndian.Uint32(counter[12:16]),
|
||||
},
|
||||
counter: binary.LittleEndian.Uint32(counter[0:4]),
|
||||
}
|
||||
s.XORKeyStream(out, in)
|
||||
}
|
||||
|
||||
// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a
|
||||
// nonce. It should only be used as part of the XChaCha20 construction.
|
||||
func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 {
|
||||
x0, x1, x2, x3 := j0, j1, j2, j3
|
||||
x4, x5, x6, x7 := key[0], key[1], key[2], key[3]
|
||||
x8, x9, x10, x11 := key[4], key[5], key[6], key[7]
|
||||
x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3]
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
|
||||
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
|
||||
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
|
||||
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
|
||||
|
||||
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
|
||||
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
|
||||
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
|
||||
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
|
||||
}
|
||||
|
||||
var out [8]uint32
|
||||
out[0], out[1], out[2], out[3] = x0, x1, x2, x3
|
||||
out[4], out[5], out[6], out[7] = x12, x13, x14, x15
|
||||
return out
|
||||
}
|
||||
16
vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go
generated
vendored
16
vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go
generated
vendored
@@ -1,16 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !s390x gccgo appengine
|
||||
|
||||
package chacha20
|
||||
|
||||
const (
|
||||
bufSize = 64
|
||||
haveAsm = false
|
||||
)
|
||||
|
||||
func (*Cipher) xorKeyStreamAsm(dst, src []byte) {
|
||||
panic("not implemented")
|
||||
}
|
||||
30
vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go
generated
vendored
30
vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go
generated
vendored
@@ -1,30 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build s390x,!gccgo,!appengine
|
||||
|
||||
package chacha20
|
||||
|
||||
var haveAsm = hasVectorFacility()
|
||||
|
||||
const bufSize = 256
|
||||
|
||||
// hasVectorFacility reports whether the machine supports the vector
|
||||
// facility (vx).
|
||||
// Implementation in asm_s390x.s.
|
||||
func hasVectorFacility() bool
|
||||
|
||||
// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
|
||||
// be called when the vector facility is available.
|
||||
// Implementation in asm_s390x.s.
|
||||
//go:noescape
|
||||
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
|
||||
|
||||
func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
|
||||
xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter, &c.buf, &c.len)
|
||||
}
|
||||
|
||||
// EXRL targets, DO NOT CALL!
|
||||
func mvcSrcToBuf()
|
||||
func mvcBufToDst()
|
||||
4
vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
generated
vendored
4
vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
generated
vendored
@@ -76,7 +76,9 @@ func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err
|
||||
// Bleichenbacher, Advances in Cryptology (Crypto '98),
|
||||
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
|
||||
s := new(big.Int).Exp(c1, priv.X, priv.P)
|
||||
s.ModInverse(s, priv.P)
|
||||
if s.ModInverse(s, priv.P) == nil {
|
||||
return nil, errors.New("elgamal: invalid private key")
|
||||
}
|
||||
s.Mul(s, c2)
|
||||
s.Mod(s, priv.P)
|
||||
em := s.Bytes()
|
||||
|
||||
14
vendor/golang.org/x/crypto/openpgp/keys.go
generated
vendored
14
vendor/golang.org/x/crypto/openpgp/keys.go
generated
vendored
@@ -504,7 +504,7 @@ const defaultRSAKeyBits = 2048
|
||||
// which may be empty but must not contain any of "()<>\x00".
|
||||
// If config is nil, sensible defaults will be used.
|
||||
func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
|
||||
currentTime := config.Now()
|
||||
creationTime := config.Now()
|
||||
|
||||
bits := defaultRSAKeyBits
|
||||
if config != nil && config.RSABits != 0 {
|
||||
@@ -525,8 +525,8 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
|
||||
}
|
||||
|
||||
e := &Entity{
|
||||
PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey),
|
||||
PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv),
|
||||
PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey),
|
||||
PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv),
|
||||
Identities: make(map[string]*Identity),
|
||||
}
|
||||
isPrimaryId := true
|
||||
@@ -534,7 +534,7 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
|
||||
Name: uid.Id,
|
||||
UserId: uid,
|
||||
SelfSignature: &packet.Signature{
|
||||
CreationTime: currentTime,
|
||||
CreationTime: creationTime,
|
||||
SigType: packet.SigTypePositiveCert,
|
||||
PubKeyAlgo: packet.PubKeyAlgoRSA,
|
||||
Hash: config.Hash(),
|
||||
@@ -563,10 +563,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
|
||||
|
||||
e.Subkeys = make([]Subkey, 1)
|
||||
e.Subkeys[0] = Subkey{
|
||||
PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
|
||||
PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv),
|
||||
PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey),
|
||||
PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv),
|
||||
Sig: &packet.Signature{
|
||||
CreationTime: currentTime,
|
||||
CreationTime: creationTime,
|
||||
SigType: packet.SigTypeSubkeyBinding,
|
||||
PubKeyAlgo: packet.PubKeyAlgoRSA,
|
||||
Hash: config.Hash(),
|
||||
|
||||
6
vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
generated
vendored
6
vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
generated
vendored
@@ -5,6 +5,7 @@
|
||||
package packet
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rsa"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
@@ -78,8 +79,9 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
|
||||
// padding oracle attacks.
|
||||
switch priv.PubKeyAlgo {
|
||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
||||
k := priv.PrivateKey.(*rsa.PrivateKey)
|
||||
b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
|
||||
// Supports both *rsa.PrivateKey and crypto.Decrypter
|
||||
k := priv.PrivateKey.(crypto.Decrypter)
|
||||
b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.bytes), nil)
|
||||
case PubKeyAlgoElGamal:
|
||||
c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
|
||||
c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
|
||||
|
||||
28
vendor/golang.org/x/crypto/openpgp/packet/private_key.go
generated
vendored
28
vendor/golang.org/x/crypto/openpgp/packet/private_key.go
generated
vendored
@@ -31,54 +31,54 @@ type PrivateKey struct {
|
||||
encryptedData []byte
|
||||
cipher CipherFunction
|
||||
s2k func(out, in []byte)
|
||||
PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer.
|
||||
PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or crypto.Signer/crypto.Decrypter (Decryptor RSA only).
|
||||
sha1Checksum bool
|
||||
iv []byte
|
||||
}
|
||||
|
||||
func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
|
||||
func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
|
||||
pk := new(PrivateKey)
|
||||
pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey)
|
||||
pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
|
||||
pk.PrivateKey = priv
|
||||
return pk
|
||||
}
|
||||
|
||||
func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
|
||||
func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
|
||||
pk := new(PrivateKey)
|
||||
pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey)
|
||||
pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
|
||||
pk.PrivateKey = priv
|
||||
return pk
|
||||
}
|
||||
|
||||
func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
|
||||
func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
|
||||
pk := new(PrivateKey)
|
||||
pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey)
|
||||
pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
|
||||
pk.PrivateKey = priv
|
||||
return pk
|
||||
}
|
||||
|
||||
func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
|
||||
func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
|
||||
pk := new(PrivateKey)
|
||||
pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey)
|
||||
pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
|
||||
pk.PrivateKey = priv
|
||||
return pk
|
||||
}
|
||||
|
||||
// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
|
||||
// implements RSA or ECDSA.
|
||||
func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey {
|
||||
func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey {
|
||||
pk := new(PrivateKey)
|
||||
// In general, the public Keys should be used as pointers. We still
|
||||
// type-switch on the values, for backwards-compatibility.
|
||||
switch pubkey := signer.Public().(type) {
|
||||
case *rsa.PublicKey:
|
||||
pk.PublicKey = *NewRSAPublicKey(currentTime, pubkey)
|
||||
pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey)
|
||||
case rsa.PublicKey:
|
||||
pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey)
|
||||
pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey)
|
||||
case *ecdsa.PublicKey:
|
||||
pk.PublicKey = *NewECDSAPublicKey(currentTime, pubkey)
|
||||
pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey)
|
||||
case ecdsa.PublicKey:
|
||||
pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey)
|
||||
pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey)
|
||||
default:
|
||||
panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey")
|
||||
}
|
||||
|
||||
39
vendor/golang.org/x/crypto/poly1305/bits_compat.go
generated
vendored
Normal file
39
vendor/golang.org/x/crypto/poly1305/bits_compat.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.13
|
||||
|
||||
package poly1305
|
||||
|
||||
// Generic fallbacks for the math/bits intrinsics, copied from
|
||||
// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had
|
||||
// variable time fallbacks until Go 1.13.
|
||||
|
||||
func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
|
||||
sum = x + y + carry
|
||||
carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
|
||||
return
|
||||
}
|
||||
|
||||
func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
|
||||
diff = x - y - borrow
|
||||
borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
|
||||
return
|
||||
}
|
||||
|
||||
func bitsMul64(x, y uint64) (hi, lo uint64) {
	const mask32 = 1<<32 - 1
	x0 := x & mask32
	x1 := x >> 32
	y0 := y & mask32
	y1 := y >> 32
	w0 := x0 * y0
	t := x1*y0 + w0>>32
	w1 := t & mask32
	w2 := t >> 32
	w1 += x0 * y1
	hi = x1*y1 + w2 + w1>>32
	lo = x * y
	return
}
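These fallbacks only exist so the poly1305 code can use one set of names on toolchains older than Go 1.13. A quick way to convince yourself the widening multiply above is correct is a throwaway comparison against math/bits.Mul64, which naturally cannot live in this pre-1.13 file:

package main

import (
	"fmt"
	"math/bits"
	"math/rand"
)

// fallbackMul64 mirrors the bitsMul64 fallback above: split each operand into
// 32-bit halves and reassemble the 128-bit product from the partial products.
func fallbackMul64(x, y uint64) (hi, lo uint64) {
	const mask32 = 1<<32 - 1
	x0, x1 := x&mask32, x>>32
	y0, y1 := y&mask32, y>>32
	w0 := x0 * y0
	t := x1*y0 + w0>>32
	w1, w2 := t&mask32, t>>32
	w1 += x0 * y1
	hi = x1*y1 + w2 + w1>>32
	lo = x * y
	return
}

func main() {
	for i := 0; i < 1000; i++ {
		x, y := rand.Uint64(), rand.Uint64()
		hi, lo := fallbackMul64(x, y)
		wantHi, wantLo := bits.Mul64(x, y)
		if hi != wantHi || lo != wantLo {
			fmt.Printf("mismatch at %#x * %#x\n", x, y)
			return
		}
	}
	fmt.Println("fallback matches math/bits.Mul64 on 1000 random inputs")
}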
21
vendor/golang.org/x/crypto/poly1305/bits_go1.13.go
generated
vendored
Normal file
21
vendor/golang.org/x/crypto/poly1305/bits_go1.13.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.13
|
||||
|
||||
package poly1305
|
||||
|
||||
import "math/bits"
|
||||
|
||||
func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
|
||||
return bits.Add64(x, y, carry)
|
||||
}
|
||||
|
||||
func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
|
||||
return bits.Sub64(x, y, borrow)
|
||||
}
|
||||
|
||||
func bitsMul64(x, y uint64) (hi, lo uint64) {
|
||||
return bits.Mul64(x, y)
|
||||
}
|
||||
11
vendor/golang.org/x/crypto/poly1305/mac_noasm.go
generated
vendored
Normal file
11
vendor/golang.org/x/crypto/poly1305/mac_noasm.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !amd64,!ppc64le gccgo appengine
|
||||
|
||||
package poly1305
|
||||
|
||||
type mac struct{ macGeneric }
|
||||
|
||||
func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} }
|
||||
90
vendor/golang.org/x/crypto/poly1305/poly1305.go
generated
vendored
90
vendor/golang.org/x/crypto/poly1305/poly1305.go
generated
vendored
@@ -2,21 +2,19 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package poly1305 implements Poly1305 one-time message authentication code as
|
||||
specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
|
||||
|
||||
Poly1305 is a fast, one-time authentication function. It is infeasible for an
|
||||
attacker to generate an authenticator for a message without the key. However, a
|
||||
key must only be used for a single message. Authenticating two different
|
||||
messages with the same key allows an attacker to forge authenticators for other
|
||||
messages with the same key.
|
||||
|
||||
Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
|
||||
used with a fixed key in order to generate one-time keys from an nonce.
|
||||
However, in this package AES isn't used and the one-time key is specified
|
||||
directly.
|
||||
*/
|
||||
// Package poly1305 implements Poly1305 one-time message authentication code as
|
||||
// specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
|
||||
//
|
||||
// Poly1305 is a fast, one-time authentication function. It is infeasible for an
|
||||
// attacker to generate an authenticator for a message without the key. However, a
|
||||
// key must only be used for a single message. Authenticating two different
|
||||
// messages with the same key allows an attacker to forge authenticators for other
|
||||
// messages with the same key.
|
||||
//
|
||||
// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
|
||||
// used with a fixed key in order to generate one-time keys from an nonce.
|
||||
// However, in this package AES isn't used and the one-time key is specified
|
||||
// directly.
|
||||
package poly1305 // import "golang.org/x/crypto/poly1305"
|
||||
|
||||
import "crypto/subtle"
|
||||
@@ -24,10 +22,68 @@ import "crypto/subtle"
|
||||
// TagSize is the size, in bytes, of a poly1305 authenticator.
|
||||
const TagSize = 16
|
||||
|
||||
// Verify returns true if mac is a valid authenticator for m with the given
|
||||
// key.
|
||||
// Sum generates an authenticator for msg using a one-time key and puts the
|
||||
// 16-byte result into out. Authenticating two different messages with the same
|
||||
// key allows an attacker to forge messages at will.
|
||||
func Sum(out *[16]byte, m []byte, key *[32]byte) {
|
||||
sum(out, m, key)
|
||||
}
|
||||
|
||||
// Verify returns true if mac is a valid authenticator for m with the given key.
|
||||
func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
|
||||
var tmp [16]byte
|
||||
Sum(&tmp, m, key)
|
||||
return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
|
||||
}
|
||||
|
||||
// New returns a new MAC computing an authentication
|
||||
// tag of all data written to it with the given key.
|
||||
// This allows writing the message progressively instead
|
||||
// of passing it as a single slice. Common users should use
|
||||
// the Sum function instead.
|
||||
//
|
||||
// The key must be unique for each message, as authenticating
|
||||
// two different messages with the same key allows an attacker
|
||||
// to forge messages at will.
|
||||
func New(key *[32]byte) *MAC {
|
||||
return &MAC{
|
||||
mac: newMAC(key),
|
||||
finalized: false,
|
||||
}
|
||||
}
|
||||
|
||||
// MAC is an io.Writer computing an authentication tag
|
||||
// of the data written to it.
|
||||
//
|
||||
// MAC cannot be used like common hash.Hash implementations,
|
||||
// because using a poly1305 key twice breaks its security.
|
||||
// Therefore writing data to a running MAC after calling
|
||||
// Sum causes it to panic.
|
||||
type MAC struct {
|
||||
mac // platform-dependent implementation
|
||||
|
||||
finalized bool
|
||||
}
|
||||
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (h *MAC) Size() int { return TagSize }
|
||||
|
||||
// Write adds more data to the running message authentication code.
|
||||
// It never returns an error.
|
||||
//
|
||||
// It must not be called after the first call of Sum.
|
||||
func (h *MAC) Write(p []byte) (n int, err error) {
|
||||
if h.finalized {
|
||||
panic("poly1305: write to MAC after Sum")
|
||||
}
|
||||
return h.mac.Write(p)
|
||||
}
|
||||
|
||||
// Sum computes the authenticator of all data written to the
// message authentication code.
func (h *MAC) Sum(b []byte) []byte {
	var mac [TagSize]byte
	h.mac.Sum(&mac)
	h.finalized = true
	return append(b, mac[:]...)
}
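Sum and New cover the two usage patterns this file exposes: one-shot tagging and incremental writes. A short sketch showing that both paths agree and that Verify accepts the result (the random key only stands in for whatever one-time key a caller derives; a poly1305 key must never authenticate more than one message):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// One-time key; in real protocols this is derived per message.
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	msg := []byte("hello, poly1305")

	// One-shot authenticator.
	var tag [16]byte
	poly1305.Sum(&tag, msg, &key)

	// Incremental authenticator over the same data.
	h := poly1305.New(&key)
	h.Write(msg)
	incremental := h.Sum(nil)

	fmt.Println("tags equal:", bytes.Equal(tag[:], incremental))
	fmt.Println("verifies:", poly1305.Verify(&tag, msg, &key))
}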
58
vendor/golang.org/x/crypto/poly1305/sum_amd64.go
generated
vendored
58
vendor/golang.org/x/crypto/poly1305/sum_amd64.go
generated
vendored
@@ -6,17 +6,53 @@
|
||||
|
||||
package poly1305
|
||||
|
||||
// This function is implemented in sum_amd64.s
|
||||
//go:noescape
|
||||
func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
|
||||
func update(state *macState, msg []byte)
|
||||
|
||||
// Sum generates an authenticator for m using a one-time key and puts the
|
||||
// 16-byte result into out. Authenticating two different messages with the same
|
||||
// key allows an attacker to forge messages at will.
|
||||
func Sum(out *[16]byte, m []byte, key *[32]byte) {
|
||||
var mPtr *byte
|
||||
if len(m) > 0 {
|
||||
mPtr = &m[0]
|
||||
}
|
||||
poly1305(out, mPtr, uint64(len(m)), key)
|
||||
func sum(out *[16]byte, m []byte, key *[32]byte) {
|
||||
h := newMAC(key)
|
||||
h.Write(m)
|
||||
h.Sum(out)
|
||||
}
|
||||
|
||||
func newMAC(key *[32]byte) (h mac) {
|
||||
initialize(key, &h.r, &h.s)
|
||||
return
|
||||
}
|
||||
|
||||
// mac is a wrapper for macGeneric that redirects calls that would have gone to
|
||||
// updateGeneric to update.
|
||||
//
|
||||
// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
|
||||
// using function pointers would carry a major performance cost.
|
||||
type mac struct{ macGeneric }
|
||||
|
||||
func (h *mac) Write(p []byte) (int, error) {
|
||||
nn := len(p)
|
||||
if h.offset > 0 {
|
||||
n := copy(h.buffer[h.offset:], p)
|
||||
if h.offset+n < TagSize {
|
||||
h.offset += n
|
||||
return nn, nil
|
||||
}
|
||||
p = p[n:]
|
||||
h.offset = 0
|
||||
update(&h.macState, h.buffer[:])
|
||||
}
|
||||
if n := len(p) - (len(p) % TagSize); n > 0 {
|
||||
update(&h.macState, p[:n])
|
||||
p = p[n:]
|
||||
}
|
||||
if len(p) > 0 {
|
||||
h.offset += copy(h.buffer[h.offset:], p)
|
||||
}
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
func (h *mac) Sum(out *[16]byte) {
|
||||
state := h.macState
|
||||
if h.offset > 0 {
|
||||
update(&state, h.buffer[:h.offset])
|
||||
}
|
||||
finalize(out, &state.h, &state.s)
|
||||
}
|
||||
|
||||
43
vendor/golang.org/x/crypto/poly1305/sum_amd64.s
generated
vendored
43
vendor/golang.org/x/crypto/poly1305/sum_amd64.s
generated
vendored
@@ -54,24 +54,17 @@
|
||||
ADCQ t3, h1; \
|
||||
ADCQ $0, h2
|
||||
|
||||
DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
|
||||
DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
|
||||
GLOBL ·poly1305Mask<>(SB), RODATA, $16
|
||||
// func update(state *[7]uint64, msg []byte)
|
||||
TEXT ·update(SB), $0-32
|
||||
MOVQ state+0(FP), DI
|
||||
MOVQ msg_base+8(FP), SI
|
||||
MOVQ msg_len+16(FP), R15
|
||||
|
||||
// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key)
|
||||
TEXT ·poly1305(SB), $0-32
|
||||
MOVQ out+0(FP), DI
|
||||
MOVQ m+8(FP), SI
|
||||
MOVQ mlen+16(FP), R15
|
||||
MOVQ key+24(FP), AX
|
||||
|
||||
MOVQ 0(AX), R11
|
||||
MOVQ 8(AX), R12
|
||||
ANDQ ·poly1305Mask<>(SB), R11 // r0
|
||||
ANDQ ·poly1305Mask<>+8(SB), R12 // r1
|
||||
XORQ R8, R8 // h0
|
||||
XORQ R9, R9 // h1
|
||||
XORQ R10, R10 // h2
|
||||
MOVQ 0(DI), R8 // h0
|
||||
MOVQ 8(DI), R9 // h1
|
||||
MOVQ 16(DI), R10 // h2
|
||||
MOVQ 24(DI), R11 // r0
|
||||
MOVQ 32(DI), R12 // r1
|
||||
|
||||
CMPQ R15, $16
|
||||
JB bytes_between_0_and_15
|
||||
@@ -109,17 +102,7 @@ flush_buffer:
|
||||
JMP multiply
|
||||
|
||||
done:
|
||||
MOVQ R8, AX
|
||||
MOVQ R9, BX
|
||||
SUBQ $0xFFFFFFFFFFFFFFFB, AX
|
||||
SBBQ $0xFFFFFFFFFFFFFFFF, BX
|
||||
SBBQ $3, R10
|
||||
CMOVQCS R8, AX
|
||||
CMOVQCS R9, BX
|
||||
MOVQ key+24(FP), R8
|
||||
ADDQ 16(R8), AX
|
||||
ADCQ 24(R8), BX
|
||||
|
||||
MOVQ AX, 0(DI)
|
||||
MOVQ BX, 8(DI)
|
||||
MOVQ R8, 0(DI)
|
||||
MOVQ R9, 8(DI)
|
||||
MOVQ R10, 16(DI)
|
||||
RET
|
||||
|
||||
7
vendor/golang.org/x/crypto/poly1305/sum_arm.go
generated
vendored
7
vendor/golang.org/x/crypto/poly1305/sum_arm.go
generated
vendored
@@ -6,14 +6,11 @@
|
||||
|
||||
package poly1305
|
||||
|
||||
// This function is implemented in sum_arm.s
|
||||
// poly1305_auth_armv6 is implemented in sum_arm.s
|
||||
//go:noescape
|
||||
func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
|
||||
|
||||
// Sum generates an authenticator for m using a one-time key and puts the
|
||||
// 16-byte result into out. Authenticating two different messages with the same
|
||||
// key allows an attacker to forge messages at will.
|
||||
func Sum(out *[16]byte, m []byte, key *[32]byte) {
|
||||
func sum(out *[16]byte, m []byte, key *[32]byte) {
|
||||
var mPtr *byte
|
||||
if len(m) > 0 {
|
||||
mPtr = &m[0]
|
||||
|
||||
307
vendor/golang.org/x/crypto/poly1305/sum_generic.go
generated
vendored
Normal file
307
vendor/golang.org/x/crypto/poly1305/sum_generic.go
generated
vendored
Normal file
@@ -0,0 +1,307 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file provides the generic implementation of Sum and MAC. Other files
|
||||
// might provide optimized assembly implementations of some of this code.
|
||||
|
||||
package poly1305
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
// for a 64 bytes message is approximately
//
//     s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r  mod  2¹³⁰ - 5
//
// for some secret r and s. It can be computed sequentially like
//
//     for len(msg) > 0:
//         h += read(msg, 16)
//         h *= r
//         h %= 2¹³⁰ - 5
//     return h + s
//
// All the complexity is about doing performant constant-time math on numbers
// larger than any available numeric type.

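As a readability aid for the formula above, the same tag can be written down directly with math/big, ignoring all constant-time concerns. This sketch is not part of the package; it assumes r has already been clamped, that r and s were decoded from the key as little-endian integers, and that the final reduction modulo 2¹²⁸ reflects how the 16-byte tag is serialized:

package main

import (
	"fmt"
	"math/big"
)

// referenceTag follows the pseudocode in the comment above: for each 16-byte
// chunk, append the 0x01 "high" byte, read the chunk as a little-endian
// integer, then fold it in with h = (h + m) * r mod 2¹³⁰-5. The tag is
// h + s mod 2¹²⁸.
func referenceTag(msg []byte, r, s *big.Int) *big.Int {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
	h := new(big.Int)
	for len(msg) > 0 {
		n := 16
		if len(msg) < n {
			n = len(msg)
		}
		chunk := append(append([]byte{}, msg[:n]...), 1)
		m := new(big.Int)
		for i := len(chunk) - 1; i >= 0; i-- { // little-endian decode
			m.Lsh(m, 8).Or(m, big.NewInt(int64(chunk[i])))
		}
		h.Mod(h.Mul(h.Add(h, m), r), p)
		msg = msg[n:]
	}
	h.Add(h, s)
	return h.Mod(h, new(big.Int).Lsh(big.NewInt(1), 128))
}

func main() {
	r, s := big.NewInt(2), big.NewInt(3) // toy values; real r/s come from the key
	fmt.Println(referenceTag([]byte("abc"), r, s))
}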
func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
|
||||
h := newMACGeneric(key)
|
||||
h.Write(msg)
|
||||
h.Sum(out)
|
||||
}
|
||||
|
||||
func newMACGeneric(key *[32]byte) (h macGeneric) {
|
||||
initialize(key, &h.r, &h.s)
|
||||
return
|
||||
}
|
||||
|
||||
// macState holds numbers in saturated 64-bit little-endian limbs. That is,
|
||||
// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
|
||||
type macState struct {
|
||||
// h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
|
||||
// can grow larger during and after rounds.
|
||||
h [3]uint64
|
||||
// r and s are the private key components.
|
||||
r [2]uint64
|
||||
s [2]uint64
|
||||
}
|
||||
|
||||
type macGeneric struct {
|
||||
macState
|
||||
|
||||
buffer [TagSize]byte
|
||||
offset int
|
||||
}
|
||||
|
||||
// Write splits the incoming message into TagSize chunks, and passes them to
|
||||
// update. It buffers incomplete chunks.
|
||||
func (h *macGeneric) Write(p []byte) (int, error) {
|
||||
nn := len(p)
|
||||
if h.offset > 0 {
|
||||
n := copy(h.buffer[h.offset:], p)
|
||||
if h.offset+n < TagSize {
|
||||
h.offset += n
|
||||
return nn, nil
|
||||
}
|
||||
p = p[n:]
|
||||
h.offset = 0
|
||||
updateGeneric(&h.macState, h.buffer[:])
|
||||
}
|
||||
if n := len(p) - (len(p) % TagSize); n > 0 {
|
||||
updateGeneric(&h.macState, p[:n])
|
||||
p = p[n:]
|
||||
}
|
||||
if len(p) > 0 {
|
||||
		h.offset += copy(h.buffer[h.offset:], p)
	}
	return nn, nil
}

// Sum flushes the last incomplete chunk from the buffer, if any, and generates
// the MAC output. It does not modify its state, in order to allow for multiple
// calls to Sum, even if no Write is allowed after Sum.
func (h *macGeneric) Sum(out *[TagSize]byte) {
	state := h.macState
	if h.offset > 0 {
		updateGeneric(&state, h.buffer[:h.offset])
	}
	finalize(out, &state.h, &state.s)
}

// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
// clears some bits of the secret coefficient to make it possible to implement
// multiplication more efficiently.
const (
	rMask0 = 0x0FFFFFFC0FFFFFFF
	rMask1 = 0x0FFFFFFC0FFFFFFC
)

func initialize(key *[32]byte, r, s *[2]uint64) {
	r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
	r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
	s[0] = binary.LittleEndian.Uint64(key[16:24])
	s[1] = binary.LittleEndian.Uint64(key[24:32])
}

// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
// bits.Mul64 and bits.Add64 intrinsics.
type uint128 struct {
	lo, hi uint64
}

func mul64(a, b uint64) uint128 {
	hi, lo := bitsMul64(a, b)
	return uint128{lo, hi}
}

func add128(a, b uint128) uint128 {
	lo, c := bitsAdd64(a.lo, b.lo, 0)
	hi, c := bitsAdd64(a.hi, b.hi, c)
	if c != 0 {
		panic("poly1305: unexpected overflow")
	}
	return uint128{lo, hi}
}

func shiftRightBy2(a uint128) uint128 {
	a.lo = a.lo>>2 | (a.hi&3)<<62
	a.hi = a.hi >> 2
	return a
}

// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
// 128 bits of message, it computes
//
//	h₊ = (h + m) * r mod 2¹³⁰ - 5
//
// If the msg length is not a multiple of TagSize, it assumes the last
// incomplete chunk is the final one.
func updateGeneric(state *macState, msg []byte) {
	h0, h1, h2 := state.h[0], state.h[1], state.h[2]
	r0, r1 := state.r[0], state.r[1]

	for len(msg) > 0 {
		var c uint64

		// For the first step, h + m, we use a chain of bits.Add64 intrinsics.
		// The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
		// reduced at the end of the multiplication below.
		//
		// The spec requires us to set a bit just above the message size, not to
		// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
		// add 1 to the most significant (2¹²⁸) limb, h2.
		if len(msg) >= TagSize {
			h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
			h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
			h2 += c + 1

			msg = msg[TagSize:]
		} else {
			var buf [TagSize]byte
			copy(buf[:], msg)
			buf[len(msg)] = 1

			h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
			h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
			h2 += c

			msg = nil
		}

		// Multiplication of big number limbs is similar to elementary school
		// columnar multiplication. Instead of digits, there are 64-bit limbs.
		//
		// We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
		//
		//                        h2    h1    h0  x
		//                              r1    r0  =
		//                       ----------------
		//                      h2r0  h1r0  h0r0     <-- individual 128-bit products
		//            +   h2r1  h1r1  h0r1
		//               ------------------------
		//                 m3    m2    m1    m0      <-- result in 128-bit overlapping limbs
		//               ------------------------
		//         m3.hi m2.hi m1.hi m0.hi           <-- carry propagation
		//     +         m3.lo m2.lo m1.lo m0.lo
		//        -------------------------------
		//        t4     t3    t2    t1    t0        <-- final result in 64-bit limbs
		//
		// The main difference from pen-and-paper multiplication is that we do
		// carry propagation in a separate step, as if we wrote two digit sums
		// at first (the 128-bit limbs), and then carried the tens all at once.

		h0r0 := mul64(h0, r0)
		h1r0 := mul64(h1, r0)
		h2r0 := mul64(h2, r0)
		h0r1 := mul64(h0, r1)
		h1r1 := mul64(h1, r1)
		h2r1 := mul64(h2, r1)

		// Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
		// top 4 bits cleared by rMask{0,1}, we know that their product is not going
		// to overflow 64 bits, so we can ignore the high part of the products.
		//
		// This also means that the product doesn't have a fifth limb (t4).
		if h2r0.hi != 0 {
			panic("poly1305: unexpected overflow")
		}
		if h2r1.hi != 0 {
			panic("poly1305: unexpected overflow")
		}

		m0 := h0r0
		m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
		m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
		m3 := h2r1

		t0 := m0.lo
		t1, c := bitsAdd64(m1.lo, m0.hi, 0)
		t2, c := bitsAdd64(m2.lo, m1.hi, c)
		t3, _ := bitsAdd64(m3.lo, m2.hi, c)

		// Now we have the result as 4 64-bit limbs, and we need to reduce it
		// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
		// a cheap partial reduction according to the reduction identity
		//
		//	c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5
		//
		// because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is
		// likely to be larger than 2¹³⁰ - 5, but still small enough to fit the
		// assumptions we make about h in the rest of the code.
		//
		// See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23

		// We split the final result at the 2¹³⁰ mark into h and cc, the carry.
		// Note that the carry bits are effectively shifted left by 2, in other
		// words, cc = c * 4 for the c in the reduction identity.
		h0, h1, h2 = t0, t1, t2&maskLow2Bits
		cc := uint128{t2 & maskNotLow2Bits, t3}

		// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.

		h0, c = bitsAdd64(h0, cc.lo, 0)
		h1, c = bitsAdd64(h1, cc.hi, c)
		h2 += c

		cc = shiftRightBy2(cc)

		h0, c = bitsAdd64(h0, cc.lo, 0)
		h1, c = bitsAdd64(h1, cc.hi, c)
		h2 += c

		// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
		//
		//	5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
	}

	state.h[0], state.h[1], state.h[2] = h0, h1, h2
}

const (
	maskLow2Bits    uint64 = 0x0000000000000003
	maskNotLow2Bits uint64 = ^maskLow2Bits
)

// select64 returns x if v == 1 and y if v == 0, in constant time.
func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }

// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
const (
	p0 = 0xFFFFFFFFFFFFFFFB
	p1 = 0xFFFFFFFFFFFFFFFF
	p2 = 0x0000000000000003
)

// finalize completes the modular reduction of h and computes
//
//	out = h + s mod 2¹²⁸
//
func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
	h0, h1, h2 := h[0], h[1], h[2]

	// After the partial reduction in updateGeneric, h might be more than
	// 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
	// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
	// result if the subtraction underflows, and t otherwise.

	hMinusP0, b := bitsSub64(h0, p0, 0)
	hMinusP1, b := bitsSub64(h1, p1, b)
	_, b = bitsSub64(h2, p2, b)

	// h = h if h < p else h - p
	h0 = select64(b, h0, hMinusP0)
	h1 = select64(b, h1, hMinusP1)

	// Finally, we compute the last Poly1305 step
	//
	//	tag = h + s mod 2¹²⁸
	//
	// by just doing a wide addition with the 128 low bits of h and discarding
	// the overflow.
	h0, c := bitsAdd64(h0, s[0], 0)
	h1, _ = bitsAdd64(h1, s[1], c)

	binary.LittleEndian.PutUint64(out[0:8], h0)
	binary.LittleEndian.PutUint64(out[8:16], h1)
}
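The partial reduction in updateGeneric leans on the identity c * 2¹³⁰ + n = c * 5 + n (mod 2¹³⁰ - 5), i.e. 2¹³⁰ ≡ 5 modulo the Crandall prime. Below is a minimal standalone sketch, not part of the vendored package, that checks this identity with math/big; the sample values for c and n are illustrative only.

// reduction_check.go — standalone sketch verifying the identity used by
// updateGeneric: c*2¹³⁰ + n ≡ c*5 + n (mod 2¹³⁰ - 5).
package main

import (
	"fmt"
	"math/big"
)

func main() {
	two130 := new(big.Int).Lsh(big.NewInt(1), 130) // 2¹³⁰
	p := new(big.Int).Sub(two130, big.NewInt(5))   // the Crandall prime 2¹³⁰ - 5

	// Arbitrary sample values: c is a small carry, n is just below 2¹³⁰.
	c := big.NewInt(7)
	n := new(big.Int).Sub(two130, big.NewInt(12345))

	// Left side: (c * 2¹³⁰ + n) mod p.
	lhs := new(big.Int).Mul(c, two130)
	lhs.Add(lhs, n).Mod(lhs, p)

	// Right side: (c * 5 + n) mod p.
	rhs := new(big.Int).Mul(c, big.NewInt(5))
	rhs.Add(rhs, n).Mod(rhs, p)

	fmt.Println(lhs.Cmp(rhs) == 0) // true
}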
11
vendor/golang.org/x/crypto/poly1305/sum_noasm.go
generated
vendored
11
vendor/golang.org/x/crypto/poly1305/sum_noasm.go
generated
vendored
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build s390x,!go1.11 !arm,!amd64,!s390x gccgo appengine nacl
// +build s390x,!go1.11 !arm,!amd64,!s390x,!ppc64le gccgo appengine nacl

package poly1305

// Sum generates an authenticator for msg using a one-time key and puts the
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
	sumGeneric(out, msg, key)
func sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
	h := newMAC(key)
	h.Write(msg)
	h.Sum(out)
}
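The unexported sum above is the fallback that the package's exported entry points route to on architectures without an assembly backend. The following is a minimal usage sketch of the exported API, assuming the vendored import path golang.org/x/crypto/poly1305 and its exported Sum, Verify, and TagSize as documented upstream; the key and message here are illustrative (real keys must be random and single-use).

package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// The key must be used for a single message only; reusing it lets an
	// attacker forge tags, as the package documentation warns.
	var key [32]byte
	copy(key[:], "this is 32 bytes of one-time key")

	msg := []byte("hello, poly1305")

	var tag [poly1305.TagSize]byte
	poly1305.Sum(&tag, msg, &key)

	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true
}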
58
vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go
generated
vendored
Normal file
58
vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ppc64le,!gccgo,!appengine

package poly1305

//go:noescape
func update(state *macState, msg []byte)

func sum(out *[16]byte, m []byte, key *[32]byte) {
	h := newMAC(key)
	h.Write(m)
	h.Sum(out)
}

func newMAC(key *[32]byte) (h mac) {
	initialize(key, &h.r, &h.s)
	return
}

// mac is a wrapper for macGeneric that redirects calls that would have gone to
// updateGeneric to update.
//
// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
// using function pointers would carry a major performance cost.
type mac struct{ macGeneric }

func (h *mac) Write(p []byte) (int, error) {
	nn := len(p)
	if h.offset > 0 {
		n := copy(h.buffer[h.offset:], p)
		if h.offset+n < TagSize {
			h.offset += n
			return nn, nil
		}
		p = p[n:]
		h.offset = 0
		update(&h.macState, h.buffer[:])
	}
	if n := len(p) - (len(p) % TagSize); n > 0 {
		update(&h.macState, p[:n])
		p = p[n:]
	}
	if len(p) > 0 {
		h.offset += copy(h.buffer[h.offset:], p)
	}
	return nn, nil
}

func (h *mac) Sum(out *[16]byte) {
	state := h.macState
	if h.offset > 0 {
		update(&state, h.buffer[:h.offset])
	}
	finalize(out, &state.h, &state.s)
}
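The Write method above buffers partial blocks until a full TagSize chunk is available, so feeding a message in arbitrary pieces must produce the same tag as a one-shot computation. Here is a hedged in-package test sketch of that property; the file name and the newMACGeneric constructor (defined earlier in sum_generic.go, outside this hunk) are assumptions about the surrounding package.

// poly1305_chunking_test.go — illustrative only; newMACGeneric is assumed to
// be the generic constructor declared elsewhere in this package.
package poly1305

import (
	"bytes"
	"testing"
)

func TestChunkedWriteMatchesOneShot(t *testing.T) {
	var key [32]byte
	for i := range key {
		key[i] = byte(i + 1)
	}
	msg := make([]byte, 100) // deliberately not a multiple of TagSize
	for i := range msg {
		msg[i] = byte(i)
	}

	// One-shot: a single Write followed by Sum.
	oneShot := newMACGeneric(&key)
	oneShot.Write(msg)
	var want [TagSize]byte
	oneShot.Sum(&want)

	// Chunked: uneven Writes exercise the partial-block buffer.
	chunked := newMACGeneric(&key)
	for _, n := range []int{1, 7, 16, 33, 43} { // chunk sizes sum to 100
		chunked.Write(msg[:n])
		msg = msg[n:]
	}
	var got [TagSize]byte
	chunked.Sum(&got)

	if !bytes.Equal(want[:], got[:]) {
		t.Errorf("chunked tag %x != one-shot tag %x", got, want)
	}
}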
181
vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s
generated
vendored
Normal file
181
vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s
generated
vendored
Normal file
@@ -0,0 +1,181 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ppc64le,!gccgo,!appengine

#include "textflag.h"

// This was ported from the amd64 implementation.

#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \
	MOVD (msg), t0; \
	MOVD 8(msg), t1; \
	MOVD $1, t2; \
	ADDC t0, h0, h0; \
	ADDE t1, h1, h1; \
	ADDE t2, h2; \
	ADD $16, msg

#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \
	MULLD r0, h0, t0; \
	MULLD r0, h1, t4; \
	MULHDU r0, h0, t1; \
	MULHDU r0, h1, t5; \
	ADDC t4, t1, t1; \
	MULLD r0, h2, t2; \
	ADDZE t5; \
	MULHDU r1, h0, t4; \
	MULLD r1, h0, h0; \
	ADD t5, t2, t2; \
	ADDC h0, t1, t1; \
	MULLD h2, r1, t3; \
	ADDZE t4, h0; \
	MULHDU r1, h1, t5; \
	MULLD r1, h1, t4; \
	ADDC t4, t2, t2; \
	ADDE t5, t3, t3; \
	ADDC h0, t2, t2; \
	MOVD $-4, t4; \
	MOVD t0, h0; \
	MOVD t1, h1; \
	ADDZE t3; \
	ANDCC $3, t2, h2; \
	AND t2, t4, t0; \
	ADDC t0, h0, h0; \
	ADDE t3, h1, h1; \
	SLD $62, t3, t4; \
	SRD $2, t2; \
	ADDZE h2; \
	OR t4, t2, t2; \
	SRD $2, t3; \
	ADDC t2, h0, h0; \
	ADDE t3, h1, h1; \
	ADDZE h2

DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
GLOBL ·poly1305Mask<>(SB), RODATA, $16

// func update(state *[7]uint64, msg []byte)
TEXT ·update(SB), $0-32
	MOVD state+0(FP), R3
	MOVD msg_base+8(FP), R4
	MOVD msg_len+16(FP), R5

	MOVD 0(R3), R8   // h0
	MOVD 8(R3), R9   // h1
	MOVD 16(R3), R10 // h2
	MOVD 24(R3), R11 // r0
	MOVD 32(R3), R12 // r1

	CMP R5, $16
	BLT bytes_between_0_and_15

loop:
	POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22)

multiply:
	POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21)
	ADD $-16, R5
	CMP R5, $16
	BGE loop

bytes_between_0_and_15:
	CMP $0, R5
	BEQ done
	MOVD $0, R16 // h0
	MOVD $0, R17 // h1

flush_buffer:
	CMP R5, $8
	BLE just1

	MOVD $8, R21
	SUB R21, R5, R21

	// Greater than 8 -- load the rightmost remaining bytes in msg
	// and put into R17 (h1)
	MOVD (R4)(R21), R17
	MOVD $16, R22

	// Find the offset to those bytes
	SUB R5, R22, R22
	SLD $3, R22

	// Shift to get only the bytes in msg
	SRD R22, R17, R17

	// Put 1 at high end
	MOVD $1, R23
	SLD $3, R21
	SLD R21, R23, R23
	OR R23, R17, R17

	// Remainder is 8
	MOVD $8, R5

just1:
	CMP R5, $8
	BLT less8

	// Exactly 8
	MOVD (R4), R16

	CMP $0, R17

	// Check if we've already set R17; if not
	// set 1 to indicate end of msg.
	BNE carry
	MOVD $1, R17
	BR carry

less8:
	MOVD $0, R16 // h0
	MOVD $0, R22 // shift count
	CMP R5, $4
	BLT less4
	MOVWZ (R4), R16
	ADD $4, R4
	ADD $-4, R5
	MOVD $32, R22

less4:
	CMP R5, $2
	BLT less2
	MOVHZ (R4), R21
	SLD R22, R21, R21
	OR R16, R21, R16
	ADD $16, R22
	ADD $-2, R5
	ADD $2, R4

less2:
	CMP $0, R5
	BEQ insert1
	MOVBZ (R4), R21
	SLD R22, R21, R21
	OR R16, R21, R16
	ADD $8, R22

insert1:
	// Insert 1 at end of msg
	MOVD $1, R21
	SLD R22, R21, R21
	OR R16, R21, R16

carry:
	// Add new values to h0, h1, h2
	ADDC R16, R8
	ADDE R17, R9
	ADDE $0, R10
	MOVD $16, R5
	ADD R5, R4
	BR multiply

done:
	// Save h0, h1, h2 in state
	MOVD R8, 0(R3)
	MOVD R9, 8(R3)
	MOVD R10, 16(R3)
	RET
Some files were not shown because too many files have changed in this diff.