[FEATURE/FIX] Viper Configuration and JSON Schema (#472)

parent 34be507c4a
commit 092f26a4e2

.golangci.yml (new file, 3 additions)
@@ -0,0 +1,3 @@
linters-settings:
  errcheck:
    check-blank: false # to keep `_ = viper.BindPFlag(...)` from throwing errors
CHANGELOG.md (29 additions)
@@ -1,5 +1,34 @@
# Changelog

## v4.1.0

### Important

- Using Viper brings us lots of nice features, but also one problem:
  - We had to switch StringArray flags to StringSlice flags, which
    - allow using multiple comma-separated values in a single flag, but also
    - split flag values that contain a comma into separate parts (and we cannot handle issues that arise due to this)
  - so if you rely on commas in your flag values (e.g. for `--env X=a,b,c`), please consider filing an issue or supporting <https://github.com/spf13/viper/issues/246> and <https://github.com/spf13/viper/pull/398>
    - `--env X=a,b,c` would be treated the same as `--env X=a`, `--env b`, `--env c` (see the sketch below)
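A minimal sketch of the flag behavior described above (not part of this commit; it only uses the upstream `spf13/pflag` package): a StringArray flag keeps a comma-containing value as one element, while a StringSlice flag splits it.

```go
package main

import (
    "fmt"

    "github.com/spf13/pflag"
)

func main() {
    fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    arr := fs.StringArray("env-array", nil, "StringArray flag (old behavior)")
    sl := fs.StringSlice("env-slice", nil, "StringSlice flag (new behavior)")

    // The same value is passed to both flag types.
    _ = fs.Parse([]string{"--env-array", "X=a,b,c", "--env-slice", "X=a,b,c"})

    fmt.Println(*arr) // [X=a,b,c] -> one element, commas preserved
    fmt.Println(*sl)  // [X=a b c] -> split on commas into three elements
}
```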

### Features & Enhancements

- use [viper](https://github.com/spf13/viper) for configuration management
  - takes over the job of properly fetching and merging config options from
    - CLI arguments/flags
    - environment variables
    - config file
  - this also fixes some issues with using the config file (like cobra defaults overriding config file values)
- add JSON-Schema validation for the `Simple` config file schema
- new config version `k3d.io/v1alpha2` (some naming changes)
  - `exposeAPI` -> `kubeAPI`
  - `options.k3d.noRollback` -> `options.k3d.disableRollback`
  - `options.k3d.prepDisableHostIPInjection` -> `options.k3d.disableHostIPInjection`

### Misc

- tests/e2e: add config override test

## v4.0.0

### Breaking Changes
@@ -30,11 +30,13 @@ import (
    "time"

    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "gopkg.in/yaml.v2"

    cliutil "github.com/rancher/k3d/v4/cmd/util"
    k3dCluster "github.com/rancher/k3d/v4/pkg/client"
    "github.com/rancher/k3d/v4/pkg/config"
    conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
    conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
    "github.com/rancher/k3d/v4/pkg/runtimes"
    k3d "github.com/rancher/k3d/v4/pkg/types"
    "github.com/rancher/k3d/v4/version"
@@ -42,6 +44,8 @@ import (
    log "github.com/sirupsen/logrus"
)

var configFile string

const clusterCreateDescription = `
Create a new k3s cluster with containerized nodes (k3s in docker).
Every cluster will consist of one or more containers:
@@ -50,58 +54,95 @@ Every cluster will consist of one or more containers:
    - (optionally) 1 (or more) agent node containers (k3s)
`

// flags that go through some pre-processing before transforming them to config
type preProcessedFlags struct {
    APIPort     string
    Volumes     []string
    Ports       []string
    Labels      []string
    Env         []string
    RegistryUse []string
var cfgViper = viper.New()
var ppViper = viper.New()
func initConfig() {

    // Viper for pre-processed config options
    ppViper.SetEnvPrefix("K3D")

    // viper for the general config (file, env and non pre-processed flags)
    cfgViper.SetEnvPrefix("K3D")
    cfgViper.AutomaticEnv()

    cfgViper.SetConfigType("yaml")

    // Set config file, if specified
    if configFile != "" {
        cfgViper.SetConfigFile(configFile)

        if _, err := os.Stat(configFile); err != nil {
            log.Fatalf("Failed to stat config file %s: %+v", configFile, err)
        }

        log.Tracef("Schema: %+v", conf.JSONSchema)

        if err := config.ValidateSchemaFile(configFile, []byte(conf.JSONSchema)); err != nil {
            log.Fatalf("Schema Validation failed for config file %s: %+v", configFile, err)
        }

        // try to read config into memory (viper map structure)
        if err := cfgViper.ReadInConfig(); err != nil {
            if _, ok := err.(viper.ConfigFileNotFoundError); ok {
                log.Fatalf("Config file %s not found: %+v", configFile, err)
            }
            // config file found but some other error happened
            log.Fatalf("Failed to read config file %s: %+v", configFile, err)
        }

        log.Infof("Using config file %s", cfgViper.ConfigFileUsed())
    }
    if log.GetLevel() >= log.DebugLevel {
        c, _ := yaml.Marshal(cfgViper.AllSettings())
        log.Debugf("Configuration:\n%s", c)

        c, _ = yaml.Marshal(ppViper.AllSettings())
        log.Debugf("Additional CLI Configuration:\n%s", c)
    }
}
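To illustrate what `initConfig()` sets up, here is a standalone sketch (not code from this commit; the values are made up, only the `image`/`servers` key names mirror real config keys): once `AutomaticEnv()` is enabled with the `K3D` prefix and the config file is read, Viper resolves each key as changed flag > environment variable > config file > Viper default, which is exactly why cobra flag defaults no longer clobber config-file values.

```go
package main

import (
    "bytes"
    "fmt"
    "os"

    "github.com/spf13/viper"
)

func main() {
    v := viper.New()
    v.SetEnvPrefix("K3D")
    v.AutomaticEnv()
    v.SetConfigType("yaml")

    // Stand-ins for a real config file and environment (hypothetical values).
    _ = v.ReadConfig(bytes.NewBufferString("image: rancher/k3s:v1.20.0-k3s1\nservers: 3\n"))
    os.Setenv("K3D_IMAGE", "rancher/k3s:latest")

    v.SetDefault("servers", 1)

    // Environment beats the config file; the config file beats the Viper default.
    fmt.Println(v.GetString("image")) // rancher/k3s:latest (from K3D_IMAGE)
    fmt.Println(v.GetInt("servers"))  // 3 (from the "file"), not the default 1
}
```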

// NewCmdClusterCreate returns a new cobra command
func NewCmdClusterCreate() *cobra.Command {

    cliConfig := &conf.SimpleConfig{}
    var configFile string
    ppFlags := &preProcessedFlags{}

    // create new command
    cmd := &cobra.Command{
        Use:   "create NAME",
        Short: "Create a new cluster",
        Long:  clusterCreateDescription,
        Args:  cobra.RangeArgs(0, 1), // exactly one cluster name can be set (default: k3d.DefaultClusterName)
        PreRunE: func(cmd *cobra.Command, args []string) error {
            initConfig()
            return nil
        },
        Run: func(cmd *cobra.Command, args []string) {

            /*********************
             * CLI Configuration *
             *********************/
            parseCreateClusterCmd(cmd, args, cliConfig, ppFlags)

            /************************
             * Merge Configurations *
             ************************/
            log.Debugf("========== Simple Config ==========\n%+v\n==========================\n", cliConfig)

            if configFile != "" {
                configFromFile, err := config.ReadConfig(configFile)
                if err != nil {
                    log.Fatalln(err)
                }
                cliConfig, err = config.MergeSimple(*cliConfig, configFromFile.(conf.SimpleConfig))
                if err != nil {
                    log.Fatalln(err)
                }
            /*************************
             * Compute Configuration *
             *************************/
            cfg, err := config.FromViperSimple(cfgViper)
            if err != nil {
                log.Fatalln(err)
            }

            log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", cliConfig)
            log.Debugf("========== Simple Config ==========\n%+v\n==========================\n", cfg)

            cfg, err = applyCLIOverrides(cfg)
            if err != nil {
                log.Fatalf("Failed to apply CLI overrides: %+v", err)
            }

            log.Debugf("========== Merged Simple Config ==========\n%+v\n==========================\n", cfg)

            /**************************************
             * Transform & Validate Configuration *
             **************************************/
            clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, *cliConfig)

            // Set the name
            if len(args) != 0 {
                cfg.Name = args[0]
            }

            clusterConfig, err := config.TransformSimpleToClusterConfig(cmd.Context(), runtimes.SelectedRuntime, cfg)
            if err != nil {
                log.Fatalln(err)
            }
@ -128,7 +169,7 @@ func NewCmdClusterCreate() *cobra.Command {
|
||||
if err := k3dCluster.ClusterRun(cmd.Context(), runtimes.SelectedRuntime, clusterConfig); err != nil {
|
||||
// rollback if creation failed
|
||||
log.Errorln(err)
|
||||
if cliConfig.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/
|
||||
if cfg.Options.K3dOptions.NoRollback { // TODO: move rollback mechanics to pkg/
|
||||
log.Fatalln("Cluster creation FAILED, rollback deactivated.")
|
||||
}
|
||||
// rollback if creation failed
|
||||
@ -152,7 +193,7 @@ func NewCmdClusterCreate() *cobra.Command {
|
||||
|
||||
if clusterConfig.KubeconfigOpts.UpdateDefaultKubeconfig {
|
||||
log.Debugf("Updating default kubeconfig with a new context for cluster %s", clusterConfig.Cluster.Name)
|
||||
if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: cliConfig.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil {
|
||||
if _, err := k3dCluster.KubeconfigGetWrite(cmd.Context(), runtimes.SelectedRuntime, &clusterConfig.Cluster, "", &k3dCluster.WriteKubeConfigOptions{UpdateExisting: true, OverwriteExisting: false, UpdateCurrentContext: cfg.Options.KubeconfigOptions.SwitchCurrentContext}); err != nil {
|
||||
log.Warningln(err)
|
||||
}
|
||||
}
|
||||
@ -176,59 +217,117 @@ func NewCmdClusterCreate() *cobra.Command {
|
||||
},
|
||||
}
|
||||
|
||||
/*********
|
||||
* Flags *
|
||||
*********/
|
||||
cmd.Flags().StringVar(&ppFlags.APIPort, "api-port", "random", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`")
|
||||
cmd.Flags().IntVarP(&cliConfig.Servers, "servers", "s", 1, "Specify how many servers you want to create")
|
||||
cmd.Flags().IntVarP(&cliConfig.Agents, "agents", "a", 0, "Specify how many agents you want to create")
|
||||
cmd.Flags().StringVarP(&cliConfig.Image, "image", "i", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)), "Specify k3s image that you want to use for the nodes")
|
||||
cmd.Flags().StringVar(&cliConfig.Network, "network", "", "Join an existing network")
|
||||
cmd.Flags().StringVar(&cliConfig.ClusterToken, "token", "", "Specify a cluster token. By default, we generate one.")
|
||||
cmd.Flags().StringArrayVarP(&ppFlags.Volumes, "volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`")
|
||||
cmd.Flags().StringArrayVarP(&ppFlags.Ports, "port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`")
|
||||
cmd.Flags().StringArrayVarP(&ppFlags.Labels, "label", "l", nil, "Add label to node container (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -l \"my.label@agent[0,1]\" -v \"other.label=somevalue@server[0]\"`")
|
||||
cmd.Flags().BoolVar(&cliConfig.Options.K3dOptions.Wait, "wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
|
||||
cmd.Flags().DurationVar(&cliConfig.Options.K3dOptions.Timeout, "timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
|
||||
cmd.Flags().BoolVar(&cliConfig.Options.KubeconfigOptions.UpdateDefaultKubeconfig, "kubeconfig-update-default", true, "Directly update the default kubeconfig with the new cluster's context")
|
||||
cmd.Flags().BoolVar(&cliConfig.Options.KubeconfigOptions.SwitchCurrentContext, "kubeconfig-switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default)")
|
||||
cmd.Flags().BoolVar(&cliConfig.Options.K3dOptions.DisableLoadbalancer, "no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")
|
||||
cmd.Flags().BoolVar(&cliConfig.Options.K3dOptions.NoRollback, "no-rollback", false, "Disable the automatic rollback actions, if anything goes wrong")
|
||||
cmd.Flags().BoolVar(&cliConfig.Options.K3dOptions.PrepDisableHostIPInjection, "no-hostip", false, "Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS")
|
||||
cmd.Flags().StringVar(&cliConfig.Options.Runtime.GPURequest, "gpus", "", "GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker]")
|
||||
cmd.Flags().StringArrayVarP(&ppFlags.Env, "env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com\" -e \"SOME_KEY=SOME_VAL@server[0]\"`")
|
||||
/***************
|
||||
* Config File *
|
||||
***************/
|
||||
|
||||
/* Image Importing */
|
||||
cmd.Flags().BoolVar(&cliConfig.Options.K3dOptions.DisableImageVolume, "no-image-volume", false, "Disable the creation of a volume for importing images")
|
||||
|
||||
/* Config File */
|
||||
cmd.Flags().StringVarP(&configFile, "config", "c", "", "Path of a config file to use")
|
||||
if err := cobra.MarkFlagFilename(cmd.Flags(), "config", "yaml", "yml"); err != nil {
|
||||
log.Fatalln("Failed to mark flag 'config' as filename flag")
|
||||
}
|
||||
|
||||
/***********************
|
||||
* Pre-Processed Flags *
|
||||
***********************
|
||||
*
|
||||
* Flags that have a different style in the CLI than their internal representation.
|
||||
* Also, we cannot set (viper) default values just here for those.
|
||||
* Example:
|
||||
* CLI: `--api-port 0.0.0.0:6443`
|
||||
* Config File:
|
||||
* exposeAPI:
|
||||
* hostIP: 0.0.0.0
|
||||
* port: 6443
|
||||
*
|
||||
* Note: here we also use Slice-type flags instead of Array because of https://github.com/spf13/viper/issues/380
|
||||
*/
|
||||
|
||||
cmd.Flags().String("api-port", "", "Specify the Kubernetes API server port exposed on the LoadBalancer (Format: `[HOST:]HOSTPORT`)\n - Example: `k3d cluster create --servers 3 --api-port 0.0.0.0:6550`")
|
||||
_ = ppViper.BindPFlag("cli.api-port", cmd.Flags().Lookup("api-port"))
|
||||
|
||||
cmd.Flags().StringSliceP("env", "e", nil, "Add environment variables to nodes (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -e \"HTTP_PROXY=my.proxy.com\" -e \"SOME_KEY=SOME_VAL@server[0]\"`")
|
||||
_ = ppViper.BindPFlag("cli.env", cmd.Flags().Lookup("env"))
|
||||
|
||||
cmd.Flags().StringSliceP("volume", "v", nil, "Mount volumes into the nodes (Format: `[SOURCE:]DEST[@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -v /my/path@agent[0,1] -v /tmp/test:/tmp/other@server[0]`")
|
||||
_ = ppViper.BindPFlag("cli.volumes", cmd.Flags().Lookup("volume"))
|
||||
|
||||
cmd.Flags().StringSliceP("port", "p", nil, "Map ports from the node containers to the host (Format: `[HOST:][HOSTPORT:]CONTAINERPORT[/PROTOCOL][@NODEFILTER]`)\n - Example: `k3d cluster create --agents 2 -p 8080:80@agent[0] -p 8081@agent[1]`")
|
||||
_ = ppViper.BindPFlag("cli.ports", cmd.Flags().Lookup("port"))
|
||||
|
||||
cmd.Flags().StringSliceP("label", "l", nil, "Add label to node container (Format: `KEY[=VALUE][@NODEFILTER[;NODEFILTER...]]`\n - Example: `k3d cluster create --agents 2 -l \"my.label@agent[0,1]\" -v \"other.label=somevalue@server[0]\"`")
|
||||
_ = ppViper.BindPFlag("cli.labels", cmd.Flags().Lookup("label"))
|
||||
|
||||
/******************
|
||||
* "Normal" Flags *
|
||||
******************
|
||||
*
|
||||
* No pre-processing needed on CLI level.
|
||||
* Bound to Viper config value.
|
||||
* Default Values set via Viper.
|
||||
*/
|
||||
|
||||
cmd.Flags().IntP("servers", "s", 0, "Specify how many servers you want to create")
|
||||
_ = cfgViper.BindPFlag("servers", cmd.Flags().Lookup("servers"))
|
||||
cfgViper.SetDefault("servers", 1)
|
||||
|
||||
cmd.Flags().IntP("agents", "a", 0, "Specify how many agents you want to create")
|
||||
_ = cfgViper.BindPFlag("agents", cmd.Flags().Lookup("agents"))
|
||||
cfgViper.SetDefault("agents", 0)
|
||||
|
||||
cmd.Flags().StringP("image", "i", "", "Specify k3s image that you want to use for the nodes")
|
||||
_ = cfgViper.BindPFlag("image", cmd.Flags().Lookup("image"))
|
||||
cfgViper.SetDefault("image", fmt.Sprintf("%s:%s", k3d.DefaultK3sImageRepo, version.GetK3sVersion(false)))
|
||||
|
||||
cmd.Flags().String("network", "", "Join an existing network")
|
||||
_ = cfgViper.BindPFlag("network", cmd.Flags().Lookup("network"))
|
||||
|
||||
cmd.Flags().String("token", "", "Specify a cluster token. By default, we generate one.")
|
||||
_ = cfgViper.BindPFlag("token", cmd.Flags().Lookup("token"))
|
||||
|
||||
cmd.Flags().Bool("wait", true, "Wait for the server(s) to be ready before returning. Use '--timeout DURATION' to not wait forever.")
|
||||
_ = cfgViper.BindPFlag("options.k3d.wait", cmd.Flags().Lookup("wait"))
|
||||
|
||||
cmd.Flags().Duration("timeout", 0*time.Second, "Rollback changes if cluster couldn't be created in specified duration.")
|
||||
_ = cfgViper.BindPFlag("options.k3d.timeout", cmd.Flags().Lookup("timeout"))
|
||||
|
||||
cmd.Flags().Bool("kubeconfig-update-default", true, "Directly update the default kubeconfig with the new cluster's context")
|
||||
_ = cfgViper.BindPFlag("options.kubeconfig.updatedefaultkubeconfig", cmd.Flags().Lookup("kubeconfig-update-default"))
|
||||
|
||||
cmd.Flags().Bool("kubeconfig-switch-context", true, "Directly switch the default kubeconfig's current-context to the new cluster's context (requires --kubeconfig-update-default)")
|
||||
_ = cfgViper.BindPFlag("options.kubeconfig.switchcurrentcontext", cmd.Flags().Lookup("kubeconfig-switch-context"))
|
||||
|
||||
cmd.Flags().Bool("no-lb", false, "Disable the creation of a LoadBalancer in front of the server nodes")
|
||||
_ = cfgViper.BindPFlag("options.k3d.disableloadbalancer", cmd.Flags().Lookup("no-lb"))
|
||||
|
||||
cmd.Flags().Bool("no-rollback", false, "Disable the automatic rollback actions, if anything goes wrong")
|
||||
_ = cfgViper.BindPFlag("options.k3d.disablerollback", cmd.Flags().Lookup("no-rollback"))
|
||||
|
||||
cmd.Flags().Bool("no-hostip", false, "Disable the automatic injection of the Host IP as 'host.k3d.internal' into the containers and CoreDNS")
|
||||
_ = cfgViper.BindPFlag("options.k3d.disablehostipinjection", cmd.Flags().Lookup("no-hostip"))
|
||||
|
||||
cmd.Flags().String("gpus", "", "GPU devices to add to the cluster node containers ('all' to pass all GPUs) [From docker]")
|
||||
_ = cfgViper.BindPFlag("options.runtime.gpurequest", cmd.Flags().Lookup("gpus"))
|
||||
|
||||
/* Image Importing */
|
||||
cmd.Flags().Bool("no-image-volume", false, "Disable the creation of a volume for importing images")
|
||||
_ = cfgViper.BindPFlag("options.k3d.disableimagevolume", cmd.Flags().Lookup("no-image-volume"))
|
||||
|
||||
/* Registry */
|
||||
cmd.Flags().StringArrayVar(&cliConfig.Registries.Use, "registry-use", nil, "Connect to one or more k3d-managed registries running locally")
|
||||
cmd.Flags().BoolVar(&cliConfig.Registries.Create, "registry-create", false, "Create a k3d-managed registry and connect it to the cluster")
|
||||
cmd.Flags().StringVar(&cliConfig.Registries.Config, "registry-config", "", "Specify path to an extra registries.yaml file")
|
||||
cmd.Flags().StringSlice("registry-use", nil, "Connect to one or more k3d-managed registries running locally")
|
||||
_ = cfgViper.BindPFlag("registries.use", cmd.Flags().Lookup("registry-use"))
|
||||
|
||||
/* Multi Server Configuration */
|
||||
cmd.Flags().Bool("registry-create", false, "Create a k3d-managed registry and connect it to the cluster")
|
||||
_ = cfgViper.BindPFlag("registries.create", cmd.Flags().Lookup("registry-create"))
|
||||
|
||||
// multi-server - datastore
|
||||
// TODO: implement multi-server setups with external data store
|
||||
// cmd.Flags().String("datastore-endpoint", "", "[WIP] Specify external datastore endpoint (e.g. for multi server clusters)")
|
||||
/*
|
||||
cmd.Flags().String("datastore-network", "", "Specify container network where we can find the datastore-endpoint (add a connection)")
|
||||
|
||||
// TODO: set default paths and hint, that one should simply mount the files using --volume flag
|
||||
cmd.Flags().String("datastore-cafile", "", "Specify external datastore's TLS Certificate Authority (CA) file")
|
||||
cmd.Flags().String("datastore-certfile", "", "Specify external datastore's TLS certificate file'")
|
||||
cmd.Flags().String("datastore-keyfile", "", "Specify external datastore's TLS key file'")
|
||||
*/
|
||||
cmd.Flags().String("registry-config", "", "Specify path to an extra registries.yaml file")
|
||||
_ = cfgViper.BindPFlag("registries.config", cmd.Flags().Lookup("registry-config"))
|
||||
|
||||
/* k3s */
|
||||
cmd.Flags().StringArrayVar(&cliConfig.Options.K3sOptions.ExtraServerArgs, "k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)")
|
||||
cmd.Flags().StringArrayVar(&cliConfig.Options.K3sOptions.ExtraAgentArgs, "k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)")
|
||||
cmd.Flags().StringArray("k3s-server-arg", nil, "Additional args passed to the `k3s server` command on server nodes (new flag per arg)")
|
||||
_ = cfgViper.BindPFlag("options.k3s.extraserverargs", cmd.Flags().Lookup("k3s-server-arg"))
|
||||
|
||||
cmd.Flags().StringArray("k3s-agent-arg", nil, "Additional args passed to the `k3s agent` command on agent nodes (new flag per arg)")
|
||||
_ = cfgViper.BindPFlag("options.k3s.extraagentargs", cmd.Flags().Lookup("k3s-agent-arg"))
|
||||
|
||||
/* Subcommands */
|
||||
|
||||
@ -236,33 +335,34 @@ func NewCmdClusterCreate() *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
// parseCreateClusterCmd parses the command input into variables required to create a cluster
|
||||
func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.SimpleConfig, ppFlags *preProcessedFlags) {
|
||||
|
||||
/********************************
|
||||
* Parse and validate arguments *
|
||||
********************************/
|
||||
|
||||
if len(args) != 0 {
|
||||
cliConfig.Name = args[0]
|
||||
}
|
||||
func applyCLIOverrides(cfg conf.SimpleConfig) (conf.SimpleConfig, error) {
|
||||
|
||||
/****************************
|
||||
* Parse and validate flags *
|
||||
****************************/
|
||||
|
||||
// -> WAIT TIMEOUT // TODO: timeout to be validated in pkg/
|
||||
if cmd.Flags().Changed("timeout") && cliConfig.Options.K3dOptions.Timeout <= 0*time.Second {
|
||||
log.Fatalln("--timeout DURATION must be >= 1s")
|
||||
}
|
||||
|
||||
// -> API-PORT
|
||||
// parse the port mapping
|
||||
exposeAPI, err := cliutil.ParsePortExposureSpec(ppFlags.APIPort, k3d.DefaultAPIPort)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
var exposeAPI *k3d.ExposureOpts
|
||||
if ppViper.IsSet("cli.api-port") {
|
||||
if cfg.ExposeAPI.HostPort != "" {
|
||||
log.Debugf("Overriding pre-defined kubeAPI Exposure Spec %+v with CLI argument %s", cfg.ExposeAPI, ppViper.GetString("cli.api-port"))
|
||||
}
|
||||
var err error
|
||||
exposeAPI, err = cliutil.ParsePortExposureSpec(ppViper.GetString("cli.api-port"), k3d.DefaultAPIPort)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
}
|
||||
cliConfig.ExposeAPI = conf.SimpleExposureOpts{
|
||||
|
||||
if exposeAPI == nil {
|
||||
var err error
|
||||
exposeAPI, err = cliutil.ParsePortExposureSpec("random", k3d.DefaultAPIPort)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
}
|
||||
cfg.ExposeAPI = conf.SimpleExposureOpts{
|
||||
Host: exposeAPI.Host,
|
||||
HostIP: exposeAPI.Binding.HostIP,
|
||||
HostPort: exposeAPI.Binding.HostPort,
|
||||
@ -271,7 +371,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.Si
|
||||
// -> VOLUMES
|
||||
// volumeFilterMap will map volume mounts to applied node filters
|
||||
volumeFilterMap := make(map[string][]string, 1)
|
||||
for _, volumeFlag := range ppFlags.Volumes {
|
||||
for _, volumeFlag := range ppViper.GetStringSlice("cli.volumes") {
|
||||
|
||||
// split node filter from the specified volume
|
||||
volume, filters, err := cliutil.SplitFiltersFromFlag(volumeFlag)
|
||||
@ -279,7 +379,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.Si
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
if strings.Contains(volume, k3d.DefaultRegistriesFilePath) && (cliConfig.Registries.Create || cliConfig.Registries.Config != "" || len(cliConfig.Registries.Use) != 0) {
|
||||
if strings.Contains(volume, k3d.DefaultRegistriesFilePath) && (cfg.Registries.Create || cfg.Registries.Config != "" || len(cfg.Registries.Use) != 0) {
|
||||
log.Warnf("Seems like you're mounting a file at '%s' while also using a referenced registries config or k3d-managed registries: Your mounted file will probably be overwritten!", k3d.DefaultRegistriesFilePath)
|
||||
}
|
||||
|
||||
@ -292,7 +392,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.Si
|
||||
}
|
||||
|
||||
for volume, nodeFilters := range volumeFilterMap {
|
||||
cliConfig.Volumes = append(cliConfig.Volumes, conf.VolumeWithNodeFilters{
|
||||
cfg.Volumes = append(cfg.Volumes, conf.VolumeWithNodeFilters{
|
||||
Volume: volume,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
@ -302,7 +402,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.Si
|
||||
|
||||
// -> PORTS
|
||||
portFilterMap := make(map[string][]string, 1)
|
||||
for _, portFlag := range ppFlags.Ports {
|
||||
for _, portFlag := range ppViper.GetStringSlice("cli.ports") {
|
||||
// split node filter from the specified volume
|
||||
portmap, filters, err := cliutil.SplitFiltersFromFlag(portFlag)
|
||||
if err != nil {
|
||||
@ -322,7 +422,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.Si
|
||||
}
|
||||
|
||||
for port, nodeFilters := range portFilterMap {
|
||||
cliConfig.Ports = append(cliConfig.Ports, conf.PortWithNodeFilters{
|
||||
cfg.Ports = append(cfg.Ports, conf.PortWithNodeFilters{
|
||||
Port: port,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
@ -333,7 +433,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.Si
|
||||
// --label
|
||||
// labelFilterMap will add container label to applied node filters
|
||||
labelFilterMap := make(map[string][]string, 1)
|
||||
for _, labelFlag := range ppFlags.Labels {
|
||||
for _, labelFlag := range ppViper.GetStringSlice("cli.labels") {
|
||||
|
||||
// split node filter from the specified label
|
||||
label, nodeFilters, err := cliutil.SplitFiltersFromFlag(labelFlag)
|
||||
@ -350,7 +450,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.Si
|
||||
}
|
||||
|
||||
for label, nodeFilters := range labelFilterMap {
|
||||
cliConfig.Labels = append(cliConfig.Labels, conf.LabelWithNodeFilters{
|
||||
cfg.Labels = append(cfg.Labels, conf.LabelWithNodeFilters{
|
||||
Label: label,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
@ -361,7 +461,7 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.Si
|
||||
// --env
|
||||
// envFilterMap will add container env vars to applied node filters
|
||||
envFilterMap := make(map[string][]string, 1)
|
||||
for _, envFlag := range ppFlags.Env {
|
||||
for _, envFlag := range ppViper.GetStringSlice("cli.env") {
|
||||
|
||||
// split node filter from the specified env var
|
||||
env, filters, err := cliutil.SplitFiltersFromFlag(envFlag)
|
||||
@ -378,11 +478,13 @@ func parseCreateClusterCmd(cmd *cobra.Command, args []string, cliConfig *conf.Si
|
||||
}
|
||||
|
||||
for envVar, nodeFilters := range envFilterMap {
|
||||
cliConfig.Env = append(cliConfig.Env, conf.EnvVarWithNodeFilters{
|
||||
cfg.Env = append(cfg.Env, conf.EnvVarWithNodeFilters{
|
||||
EnvVar: envVar,
|
||||
NodeFilters: nodeFilters,
|
||||
})
|
||||
}
|
||||
|
||||
log.Tracef("EnvFilterMap: %+v", envFilterMap)
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
@ -25,7 +25,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
config "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
|
||||
config "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -94,7 +94,6 @@ func Execute() {
|
||||
}
|
||||
|
||||
func init() {
|
||||
cobra.OnInitialize(initLogging, initRuntime)
|
||||
|
||||
rootCmd.PersistentFlags().BoolVar(&flags.debugLogging, "verbose", false, "Enable verbose output (debug logging)")
|
||||
rootCmd.PersistentFlags().BoolVar(&flags.traceLogging, "trace", false, "Enable super verbose output (trace logging)")
|
||||
@ -119,6 +118,9 @@ func init() {
|
||||
printVersion()
|
||||
},
|
||||
})
|
||||
|
||||
// Init
|
||||
cobra.OnInitialize(initLogging, initRuntime)
|
||||
}
|
||||
|
||||
// initLogging initializes the logger
|
||||
|
@@ -41,8 +41,7 @@ func ParsePortExposureSpec(exposedPortSpec, internalPort string) (*k3d.ExposureO
    match := apiPortRegexp.FindStringSubmatch(exposedPortSpec)

    if len(match) == 0 {
        log.Errorln("Failed to parse Port Exposure specification")
        return nil, fmt.Errorf("Port Exposure Spec format error: Must be [(HostIP|HostName):]HostPort")
        return nil, fmt.Errorf("Failed to parse Port Exposure specification '%s': Format must be [(HostIP|HostName):]HostPort", exposedPortSpec)
    }

    submatches := util.MapSubexpNames(apiPortRegexp.SubexpNames(), match)
@@ -29,7 +29,7 @@ This file can also be used for providing additional information necessary for ac
If you're using a `SimpleConfig` file to configure your k3d cluster, you may as well embed the registries.yaml in there directly:

```yaml
apiVersion: k3d.io/v1alpha1
apiVersion: k3d.io/v1alpha2
kind: Simple
name: test
servers: 1
go.mod (5 changes)
@ -39,11 +39,14 @@ require (
|
||||
github.com/spf13/viper v1.7.1
|
||||
github.com/stretchr/testify v1.6.1 // indirect
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58
|
||||
google.golang.org/grpc v1.34.0 // indirect
|
||||
gopkg.in/ini.v1 v1.58.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.3.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gotest.tools v2.2.0+incompatible
|
||||
gotest.tools/v3 v3.0.3 // indirect
|
||||
k8s.io/client-go v0.17.0
|
||||
sigs.k8s.io/yaml v1.2.0
|
||||
)
|
||||
|
go.sum (12 changes)
@ -448,6 +448,7 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
|
||||
github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
@ -528,9 +529,16 @@ github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOV
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
|
||||
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f h1:mvXjJIHRZyhNuGassLTcXTwjiWq7NmjdavZsUnmFybQ=
|
||||
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
||||
@ -750,6 +758,8 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
@ -785,4 +795,6 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||
|
@ -35,7 +35,7 @@ import (
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/rancher/k3d/v4/pkg/actions"
|
||||
config "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
|
||||
config "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
|
||||
k3drt "github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
"github.com/rancher/k3d/v4/pkg/runtimes/docker"
|
||||
runtimeErr "github.com/rancher/k3d/v4/pkg/runtimes/errors"
|
||||
|
@@ -29,36 +29,33 @@ import (

    "github.com/spf13/viper"

    conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
    k3d "github.com/rancher/k3d/v4/pkg/types"
    conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
)

func ReadConfig(file string) (conf.Config, error) {
    cfgViper := viper.New()
func FromViperSimple(config *viper.Viper) (conf.SimpleConfig, error) {

    cfgViper.SetConfigFile(file)
    var cfg conf.SimpleConfig

    cfgViper.SetConfigType("yaml")
    cfgViper.SetEnvPrefix(k3d.DefaultObjectNamePrefix)
    cfgViper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
    cfgViper.AutomaticEnv()

    // try to read config into memory (viper map structure)
    if err := cfgViper.ReadInConfig(); err != nil {
        if _, ok := err.(viper.ConfigFileNotFoundError); ok {
            log.Errorln("No config file found!")

            return nil, err
        }
        // config file found but some other error happened
        log.Debugf("Failed to read config file: %+v", cfgViper.ConfigFileUsed())
        return nil, err
    // determine config kind
    if config.GetString("kind") != "" && strings.ToLower(config.GetString("kind")) != "simple" {
        return cfg, fmt.Errorf("Wrong `kind` '%s' != 'simple' in config file", config.GetString("kind"))
    }

    if err := config.Unmarshal(&cfg); err != nil {
        log.Errorln("Failed to unmarshal File config")

        return cfg, err
    }

    return cfg, nil
}
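A short usage sketch of the new `FromViperSimple` helper, following the same pattern the updated tests in this commit use (the config file path below is hypothetical):

```go
package main

import (
    "log"

    "github.com/spf13/viper"

    "github.com/rancher/k3d/v4/pkg/config"
)

func main() {
    // Read a Simple config file into a viper instance, then decode it into a SimpleConfig struct.
    v := viper.New()
    v.SetConfigFile("./my-cluster.yaml") // hypothetical path
    if err := v.ReadInConfig(); err != nil {
        log.Fatalf("failed to read config: %v", err)
    }

    cfg, err := config.FromViperSimple(v)
    if err != nil {
        log.Fatalf("failed to decode Simple config: %v", err)
    }
    log.Printf("cluster name from config: %s", cfg.Name)
}
```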
|
||||
|
||||
func FromViper(config *viper.Viper) (conf.Config, error) {
|
||||
|
||||
var cfg conf.Config
|
||||
|
||||
// determine config kind
|
||||
switch strings.ToLower(cfgViper.GetString("kind")) {
|
||||
switch strings.ToLower(config.GetString("kind")) {
|
||||
case "simple":
|
||||
cfg = conf.SimpleConfig{}
|
||||
case "cluster":
|
||||
@ -68,16 +65,14 @@ func ReadConfig(file string) (conf.Config, error) {
|
||||
case "":
|
||||
return nil, fmt.Errorf("Missing `kind` in config file")
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown `kind` '%s' in config file", cfgViper.GetString("kind"))
|
||||
return nil, fmt.Errorf("Unknown `kind` '%s' in config file", config.GetString("kind"))
|
||||
}
|
||||
|
||||
if err := cfgViper.Unmarshal(&cfg); err != nil {
|
||||
if err := config.Unmarshal(&cfg); err != nil {
|
||||
log.Errorln("Failed to unmarshal File config")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Infof("Using Config: %s", cfgViper.ConfigFileUsed())
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
@ -26,7 +26,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/go-test/deep"
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
k3d "github.com/rancher/k3d/v4/pkg/types"
|
||||
)
|
||||
@ -39,7 +40,7 @@ func TestReadSimpleConfig(t *testing.T) {
|
||||
|
||||
expectedConfig := conf.SimpleConfig{
|
||||
TypeMeta: conf.TypeMeta{
|
||||
APIVersion: "k3d.io/v1alpha1",
|
||||
APIVersion: "k3d.io/v1alpha2",
|
||||
Kind: "Simple",
|
||||
},
|
||||
Name: "test",
|
||||
@ -94,20 +95,27 @@ func TestReadSimpleConfig(t *testing.T) {
|
||||
|
||||
cfgFile := "./test_assets/config_test_simple.yaml"
|
||||
|
||||
cfg, err := ReadConfig(cfgFile)
|
||||
config := viper.New()
|
||||
config.SetConfigFile(cfgFile)
|
||||
|
||||
// try to read config into memory (viper map structure)
|
||||
if err := config.ReadInConfig(); err != nil {
|
||||
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
t.Error(err)
|
||||
}
|
||||
// config file found but some other error happened
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
cfg, err := FromViperSimple(config)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
simpleCfg, ok := cfg.(conf.SimpleConfig)
|
||||
if !ok {
|
||||
t.Error("Config is not of type SimpleConfig")
|
||||
}
|
||||
t.Logf("\n========== Read Config %s ==========\n%+v\n=================================\n", config.ConfigFileUsed(), cfg)
|
||||
|
||||
t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", simpleCfg)
|
||||
|
||||
if diff := deep.Equal(simpleCfg, expectedConfig); diff != nil {
|
||||
t.Errorf("Actual representation\n%+v\ndoes not match expected representation\n%+v\nDiff:\n%+v", simpleCfg, expectedConfig, diff)
|
||||
if diff := deep.Equal(cfg, expectedConfig); diff != nil {
|
||||
t.Errorf("Actual representation\n%+v\ndoes not match expected representation\n%+v\nDiff:\n%+v", cfg, expectedConfig, diff)
|
||||
}
|
||||
|
||||
}
|
||||
@ -116,7 +124,7 @@ func TestReadClusterConfig(t *testing.T) {
|
||||
|
||||
expectedConfig := conf.ClusterConfig{
|
||||
TypeMeta: conf.TypeMeta{
|
||||
APIVersion: "k3d.io/v1alpha1",
|
||||
APIVersion: "k3d.io/v1alpha2",
|
||||
Kind: "Cluster",
|
||||
},
|
||||
Cluster: k3d.Cluster{
|
||||
@ -132,9 +140,21 @@ func TestReadClusterConfig(t *testing.T) {
|
||||
|
||||
cfgFile := "./test_assets/config_test_cluster.yaml"
|
||||
|
||||
readConfig, err := ReadConfig(cfgFile)
|
||||
config := viper.New()
|
||||
config.SetConfigFile(cfgFile)
|
||||
|
||||
// try to read config into memory (viper map structure)
|
||||
if err := config.ReadInConfig(); err != nil {
|
||||
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
t.Error(err)
|
||||
}
|
||||
// config file found but some other error happened
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
readConfig, err := FromViper(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", readConfig)
|
||||
@ -149,7 +169,7 @@ func TestReadClusterListConfig(t *testing.T) {
|
||||
|
||||
expectedConfig := conf.ClusterListConfig{
|
||||
TypeMeta: conf.TypeMeta{
|
||||
APIVersion: "k3d.io/v1alpha1",
|
||||
APIVersion: "k3d.io/v1alpha2",
|
||||
Kind: "ClusterList",
|
||||
},
|
||||
Clusters: []k3d.Cluster{
|
||||
@ -176,9 +196,21 @@ func TestReadClusterListConfig(t *testing.T) {
|
||||
|
||||
cfgFile := "./test_assets/config_test_cluster_list.yaml"
|
||||
|
||||
readConfig, err := ReadConfig(cfgFile)
|
||||
config := viper.New()
|
||||
config.SetConfigFile(cfgFile)
|
||||
|
||||
// try to read config into memory (viper map structure)
|
||||
if err := config.ReadInConfig(); err != nil {
|
||||
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
t.Error(err)
|
||||
}
|
||||
// config file found but some other error happened
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
readConfig, err := FromViper(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", readConfig)
|
||||
@ -193,7 +225,19 @@ func TestReadUnknownConfig(t *testing.T) {
|
||||
|
||||
cfgFile := "./test_assets/config_test_unknown.yaml"
|
||||
|
||||
_, err := ReadConfig(cfgFile)
|
||||
config := viper.New()
|
||||
config.SetConfigFile(cfgFile)
|
||||
|
||||
// try to read config into memory (viper map structure)
|
||||
if err := config.ReadInConfig(); err != nil {
|
||||
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
|
||||
t.Error(err)
|
||||
}
|
||||
// config file found but some other error happened
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
_, err := FromViperSimple(config)
|
||||
if err == nil {
|
||||
t.Fail()
|
||||
}
|
||||
|
pkg/config/jsonschema.go (new file, 90 lines)
@ -0,0 +1,90 @@
|
||||
/*
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package config

import (
    "bytes"
    "errors"
    "fmt"
    "io/ioutil"
    "strings"

    "sigs.k8s.io/yaml"

    "github.com/xeipuuv/gojsonschema"

    log "github.com/sirupsen/logrus"
)

// ValidateSchemaFile takes a filepath, reads the file and validates it against a JSON schema
func ValidateSchemaFile(filepath string, schema []byte) error {
    log.Debugf("Validating file %s against default JSONSchema...", filepath)

    fileContents, err := ioutil.ReadFile(filepath)
    if err != nil {
        return fmt.Errorf("Failed to read file %s: %+v", filepath, err)
    }

    var content map[string]interface{}
    if err := yaml.Unmarshal(fileContents, &content); err != nil {
        return fmt.Errorf("Failed to unmarshal the content of %s to a map: %+v", filepath, err)
    }

    return ValidateSchema(content, schema)
}

// ValidateSchema validates a YAML construct (non-struct representation) against a JSON Schema
func ValidateSchema(content map[string]interface{}, schemaJSON []byte) error {

    contentYaml, err := yaml.Marshal(content)
    if err != nil {
        return err
    }
    contentJSON, err := yaml.YAMLToJSON(contentYaml)
    if err != nil {
        return err
    }

    if bytes.Equal(contentJSON, []byte("null")) {
        contentJSON = []byte("{}") // non-json yaml struct
    }

    configLoader := gojsonschema.NewBytesLoader(contentJSON)
    schemaLoader := gojsonschema.NewBytesLoader(schemaJSON)

    result, err := gojsonschema.Validate(schemaLoader, configLoader)
    if err != nil {
        return err
    }

    log.Debugf("JSON Schema Validation Result: %+v", result)

    if !result.Valid() {
        var sb strings.Builder
        for _, desc := range result.Errors() {
            sb.WriteString(fmt.Sprintf("- %s\n", desc))
        }
        return errors.New(sb.String())
    }

    return nil
}
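For reference, a minimal sketch of how this validator is consumed elsewhere in this commit (e.g. in `initConfig()` and the new tests); the file path below is hypothetical:

```go
package main

import (
    "log"

    "github.com/rancher/k3d/v4/pkg/config"
    "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
)

func main() {
    cfgFile := "./my-cluster.yaml" // hypothetical path to a Simple config file

    // Validate the YAML file against the hardcoded v1alpha2 JSON schema before decoding it.
    if err := config.ValidateSchemaFile(cfgFile, []byte(v1alpha2.JSONSchema)); err != nil {
        log.Fatalf("schema validation failed for %s: %v", cfgFile, err)
    }
    log.Printf("%s is a valid k3d.io/v1alpha2 Simple config", cfgFile)
}
```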
|
pkg/config/jsonschema_test.go (new file, 75 lines)
@ -0,0 +1,75 @@
|
||||
/*
|
||||
Copyright © 2020 The k3d Author(s)
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/rancher/k3d/v4/pkg/config/v1alpha2"
|
||||
)
|
||||
|
||||
// TestEnsureHardcodedSchemaMatchesFile ensures that the JSONSchema hardcoded in the config package matches the corresponding file
|
||||
/*
|
||||
* TODO: as soon as we move to Go 1.16, the file will be embedded using //go:embed and we can drop this test
|
||||
*/
|
||||
func TestEnsureHardcodedSchemaMatchesFile(t *testing.T) {
|
||||
schemaFilePath := "./v1alpha2/schema.json"
|
||||
schemaContents, err := ioutil.ReadFile(schemaFilePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read schema file %s: %+v", schemaFilePath, err)
|
||||
}
|
||||
|
||||
if bytes.Compare([]byte(v1alpha2.JSONSchema), schemaContents) != 0 {
|
||||
t.Errorf("Schema file %s does not match hardcoded schema!", schemaFilePath)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestValidateSchema(t *testing.T) {
|
||||
|
||||
cfgPath := "./test_assets/config_test_simple.yaml"
|
||||
|
||||
if err := ValidateSchemaFile(cfgPath, []byte(v1alpha2.JSONSchema)); err != nil {
|
||||
t.Errorf("Validation of config file %s against the default schema failed: %+v", cfgPath, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestValidateSchemaFail(t *testing.T) {
|
||||
|
||||
cfgPath := "./test_assets/config_test_simple_invalid_servers.yaml"
|
||||
|
||||
var err error
|
||||
if err = ValidateSchemaFile(cfgPath, []byte(v1alpha2.JSONSchema)); err == nil {
|
||||
t.Errorf("Validation of config file %s against the default schema passed where we expected a failure", cfgPath)
|
||||
}
|
||||
|
||||
expectedErrorText := `- name: Invalid type. Expected: string, given: integer
|
||||
`
|
||||
|
||||
if err.Error() != expectedErrorText {
|
||||
t.Errorf("Actual validation error\n%s\ndoes not match expected error\n%s\n", err.Error(), expectedErrorText)
|
||||
}
|
||||
|
||||
}
|
@ -24,7 +24,7 @@ package config
|
||||
|
||||
import (
|
||||
"github.com/imdario/mergo"
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
@ -25,7 +25,8 @@ package config
|
||||
import (
|
||||
"testing"
|
||||
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
|
||||
"github.com/spf13/viper"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
@ -36,11 +37,19 @@ func TestMergeSimpleConfig(t *testing.T) {
|
||||
var src, dest conf.Config
|
||||
var err error
|
||||
|
||||
if src, err = ReadConfig(srcConfig); err != nil {
|
||||
cfg1 := viper.New()
|
||||
cfg1.SetConfigFile(srcConfig)
|
||||
_ = cfg1.ReadInConfig()
|
||||
|
||||
cfg2 := viper.New()
|
||||
cfg2.SetConfigFile(destConfig)
|
||||
_ = cfg2.ReadInConfig()
|
||||
|
||||
if src, err = FromViperSimple(cfg1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if dest, err = ReadConfig(destConfig); err != nil {
|
||||
if dest, err = FromViperSimple(cfg2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
apiVersion: k3d.io/v1alpha1
|
||||
apiVersion: k3d.io/v1alpha2
|
||||
kind: Cluster
|
||||
name: foo
|
||||
nodes:
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
apiVersion: k3d.io/v1alpha1
|
||||
apiVersion: k3d.io/v1alpha2
|
||||
kind: ClusterList
|
||||
clusters:
|
||||
- name: foo
|
||||
|
@ -1,4 +1,4 @@
|
||||
apiVersion: k3d.io/v1alpha1
|
||||
apiVersion: k3d.io/v1alpha2
|
||||
kind: Simple
|
||||
name: test
|
||||
servers: 1
|
||||
|
@ -1,4 +1,4 @@
|
||||
apiVersion: k3d.io/v1alpha1
|
||||
apiVersion: k3d.io/v1alpha2
|
||||
kind: Simple
|
||||
name: supertest
|
||||
agents: 8
|
@ -0,0 +1,43 @@
|
||||
apiVersion: k3d.io/v1alpha2
|
||||
kind: Simple
|
||||
name: 1234
|
||||
servers: 1
|
||||
agents: 2
|
||||
kubeAPI:
|
||||
hostIP: "0.0.0.0"
|
||||
hostPort: "6443"
|
||||
image: rancher/k3s:latest
|
||||
volumes:
|
||||
- volume: /my/path:/some/path
|
||||
nodeFilters:
|
||||
- all
|
||||
ports:
|
||||
- port: 80:80
|
||||
nodeFilters:
|
||||
- loadbalancer
|
||||
- port: 0.0.0.0:443:443
|
||||
nodeFilters:
|
||||
- loadbalancer
|
||||
env:
|
||||
- envVar: bar=baz
|
||||
nodeFilters:
|
||||
- all
|
||||
labels:
|
||||
- label: foo=bar
|
||||
nodeFilters:
|
||||
- server[0]
|
||||
- loadbalancer
|
||||
|
||||
options:
|
||||
k3d:
|
||||
wait: true
|
||||
timeout: "60s"
|
||||
disableLoadbalancer: false
|
||||
disableImageVolume: false
|
||||
k3s:
|
||||
extraServerArgs:
|
||||
- --tls-san=127.0.0.1
|
||||
extraAgentArgs: []
|
||||
kubeconfig:
|
||||
updateDefaultKubeconfig: true
|
||||
switchCurrentContext: true
|
@ -1,3 +1,3 @@
|
||||
apiVersion: k3d.io/v1alpha1
|
||||
apiVersion: k3d.io/v1alpha2
|
||||
kind: Unknown
|
||||
foo: bar
|
@ -31,7 +31,7 @@ import (
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
cliutil "github.com/rancher/k3d/v4/cmd/util" // TODO: move parseapiport to pkg
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
|
||||
"github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
k3d "github.com/rancher/k3d/v4/pkg/types"
|
||||
"github.com/rancher/k3d/v4/pkg/types/k3s"
|
||||
@ -93,6 +93,8 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
|
||||
newCluster.ServerLoadBalancer = &k3d.Node{
|
||||
Role: k3d.LoadBalancerRole,
|
||||
}
|
||||
} else {
|
||||
log.Debugln("Disabling the load balancer")
|
||||
}
|
||||
|
||||
/*************
|
||||
@ -137,7 +139,7 @@ func TransformSimpleToClusterConfig(ctx context.Context, runtime runtimes.Runtim
|
||||
nodeList = append(nodeList, newCluster.ServerLoadBalancer)
|
||||
}
|
||||
for _, volumeWithNodeFilters := range simpleConfig.Volumes {
|
||||
nodes, err := util.FilterNodes(newCluster.Nodes, volumeWithNodeFilters.NodeFilters)
|
||||
nodes, err := util.FilterNodes(nodeList, volumeWithNodeFilters.NodeFilters)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -26,26 +26,25 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
|
||||
"github.com/rancher/k3d/v4/pkg/runtimes"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func TestTransformSimpleConfigToClusterConfig(t *testing.T) {
|
||||
cfgFile := "./test_assets/config_test_simple.yaml"
|
||||
|
||||
cfg, err := ReadConfig(cfgFile)
|
||||
vip := viper.New()
|
||||
vip.SetConfigFile(cfgFile)
|
||||
_ = vip.ReadInConfig()
|
||||
|
||||
cfg, err := FromViperSimple(vip)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
simpleCfg, ok := cfg.(conf.SimpleConfig)
|
||||
if !ok {
|
||||
t.Error("Config is not of type SimpleConfig")
|
||||
}
|
||||
t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", cfg)
|
||||
|
||||
t.Logf("\n========== Read Config ==========\n%+v\n=================================\n", simpleCfg)
|
||||
|
||||
clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, simpleCfg)
|
||||
clusterCfg, err := TransformSimpleToClusterConfig(context.Background(), runtimes.Docker, cfg)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
pkg/config/v1alpha2/schema.go (new file, 237 lines)
@ -0,0 +1,237 @@
/*
Copyright © 2020 The k3d Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

package v1alpha2

// JSONSchema describes the schema used to validate config files
/* TODO: JSONSchema should be an embedded file. We're moving to the //go:embed tag as of Go 1.16
 * ... and only hardcode it here to avoid using 3rd party tools like go-bindata or packr right now for the time being
 */
var JSONSchema = `{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "SimpleConfig",
"type": "object",
"required": [
"apiVersion",
"kind"
],
"properties": {
"apiVersion": {
"type": "string",
"enum": [
"k3d.io/v1alpha2"
]
},
"kind": {
"type": "string",
"enum": [
"Simple"
]
},
"name": {
"description": "Name of the cluster (must be a valid hostname and will be prefixed with 'k3d-'). Example: 'mycluster'.",
"type": "string",
"format": "hostname"
},
"servers": {
"type": "number",
"minimum": 1
},
"agents": {
"type": "number",
"minimum": 0
},
"kubeAPI": {
"type": "object",
"properties": {
"host": {
"type": "string",
"format": "hostname"
},
"hostIP": {
"type": "string",
"format": "ipv4"
},
"hostPort": {
"type":"string"
}
},
"additionalProperties": false
},
"image": {
"type": "string"
},
"network": {
"type": "string"
},
"token": {
"type": "string"
},
"volumes": {
"type": "array",
"items": {
"type": "object",
"properties": {
"volume": {
"type": "string"
},
"nodeFilters": {
"$ref": "#/definitions/nodeFilters"
}
},
"additionalProperties": false
}
},
"ports": {
"type": "array",
"items": {
"type": "object",
"properties": {
"port": {
"type": "string"
},
"nodeFilters": {
"$ref": "#/definitions/nodeFilters"
}
},
"additionalProperties": false
}
},
"labels": {
"type": "array",
"items": {
"type": "object",
"properties": {
"label": {
"type": "string"
},
"nodeFilters": {
"$ref": "#/definitions/nodeFilters"
}
},
"additionalProperties": false
}
},
"options": {
"type": "object",
"properties": {
"k3d": {
"type": "object",
"properties": {
"wait": {
"type": "boolean",
"default": true
},
"timeout": {
"type": "string"
},
"disableLoadbalancer": {
"type": "boolean",
"default": false
},
"disableImageVolume": {
"type": "boolean",
"default": false
},
"disableRollback": {
"type": "boolean",
"default": false
},
"disableHostIPInjection": {
"type": "boolean",
"default": false
}
},
"additionalProperties": false
},
"k3s": {
"type": "object",
"properties": {
"extraServerArgs": {
"type": "array",
"items": {
"type": "string"
}
},
"extraAgentArgs": {
"type": "array",
"items": {
"type": "string"
}
}
},
"additionalProperties": false
},
"kubeconfig": {
"type": "object",
"properties": {
"updateDefaultKubeconfig": {
"type": "boolean",
"default": true
},
"switchCurrentContext": {
"type": "boolean",
"default": true
}
},
"additionalProperties": false
},
"runtime": {
"type": "object",
"properties": {
"gpuRequest": {
"type": "string"
}
}
}
},
"additionalProperties": false
},
"env": {
"type": "array",
"items": {
"type": "object",
"properties": {
"envVar": {
"type": "string"
},
"nodeFilters": {
"$ref": "#/definitions/nodeFilters"
}
},
"additionalProperties": false
}
},
"registries": {
"type": "object"
}
},
"additionalProperties": false,
"definitions": {
"nodeFilters": {
"type": "array",
"items": {
"type": "string"
}
}
}
}`
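schema.go hardcodes the draft-07 schema as a Go string (see the TODO about //go:embed above), and the commit also vendors xeipuuv/gojsonschema. A hedged, self-contained sketch of how a config document could be checked against this schema with that library (not the project's actual validation code; the file path is taken from this commit, the document values are made up):

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/xeipuuv/gojsonschema"
    )

    func main() {
        // schema.json is added by this commit; adjust the path to your checkout
        schemaBytes, err := os.ReadFile("pkg/config/v1alpha2/schema.json")
        if err != nil {
            log.Fatal(err)
        }
        schemaLoader := gojsonschema.NewStringLoader(string(schemaBytes))

        // a config document decoded into generic Go types (e.g. from YAML)
        document := map[string]interface{}{
            "apiVersion": "k3d.io/v1alpha2",
            "kind":       "Simple",
            "servers":    1,
        }
        documentLoader := gojsonschema.NewGoLoader(document)

        result, err := gojsonschema.Validate(schemaLoader, documentLoader)
        if err != nil {
            log.Fatal(err)
        }
        if !result.Valid() {
            for _, e := range result.Errors() {
                fmt.Println("-", e)
            }
            log.Fatal("config does not match the v1alpha2 Simple schema")
        }
        fmt.Println("config is valid")
    }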
209 pkg/config/v1alpha2/schema.json Normal file
@ -20,7 +20,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

package v1alpha1
package v1alpha2

import (
"fmt"
@ -32,7 +32,7 @@ import (

// DefaultConfigTpl for printing
const DefaultConfigTpl = `---
apiVersion: k3d.io/v1alpha1
apiVersion: k3d.io/v1alpha2
kind: Simple
name: %s
servers: 1
@ -100,8 +100,8 @@ type SimpleConfigOptionsK3d struct {
Timeout time.Duration `mapstructure:"timeout" yaml:"timeout"`
DisableLoadbalancer bool `mapstructure:"disableLoadbalancer" yaml:"disableLoadbalancer"`
DisableImageVolume bool `mapstructure:"disableImageVolume" yaml:"disableImageVolume"`
NoRollback bool `mapstructure:"noRollback" yaml:"noRollback"`
PrepDisableHostIPInjection bool `mapstructure:"prepDisableHostIPInjection" yaml:"prepDisableHostIPInjection"`
NoRollback bool `mapstructure:"disableRollback" yaml:"disableRollback"`
PrepDisableHostIPInjection bool `mapstructure:"disableHostIPInjection" yaml:"disableHostIPInjection"`
NodeHookActions []k3d.NodeHookAction `mapstructure:"nodeHookActions" yaml:"nodeHookActions,omitempty"`
}

@ -119,7 +119,7 @@ type SimpleConfig struct {
ExposeAPI SimpleExposureOpts `mapstructure:"kubeAPI" yaml:"kubeAPI" json:"kubeAPI,omitempty"`
Image string `mapstructure:"image" yaml:"image" json:"image,omitempty"`
Network string `mapstructure:"network" yaml:"network" json:"network,omitempty"`
ClusterToken string `mapstructure:"clusterToken" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated
ClusterToken string `mapstructure:"token" yaml:"clusterToken" json:"clusterToken,omitempty"` // default: auto-generated
Volumes []VolumeWithNodeFilters `mapstructure:"volumes" yaml:"volumes" json:"volumes,omitempty"`
Ports []PortWithNodeFilters `mapstructure:"ports" yaml:"ports" json:"ports,omitempty"`
Labels []LabelWithNodeFilters `mapstructure:"labels" yaml:"labels" json:"labels,omitempty"`
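The Go field names stay the same (NoRollback, PrepDisableHostIPInjection, ClusterToken), only the mapstructure keys change to the new config-file names (disableRollback, disableHostIPInjection, token). A hedged sketch of how those tags drive decoding, using github.com/mitchellh/mapstructure (the library Viper uses internally) and a trimmed-down stand-in struct rather than the real one:

    package main

    import (
        "fmt"
        "log"

        "github.com/mitchellh/mapstructure"
    )

    // trimmed-down stand-in for SimpleConfigOptionsK3d (illustration only)
    type k3dOptions struct {
        Wait                       bool `mapstructure:"wait"`
        NoRollback                 bool `mapstructure:"disableRollback"`
        PrepDisableHostIPInjection bool `mapstructure:"disableHostIPInjection"`
    }

    func main() {
        // keys as they appear in a v1alpha2 config file / Viper settings map
        raw := map[string]interface{}{
            "wait":                   true,
            "disableRollback":        true,
            "disableHostIPInjection": true,
        }
        var opts k3dOptions
        if err := mapstructure.Decode(raw, &opts); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%+v\n", opts) // {Wait:true NoRollback:true PrepDisableHostIPInjection:true}
    }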
@ -27,7 +27,7 @@ import (
"time"

k3dc "github.com/rancher/k3d/v4/pkg/client"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
"github.com/rancher/k3d/v4/pkg/runtimes"
k3d "github.com/rancher/k3d/v4/pkg/types"
"github.com/rancher/k3d/v4/pkg/util"
@ -26,14 +26,19 @@ import (
"context"
"testing"

conf "github.com/rancher/k3d/v4/pkg/config/v1alpha1"
conf "github.com/rancher/k3d/v4/pkg/config/v1alpha2"
"github.com/rancher/k3d/v4/pkg/runtimes"
"github.com/spf13/viper"
)

func TestValidateClusterConfig(t *testing.T) {
cfgFile := "./test_assets/config_test_cluster.yaml"

cfg, err := ReadConfig(cfgFile)
vip := viper.New()
vip.SetConfigFile(cfgFile)
_ = vip.ReadInConfig()

cfg, err := FromViper(vip)
if err != nil {
t.Error(err)
}
@ -47,6 +47,7 @@ func FilterNodes(nodes []*k3d.Node, filters []string) ([]*k3d.Node, error) {
agentNodes := []*k3d.Node{}
var serverlb *k3d.Node
for _, node := range nodes {
log.Tracef("FilterNodes (%+v): Checking node role %s", filters, node.Role)
if node.Role == k3d.ServerRole {
serverNodes = append(serverNodes, node)
} else if node.Role == k3d.AgentRole {
@ -1,11 +1,11 @@
apiVersion: k3d.io/v1alpha1
apiVersion: k3d.io/v1alpha2
kind: Simple
name: test
servers: 1
servers: 3
agents: 2
exposeAPI:
kubeAPI:
hostIP: "0.0.0.0"
port: "6443"
hostPort: "6446"
image: rancher/k3s:latest
volumes:
- volume: /my/path:/some/path
@ -30,8 +30,8 @@ sleep 5
info "Checking that we have access to the cluster..."
check_clusters "$clustername" || failed "error checking cluster"

info "Checking that we have 3 nodes online..."
check_multi_node "$clustername" 3 || failed "failed to verify number of nodes"
info "Checking that we have 5 nodes online..."
check_multi_node "$clustername" 5 || failed "failed to verify number of nodes"

# 2. check some config settings
65 tests/test_config_with_overrides.sh Executable file
@ -0,0 +1,65 @@
#!/bin/bash

CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
[ -d "$CURR_DIR" ] || { echo "FATAL: no current dir (maybe running in zsh?)"; exit 1; }

# shellcheck source=./common.sh
source "$CURR_DIR/common.sh"

: "${EXTRA_FLAG:=""}"
: "${EXTRA_TITLE:=""}"

if [[ -n "$K3S_IMAGE_TAG" ]]; then
EXTRA_FLAG="--image rancher/k3s:$K3S_IMAGE_TAG"
EXTRA_TITLE="(rancher/k3s:$K3S_IMAGE_TAG)"
fi

clustername="cfgoverridetest"

highlight "[START] Config With Override $EXTRA_TITLE"

info "Creating cluster $clustername..."
$EXE cluster create "$clustername" --config "$CURR_DIR/assets/config_test_simple.yaml" --servers 4 -v /tmp/test:/tmp/test@loadbalancer --registry-create=false --env "x=y@agent[1]" $EXTRA_FLAG || failed "could not create cluster $clustername $EXTRA_TITLE"

info "Sleeping for 5 seconds to give the cluster enough time to get ready..."
sleep 5

# 1. check initial access to the cluster
info "Checking that we have access to the cluster..."
check_clusters "$clustername" || failed "error checking cluster"

info "Checking that we have 6 nodes online..."
check_multi_node "$clustername" 6 || failed "failed to verify number of nodes"

# 2. check some config settings

## Environment Variables
info "Ensuring that environment variables are present in the node containers as set in the config and overrides"
exec_in_node "k3d-$clustername-server-0" "env" | grep "bar=baz" || failed "Expected env var 'bar=baz' is not present in node k3d-$clustername-server-0"
exec_in_node "k3d-$clustername-agent-1" "env" | grep "x=y" || failed "Expected env var 'x=y' is not present in node k3d-$clustername-agent-1"

## Container Labels
info "Ensuring that container labels have been set as stated in the config"
docker_assert_container_label "k3d-$clustername-server-0" "foo=bar" || failed "Expected label 'foo=bar' not present on container/node k3d-$clustername-server-0"

## Registry Node
info "Ensuring, that we DO NOT have a registry node present"
$EXE node list "k3d-$clustername-registry" && failed "Expected k3d-$clustername-registry to NOT be present"

## merged registries.yaml
info "Ensuring, that the registries.yaml file contains both registries"
exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -i "my.company.registry" || failed "Expected 'my.company.registry' to be in the /etc/rancher/k3s/registries.yaml"
exec_in_node "k3d-$clustername-server-0" "cat /etc/rancher/k3s/registries.yaml" | grep -i "k3d-$clustername-registry" && failed "Expected 'k3d-$clustername-registry' to NOT be in the /etc/rancher/k3s/registries.yaml"

# Cleanup

info "Deleting cluster $clustername..."
$EXE cluster delete "$clustername" || failed "could not delete the cluster $clustername"

highlight "[DONE] ConfigTest $EXTRA_TITLE"

exit 0
202 vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt generated vendored Normal file
41 vendor/github.com/xeipuuv/gojsonpointer/README.md generated vendored Normal file
@ -0,0 +1,41 @@
# gojsonpointer
An implementation of JSON Pointer - Go language

## Usage
    jsonText := `{
        "name": "Bobby B",
        "occupation": {
            "title" : "King",
            "years" : 15,
            "heir" : "Joffrey B"
        }
    }`

    var jsonDocument map[string]interface{}
    json.Unmarshal([]byte(jsonText), &jsonDocument)

    //create a JSON pointer
    pointerString := "/occupation/title"
    pointer, _ := NewJsonPointer(pointerString)

    //SET a new value for the "title" in the document
    pointer.Set(jsonDocument, "Supreme Leader of Westeros")

    //GET the new "title" from the document
    title, _, _ := pointer.Get(jsonDocument)
    fmt.Println(title) //outputs "Supreme Leader of Westeros"

    //DELETE the "heir" from the document
    deletePointer, _ := NewJsonPointer("/occupation/heir")
    deletePointer.Delete(jsonDocument)

    b, _ := json.Marshal(jsonDocument)
    fmt.Println(string(b))
    //outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}`

## References
https://tools.ietf.org/html/rfc6901

### Note
The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented.
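The README snippet above is abbreviated (no package clause or imports). A self-contained version using only this vendored package and the standard library might look like this; the document contents are made up:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/xeipuuv/gojsonpointer"
    )

    func main() {
        var doc map[string]interface{}
        if err := json.Unmarshal([]byte(`{"occupation":{"title":"King","heir":"Joffrey B"}}`), &doc); err != nil {
            log.Fatal(err)
        }

        // GET via an RFC 6901 pointer
        p, err := gojsonpointer.NewJsonPointer("/occupation/title")
        if err != nil {
            log.Fatal(err)
        }
        title, _, _ := p.Get(doc)
        fmt.Println(title) // King

        // SET and DELETE mutate the decoded document in place
        _, _ = p.Set(doc, "Supreme Leader of Westeros")
        del, _ := gojsonpointer.NewJsonPointer("/occupation/heir")
        _, _ = del.Delete(doc)

        out, _ := json.Marshal(doc)
        fmt.Println(string(out))
    }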
211 vendor/github.com/xeipuuv/gojsonpointer/pointer.go generated vendored Normal file
@ -0,0 +1,211 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonpointer
// repository-desc An implementation of JSON Pointer - Go language
//
// description Main and unique file.
//
// created 25-02-2013

package gojsonpointer

import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)

const (
const_empty_pointer = ``
const_pointer_separator = `/`

const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"`
)

type implStruct struct {
mode string // "SET" or "GET"

inDocument interface{}

setInValue interface{}

getOutNode interface{}
getOutKind reflect.Kind
outError error
}

type JsonPointer struct {
referenceTokens []string
}

// NewJsonPointer parses the given string JSON pointer and returns an object
func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) {

// Pointer to the root of the document
if len(jsonPointerString) == 0 {
// Keep referenceTokens nil
return
}
if jsonPointerString[0] != '/' {
return p, errors.New(const_invalid_start)
}

p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator)
return
}

// Uses the pointer to retrieve a value from a JSON document
func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {

is := &implStruct{mode: "GET", inDocument: document}
p.implementation(is)
return is.getOutNode, is.getOutKind, is.outError

}

// Uses the pointer to update a value from a JSON document
func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {

is := &implStruct{mode: "SET", inDocument: document, setInValue: value}
p.implementation(is)
return document, is.outError

}

// Uses the pointer to delete a value from a JSON document
func (p *JsonPointer) Delete(document interface{}) (interface{}, error) {
is := &implStruct{mode: "DEL", inDocument: document}
p.implementation(is)
return document, is.outError
}

// Both Get and Set functions use the same implementation to avoid code duplication
func (p *JsonPointer) implementation(i *implStruct) {

kind := reflect.Invalid

// Full document when empty
if len(p.referenceTokens) == 0 {
i.getOutNode = i.inDocument
i.outError = nil
i.getOutKind = kind
i.outError = nil
return
}

node := i.inDocument

previousNodes := make([]interface{}, len(p.referenceTokens))
previousTokens := make([]string, len(p.referenceTokens))

for ti, token := range p.referenceTokens {

isLastToken := ti == len(p.referenceTokens)-1
previousNodes[ti] = node
previousTokens[ti] = token

switch v := node.(type) {

case map[string]interface{}:
decodedToken := decodeReferenceToken(token)
if _, ok := v[decodedToken]; ok {
node = v[decodedToken]
if isLastToken && i.mode == "SET" {
v[decodedToken] = i.setInValue
} else if isLastToken && i.mode == "DEL" {
delete(v, decodedToken)
}
} else if isLastToken && i.mode == "SET" {
v[decodedToken] = i.setInValue
} else {
i.outError = fmt.Errorf("Object has no key '%s'", decodedToken)
i.getOutKind = reflect.Map
i.getOutNode = nil
return
}

case []interface{}:
tokenIndex, err := strconv.Atoi(token)
if err != nil {
i.outError = fmt.Errorf("Invalid array index '%s'", token)
i.getOutKind = reflect.Slice
i.getOutNode = nil
return
}
if tokenIndex < 0 || tokenIndex >= len(v) {
i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex)
i.getOutKind = reflect.Slice
i.getOutNode = nil
return
}

node = v[tokenIndex]
if isLastToken && i.mode == "SET" {
v[tokenIndex] = i.setInValue
} else if isLastToken && i.mode == "DEL" {
v[tokenIndex] = v[len(v)-1]
v[len(v)-1] = nil
v = v[:len(v)-1]
previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v
}

default:
i.outError = fmt.Errorf("Invalid token reference '%s'", token)
i.getOutKind = reflect.ValueOf(node).Kind()
i.getOutNode = nil
return
}

}

i.getOutNode = node
i.getOutKind = reflect.ValueOf(node).Kind()
i.outError = nil
}

// Pointer to string representation function
func (p *JsonPointer) String() string {

if len(p.referenceTokens) == 0 {
return const_empty_pointer
}

pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)

return pointerString
}

// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
// ... and vice versa

func decodeReferenceToken(token string) string {
step1 := strings.Replace(token, `~1`, `/`, -1)
step2 := strings.Replace(step1, `~0`, `~`, -1)
return step2
}

func encodeReferenceToken(token string) string {
step1 := strings.Replace(token, `~`, `~0`, -1)
step2 := strings.Replace(step1, `/`, `~1`, -1)
return step2
}
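decodeReferenceToken and encodeReferenceToken implement the RFC 6901 escaping (`~0` stands for `~`, `~1` for `/`). Since they are unexported, the effect is easiest to see through a pointer whose token contains a slash; a short illustrative example (document contents made up):

    package main

    import (
        "fmt"

        "github.com/xeipuuv/gojsonpointer"
    )

    func main() {
        doc := map[string]interface{}{
            "a/b": map[string]interface{}{"c": 42},
        }
        // "/a~1b/c" addresses the "a/b" key: ~1 is decoded back to "/"
        p, _ := gojsonpointer.NewJsonPointer("/a~1b/c")
        v, _, _ := p.Get(doc)
        fmt.Println(v) // 42
    }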
202 vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt generated vendored Normal file
10 vendor/github.com/xeipuuv/gojsonreference/README.md generated vendored Normal file
@ -0,0 +1,10 @@
# gojsonreference
An implementation of JSON Reference - Go language

## Dependencies
https://github.com/xeipuuv/gojsonpointer

## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07

http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
147 vendor/github.com/xeipuuv/gojsonreference/reference.go generated vendored Normal file
@ -0,0 +1,147 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonreference
// repository-desc An implementation of JSON Reference - Go language
//
// description Main and unique file.
//
// created 26-02-2013

package gojsonreference

import (
"errors"
"net/url"
"path/filepath"
"runtime"
"strings"

"github.com/xeipuuv/gojsonpointer"
)

const (
const_fragment_char = `#`
)

func NewJsonReference(jsonReferenceString string) (JsonReference, error) {

var r JsonReference
err := r.parse(jsonReferenceString)
return r, err

}

type JsonReference struct {
referenceUrl *url.URL
referencePointer gojsonpointer.JsonPointer

HasFullUrl bool
HasUrlPathOnly bool
HasFragmentOnly bool
HasFileScheme bool
HasFullFilePath bool
}

func (r *JsonReference) GetUrl() *url.URL {
return r.referenceUrl
}

func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer {
return &r.referencePointer
}

func (r *JsonReference) String() string {

if r.referenceUrl != nil {
return r.referenceUrl.String()
}

if r.HasFragmentOnly {
return const_fragment_char + r.referencePointer.String()
}

return r.referencePointer.String()
}

func (r *JsonReference) IsCanonical() bool {
return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl)
}

// "Constructor", parses the given string JSON reference
func (r *JsonReference) parse(jsonReferenceString string) (err error) {

r.referenceUrl, err = url.Parse(jsonReferenceString)
if err != nil {
return
}
refUrl := r.referenceUrl

if refUrl.Scheme != "" && refUrl.Host != "" {
r.HasFullUrl = true
} else {
if refUrl.Path != "" {
r.HasUrlPathOnly = true
} else if refUrl.RawQuery == "" && refUrl.Fragment != "" {
r.HasFragmentOnly = true
}
}

r.HasFileScheme = refUrl.Scheme == "file"
if runtime.GOOS == "windows" {
// on Windows, a file URL may have an extra leading slash, and if it
// doesn't then its first component will be treated as the host by the
// Go runtime
if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") {
r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:])
} else {
r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path)
}
} else {
r.HasFullFilePath = filepath.IsAbs(refUrl.Path)
}

// invalid json-pointer error means url has no json-pointer fragment. simply ignore error
r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment)

return
}

// Creates a new reference from a parent and a child
// If the child cannot inherit from the parent, an error is returned
func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
if child.GetUrl() == nil {
return nil, errors.New("childUrl is nil!")
}

if r.GetUrl() == nil {
return nil, errors.New("parentUrl is nil!")
}

// Get a copy of the parent url to make sure we do not modify the original.
// URL reference resolving fails if the fragment of the child is empty, but the parent's is not.
// The fragment of the child must be used, so the fragment of the parent is manually removed.
parentUrl := *r.GetUrl()
parentUrl.Fragment = ""

ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String())
if err != nil {
return nil, err
}
return &ref, err
}
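gojsonreference combines a URL with a JSON Pointer fragment, which is how references like `$ref: "#/definitions/nodeFilters"` in the schema above get resolved. A hedged example using only the exported API shown here (the base URL is an illustrative placeholder):

    package main

    import (
        "fmt"
        "log"

        "github.com/xeipuuv/gojsonreference"
    )

    func main() {
        ref, err := gojsonreference.NewJsonReference("#/definitions/nodeFilters")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(ref.HasFragmentOnly)       // true
        fmt.Println(ref.GetPointer().String()) // /definitions/nodeFilters

        // resolving a fragment-only reference against a base document URL
        base, _ := gojsonreference.NewJsonReference("https://example.com/schemas/simpleconfig.json")
        resolved, err := base.Inherits(ref)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(resolved.String()) // https://example.com/schemas/simpleconfig.json#/definitions/nodeFilters
    }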
3 vendor/github.com/xeipuuv/gojsonschema/.gitignore generated vendored Normal file
@ -0,0 +1,3 @@
*.sw[nop]
*.iml
.vscode/
9 vendor/github.com/xeipuuv/gojsonschema/.travis.yml generated vendored Normal file
@ -0,0 +1,9 @@
language: go
go:
- "1.11"
- "1.12"
- "1.13"
before_install:
- go get github.com/xeipuuv/gojsonreference
- go get github.com/xeipuuv/gojsonpointer
- go get github.com/stretchr/testify/assert
202 vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt generated vendored Normal file
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2015 xeipuuv
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
466
vendor/github.com/xeipuuv/gojsonschema/README.md
generated
vendored
Normal file
@ -0,0 +1,466 @@
|
||||
[](https://godoc.org/github.com/xeipuuv/gojsonschema)
|
||||
[](https://travis-ci.org/xeipuuv/gojsonschema)
|
||||
[](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema)
|
||||
|
||||
# gojsonschema
|
||||
|
||||
## Description
|
||||
|
||||
An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07.
|
||||
|
||||
References :
|
||||
|
||||
* http://json-schema.org
|
||||
* http://json-schema.org/latest/json-schema-core.html
|
||||
* http://json-schema.org/latest/json-schema-validation.html
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
go get github.com/xeipuuv/gojsonschema
|
||||
```
|
||||
|
||||
Dependencies :
|
||||
* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
|
||||
* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
|
||||
* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
|
||||
|
||||
## Usage
|
||||
|
||||
### Example
|
||||
|
||||
```go
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/xeipuuv/gojsonschema"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
|
||||
documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
|
||||
|
||||
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
if result.Valid() {
|
||||
fmt.Printf("The document is valid\n")
|
||||
} else {
|
||||
fmt.Printf("The document is not valid. see errors :\n")
|
||||
for _, desc := range result.Errors() {
|
||||
fmt.Printf("- %s\n", desc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
```
|
||||
|
||||
#### Loaders
|
||||
|
||||
There are various ways to load your JSON data.
|
||||
In order to load your schemas and documents,
|
||||
first declare an appropriate loader :
|
||||
|
||||
* Web / HTTP, using a reference :
|
||||
|
||||
```go
|
||||
loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
|
||||
```
|
||||
|
||||
* Local file, using a reference :
|
||||
|
||||
```go
|
||||
loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
|
||||
```
|
||||
|
||||
References use the URI scheme; the prefix (file://) and a full path to the file are required.
|
||||
|
||||
* JSON strings :
|
||||
|
||||
```go
|
||||
loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
|
||||
```
|
||||
|
||||
* Custom Go types :
|
||||
|
||||
```go
|
||||
m := map[string]interface{}{"type": "string"}
|
||||
loader := gojsonschema.NewGoLoader(m)
|
||||
```
|
||||
|
||||
And
|
||||
|
||||
```go
|
||||
type Root struct {
|
||||
Users []User `json:"users"`
|
||||
}
|
||||
|
||||
type User struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
...
|
||||
|
||||
data := Root{}
|
||||
data.Users = append(data.Users, User{"John"})
|
||||
data.Users = append(data.Users, User{"Sophia"})
|
||||
data.Users = append(data.Users, User{"Bill"})
|
||||
|
||||
loader := gojsonschema.NewGoLoader(data)
|
||||
```
|
||||
|
||||
#### Validation
|
||||
|
||||
Once the loaders are set, validation is easy :
|
||||
|
||||
```go
|
||||
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
|
||||
```
|
||||
|
||||
Alternatively, you might want to load a schema only once and reuse it for multiple validations :
|
||||
|
||||
```go
|
||||
schema, err := gojsonschema.NewSchema(schemaLoader)
|
||||
...
|
||||
result1, err := schema.Validate(documentLoader1)
|
||||
...
|
||||
result2, err := schema.Validate(documentLoader2)
|
||||
...
|
||||
// etc ...
|
||||
```
|
||||
|
||||
To check the result :
|
||||
|
||||
```go
|
||||
if result.Valid() {
|
||||
fmt.Printf("The document is valid\n")
|
||||
} else {
|
||||
fmt.Printf("The document is not valid. see errors :\n")
|
||||
for _, err := range result.Errors() {
|
||||
// Err implements the ResultError interface
|
||||
fmt.Printf("- %s\n", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Loading local schemas
|
||||
|
||||
By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`.
|
||||
|
||||
```go
|
||||
sl := gojsonschema.NewSchemaLoader()
|
||||
loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`)
|
||||
err := sl.AddSchema("http://some_host.com/string.json", loader1)
|
||||
```
|
||||
|
||||
Alternatively, if your schema already has an `$id`, you can use the `AddSchemas` function
|
||||
```go
|
||||
loader2 := gojsonschema.NewStringLoader(`{
|
||||
"$id" : "http://some_host.com/maxlength.json",
|
||||
"maxLength" : 5
|
||||
}`)
|
||||
err = sl.AddSchemas(loader2)
|
||||
```
|
||||
|
||||
The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them.
|
||||
```go
|
||||
loader3 := gojsonschema.NewStringLoader(`{
|
||||
"$id" : "http://some_host.com/main.json",
|
||||
"allOf" : [
|
||||
{ "$ref" : "http://some_host.com/string.json" },
|
||||
{ "$ref" : "http://some_host.com/maxlength.json" }
|
||||
]
|
||||
}`)
|
||||
|
||||
schema, err := sl.Compile(loader3)
|
||||
|
||||
documentLoader := gojsonschema.NewStringLoader(`"hello world"`)
|
||||
|
||||
result, err := schema.Validate(documentLoader)
|
||||
```
|
||||
|
||||
It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema.
|
||||
|
||||
```go
|
||||
err = sl.AddSchemas(loader3)
|
||||
schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json"))
|
||||
```
|
||||
|
||||
Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used.
|
||||
|
||||
## Using a specific draft
|
||||
By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitly set, a hybrid mode is used which merges together functionality of all drafts into one mode.
|
||||
|
||||
Autodetection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property.
|
||||
|
||||
```go
|
||||
sl := gojsonschema.NewSchemaLoader()
|
||||
sl.Draft = gojsonschema.Draft7
|
||||
sl.AutoDetect = false
|
||||
```
|
||||
|
||||
If autodetection is on (default), a draft-07 schema can safely reference draft-04 schemas and vice-versa, as long as `$schema` is specified in all schemas.
|
||||
|
||||
## Meta-schema validation
|
||||
Schemas that are added using the `AddSchema`, `AddSchemas` and `Compile` functions can be validated against their meta-schema by setting the `Validate` property.
|
||||
|
||||
The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step.
|
||||
|
||||
```go
|
||||
sl := gojsonschema.NewSchemaLoader()
|
||||
sl.Validate = true
|
||||
err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
|
||||
$id" : "http://some_host.com/invalid.json",
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"multipleOf" : true
|
||||
}`))
|
||||
```
|
||||
Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema.
|
||||
|
||||
Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used.
|
||||
|
||||
|
||||
## Working with Errors
|
||||
|
||||
The library handles string error codes, which you can customize by creating your own gojsonschema.locale and setting it:
|
||||
```go
|
||||
gojsonschema.Locale = YourCustomLocale{}
|
||||
```
|
||||
|
||||
However, each error contains additional contextual information.
|
||||
|
||||
Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens.
|
||||
|
||||
**err.Type()**: *string* Returns the "type" of error that occurred. Note that you can also type-check against the concrete error types; see the list and the sketch below.
|
||||
|
||||
Note: An error of type RequiredError has an err.Type() return value of "required"
|
||||
|
||||
"required": RequiredError
|
||||
"invalid_type": InvalidTypeError
|
||||
"number_any_of": NumberAnyOfError
|
||||
"number_one_of": NumberOneOfError
|
||||
"number_all_of": NumberAllOfError
|
||||
"number_not": NumberNotError
|
||||
"missing_dependency": MissingDependencyError
|
||||
"internal": InternalError
|
||||
"const": ConstEror
|
||||
"enum": EnumError
|
||||
"array_no_additional_items": ArrayNoAdditionalItemsError
|
||||
"array_min_items": ArrayMinItemsError
|
||||
"array_max_items": ArrayMaxItemsError
|
||||
"unique": ItemsMustBeUniqueError
|
||||
"contains" : ArrayContainsError
|
||||
"array_min_properties": ArrayMinPropertiesError
|
||||
"array_max_properties": ArrayMaxPropertiesError
|
||||
"additional_property_not_allowed": AdditionalPropertyNotAllowedError
|
||||
"invalid_property_pattern": InvalidPropertyPatternError
|
||||
"invalid_property_name": InvalidPropertyNameError
|
||||
"string_gte": StringLengthGTEError
|
||||
"string_lte": StringLengthLTEError
|
||||
"pattern": DoesNotMatchPatternError
|
||||
"multiple_of": MultipleOfError
|
||||
"number_gte": NumberGTEError
|
||||
"number_gt": NumberGTError
|
||||
"number_lte": NumberLTEError
|
||||
"number_lt": NumberLTError
|
||||
"condition_then" : ConditionThenError
|
||||
"condition_else" : ConditionElseError
|
||||
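Since the concrete error types listed above are exported by the package, you can also switch on the error's type rather than comparing `err.Type()` strings. A minimal sketch (the `reportErrors` helper and the sample schema are illustrative, not part of the library):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

// reportErrors type-checks validation errors instead of comparing err.Type() strings.
func reportErrors(result *gojsonschema.Result) {
	for _, e := range result.Errors() {
		switch e.(type) {
		case *gojsonschema.RequiredError:
			fmt.Printf("missing required property: %v\n", e.Details()["property"])
		case *gojsonschema.InvalidTypeError:
			fmt.Printf("wrong type at %s\n", e.Field())
		default:
			fmt.Printf("- %s\n", e)
		}
	}
}

func main() {
	schemaLoader := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)
	documentLoader := gojsonschema.NewStringLoader(`{}`)

	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		panic(err.Error())
	}
	reportErrors(result) // prints: missing required property: name
}
```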
|
||||
**err.Value()**: *interface{}* Returns the value given
|
||||
|
||||
**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
|
||||
|
||||
**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
|
||||
|
||||
**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
|
||||
|
||||
**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result.
|
||||
|
||||
**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
|
||||
|
||||
Note that in most cases err.Details() will be used to generate replacement strings in your locales rather than being used directly. These strings follow the text/template format, i.e.
|
||||
```
|
||||
{{.field}} must be greater than or equal to {{.min}}
|
||||
```
|
||||
|
||||
The library allows you to specify custom template functions, should you require more complex error message handling.
|
||||
```go
|
||||
gojsonschema.ErrorTemplateFuncs = map[string]interface{}{
|
||||
"allcaps": func(s string) string {
|
||||
return strings.ToUpper(s)
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
Given the above definition, you can use the custom function `"allcaps"` in your localization templates:
|
||||
```
|
||||
{{allcaps .field}} must be greater than or equal to {{.min}}
|
||||
```
|
||||
|
||||
The above error message would then be rendered with the `field` value in capital letters. For example:
|
||||
```
|
||||
"PASSWORD must be greater than or equal to 8"
|
||||
```
|
||||
|
||||
Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type.
|
||||
|
||||
## Formats
|
||||
JSON Schema allows for an optional "format" property to validate instances against well-known formats. gojsonschema ships with all of the formats defined in the spec, which you can use like this (a short runnable sketch follows the snippet):
|
||||
|
||||
````json
|
||||
{"type": "string", "format": "email"}
|
||||
````
|
||||
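A minimal runnable sketch of the above (the schema and sample value are illustrative), showing `format` being enforced during validation:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// "ipv4" is one of the built-in format checkers listed below.
	schemaLoader := gojsonschema.NewStringLoader(`{"type": "string", "format": "ipv4"}`)
	documentLoader := gojsonschema.NewStringLoader(`"10.0.0.256"`) // 256 is out of range for an octet

	result, err := gojsonschema.Validate(schemaLoader, documentLoader)
	if err != nil {
		panic(err.Error())
	}

	fmt.Println(result.Valid()) // false: the value does not match the "ipv4" format
}
```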
|
||||
Not all formats defined in draft-07 are available. Implemented formats are:
|
||||
|
||||
* `date`
|
||||
* `time`
|
||||
* `date-time`
|
||||
* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames.
|
||||
* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support.
|
||||
* `idn-email`. Same caveat as `email`.
|
||||
* `ipv4`
|
||||
* `ipv6`
|
||||
* `uri`. Includes unicode support.
|
||||
* `uri-reference`. Includes unicode support.
|
||||
* `iri`
|
||||
* `iri-reference`
|
||||
* `uri-template`
|
||||
* `uuid`
|
||||
* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible.
|
||||
* `json-pointer`
|
||||
* `relative-json-pointer`
|
||||
|
||||
`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support you should use the specific
|
||||
unicode enabled formats for the sake of interoperability as other implementations might not support unicode in the regular formats.
|
||||
|
||||
The validation code for `uri`, `idn-email` and their relatives use mostly standard library code.
|
||||
|
||||
For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
|
||||
|
||||
```go
|
||||
// Define the format checker
|
||||
type RoleFormatChecker struct {}
|
||||
|
||||
// Ensure it meets the gojsonschema.FormatChecker interface
|
||||
func (f RoleFormatChecker) IsFormat(input interface{}) bool {
|
||||
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return strings.HasPrefix("ROLE_", asString)
|
||||
}
|
||||
|
||||
// Add it to the library
|
||||
gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
|
||||
```
|
||||
|
||||
Now to use in your json schema:
|
||||
````json
|
||||
{"type": "string", "format": "role"}
|
||||
````
|
||||
|
||||
Another example would be to check if the provided integer matches an id on database:
|
||||
|
||||
JSON schema:
|
||||
```json
|
||||
{"type": "integer", "format": "ValidUserId"}
|
||||
```
|
||||
|
||||
```go
|
||||
// Define the format checker
|
||||
type ValidUserIdFormatChecker struct {}
|
||||
|
||||
// Ensure it meets the gojsonschema.FormatChecker interface
|
||||
func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
|
||||
|
||||
asFloat64, ok := input.(float64) // Numbers are always float64 here
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// XXX
|
||||
// do the magic on the database looking for the int(asFloat64)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Add it to the library
|
||||
gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
|
||||
```
|
||||
|
||||
Formats can also be removed, for example if you want to override one of the formats that is defined by default.
|
||||
|
||||
```go
|
||||
gojsonschema.FormatCheckers.Remove("hostname")
|
||||
```
|
||||
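A built-in format can also be overridden by removing it and then registering a custom checker under the same name. A minimal sketch, where `StrictHostnameChecker` and its rule are purely illustrative:

```go
package main

import (
	"net"

	"github.com/xeipuuv/gojsonschema"
)

// StrictHostnameChecker is a hypothetical replacement for the built-in "hostname" checker.
type StrictHostnameChecker struct{}

// IsFormat rejects non-strings and anything that parses as an IP address.
func (c StrictHostnameChecker) IsFormat(input interface{}) bool {
	asString, ok := input.(string)
	if !ok {
		return false
	}
	return asString != "" && net.ParseIP(asString) == nil
}

func main() {
	// Remove the default checker, then register the stricter one under the same name.
	gojsonschema.FormatCheckers.Remove("hostname")
	gojsonschema.FormatCheckers.Add("hostname", StrictHostnameChecker{})
}
```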
|
||||
|
||||
## Additional custom validation
|
||||
After the validation has run and you have the results, you may add additional
|
||||
errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead
|
||||
of having to add special exceptions for your own errors. Below is an example.
|
||||
|
||||
```go
|
||||
type AnswerInvalidError struct {
|
||||
gojsonschema.ResultErrorFields
|
||||
}
|
||||
|
||||
func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
|
||||
err := AnswerInvalidError{}
|
||||
err.SetContext(context)
|
||||
err.SetType("custom_invalid_error")
|
||||
// it is important to use SetDescriptionFormat() as this is used to call SetDescription() after it has been parsed
|
||||
// using the description of err will be overridden by this.
|
||||
err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
|
||||
err.SetValue(value)
|
||||
err.SetDetails(details)
|
||||
|
||||
return &err
|
||||
}
|
||||
|
||||
func main() {
|
||||
// ...
|
||||
schema, err := gojsonschema.NewSchema(schemaLoader)
|
||||
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
|
||||
|
||||
if true { // some validation
|
||||
jsonContext := gojsonschema.NewJsonContext("question", nil)
|
||||
errDetail := gojsonschema.ErrorDetails{
|
||||
"answer": 42,
|
||||
}
|
||||
result.AddError(
|
||||
newAnswerInvalidError(
|
||||
gojsonschema.NewJsonContext("answer", jsonContext),
|
||||
52,
|
||||
errDetail,
|
||||
),
|
||||
errDetail,
|
||||
)
|
||||
}
|
||||
|
||||
return result, err
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
This is especially useful if you want to add validation beyond what the
|
||||
json schema drafts can provide, such as business-specific logic.
|
||||
|
||||
## Uses
|
||||
|
||||
gojsonschema uses the following test suite :
|
||||
|
||||
https://github.com/json-schema/JSON-Schema-Test-Suite
|
125
vendor/github.com/xeipuuv/gojsonschema/draft.go
generated
vendored
Normal file
@ -0,0 +1,125 @@
|
||||
// Copyright 2018 johandorland ( https://github.com/johandorland )
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gojsonschema
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"reflect"
|
||||
|
||||
"github.com/xeipuuv/gojsonreference"
|
||||
)
|
||||
|
||||
// Draft is a JSON-schema draft version
|
||||
type Draft int
|
||||
|
||||
// Supported Draft versions
|
||||
const (
|
||||
Draft4 Draft = 4
|
||||
Draft6 Draft = 6
|
||||
Draft7 Draft = 7
|
||||
Hybrid Draft = math.MaxInt32
|
||||
)
|
||||
|
||||
type draftConfig struct {
|
||||
Version Draft
|
||||
MetaSchemaURL string
|
||||
MetaSchema string
|
||||
}
|
||||
type draftConfigs []draftConfig
|
||||
|
||||
var drafts draftConfigs
|
||||
|
||||
func init() {
|
||||
drafts = []draftConfig{
|
||||
{
|
||||
Version: Draft4,
|
||||
MetaSchemaURL: "http://json-schema.org/draft-04/schema",
|
||||
MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`,
|
||||
},
|
||||
{
|
||||
Version: Draft6,
|
||||
MetaSchemaURL: "http://json-schema.org/draft-06/schema",
|
||||
MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`,
|
||||
},
|
||||
{
|
||||
Version: Draft7,
|
||||
MetaSchemaURL: "http://json-schema.org/draft-07/schema",
|
||||
MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (dc draftConfigs) GetMetaSchema(url string) string {
|
||||
for _, config := range dc {
|
||||
if config.MetaSchemaURL == url {
|
||||
return config.MetaSchema
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (dc draftConfigs) GetDraftVersion(url string) *Draft {
|
||||
for _, config := range dc {
|
||||
if config.MetaSchemaURL == url {
|
||||
return &config.Version
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (dc draftConfigs) GetSchemaURL(draft Draft) string {
|
||||
for _, config := range dc {
|
||||
if config.Version == draft {
|
||||
return config.MetaSchemaURL
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func parseSchemaURL(documentNode interface{}) (string, *Draft, error) {
|
||||
|
||||
if isKind(documentNode, reflect.Bool) {
|
||||
return "", nil, nil
|
||||
}
|
||||
|
||||
if !isKind(documentNode, reflect.Map) {
|
||||
return "", nil, errors.New("schema is invalid")
|
||||
}
|
||||
|
||||
m := documentNode.(map[string]interface{})
|
||||
|
||||
if existsMapKey(m, KEY_SCHEMA) {
|
||||
if !isKind(m[KEY_SCHEMA], reflect.String) {
|
||||
return "", nil, errors.New(formatErrorDescription(
|
||||
Locale.MustBeOfType(),
|
||||
ErrorDetails{
|
||||
"key": KEY_SCHEMA,
|
||||
"type": TYPE_STRING,
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string))
|
||||
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
schema := schemaReference.String()
|
||||
|
||||
return schema, drafts.GetDraftVersion(schema), nil
|
||||
}
|
||||
|
||||
return "", nil, nil
|
||||
}
|
364
vendor/github.com/xeipuuv/gojsonschema/errors.go
generated
vendored
Normal file
@ -0,0 +1,364 @@
|
||||
package gojsonschema
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sync"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}}
|
||||
|
||||
// template.Template is not thread-safe for writing, so some locking is done
|
||||
// sync.RWMutex is used for efficiently locking when new templates are created
|
||||
type errorTemplate struct {
|
||||
*template.Template
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
type (
|
||||
|
||||
// FalseError. ErrorDetails: -
|
||||
FalseError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// RequiredError indicates that a required field is missing
|
||||
// ErrorDetails: property string
|
||||
RequiredError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// InvalidTypeError indicates that a field has the incorrect type
|
||||
// ErrorDetails: expected, given
|
||||
InvalidTypeError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// NumberAnyOfError is produced in case of a failing "anyOf" validation
|
||||
// ErrorDetails: -
|
||||
NumberAnyOfError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// NumberOneOfError is produced in case of a failing "oneOf" validation
|
||||
// ErrorDetails: -
|
||||
NumberOneOfError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// NumberAllOfError is produced in case of a failing "allOf" validation
|
||||
// ErrorDetails: -
|
||||
NumberAllOfError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// NumberNotError is produced if a "not" validation failed
|
||||
// ErrorDetails: -
|
||||
NumberNotError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// MissingDependencyError is produced in case of a "missing dependency" problem
|
||||
// ErrorDetails: dependency
|
||||
MissingDependencyError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// InternalError indicates an internal error
|
||||
// ErrorDetails: error
|
||||
InternalError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// ConstError indicates a const error
|
||||
// ErrorDetails: allowed
|
||||
ConstError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// EnumError indicates an enum error
|
||||
// ErrorDetails: allowed
|
||||
EnumError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// ArrayNoAdditionalItemsError is produced if additional items are present but not allowed
|
||||
// ErrorDetails: -
|
||||
ArrayNoAdditionalItemsError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// ArrayMinItemsError is produced if an array contains fewer items than the allowed minimum
|
||||
// ErrorDetails: min
|
||||
ArrayMinItemsError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// ArrayMaxItemsError is produced if an array contains more items than the allowed maximum
|
||||
// ErrorDetails: max
|
||||
ArrayMaxItemsError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items
|
||||
// ErrorDetails: type, i, j
|
||||
ItemsMustBeUniqueError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// ArrayContainsError is produced if an array contains invalid items
|
||||
// ErrorDetails:
|
||||
ArrayContainsError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// ArrayMinPropertiesError is produced if an object contains fewer properties than the allowed minimum
|
||||
// ErrorDetails: min
|
||||
ArrayMinPropertiesError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum
|
||||
// ErrorDetails: max
|
||||
ArrayMaxPropertiesError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// AdditionalPropertyNotAllowedError is produced if an object has additional properties that are not allowed
|
||||
// ErrorDetails: property
|
||||
AdditionalPropertyNotAllowedError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// InvalidPropertyPatternError is produced if a property does not match the defined property pattern
|
||||
// ErrorDetails: property, pattern
|
||||
InvalidPropertyPatternError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// InvalidPropertyNameError is produced if an invalid-named property was found
|
||||
// ErrorDetails: property
|
||||
InvalidPropertyNameError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// StringLengthGTEError is produced if a string is shorter than the minimum required length
|
||||
// ErrorDetails: min
|
||||
StringLengthGTEError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// StringLengthLTEError is produced if a string is longer than the maximum allowed length
|
||||
// ErrorDetails: max
|
||||
StringLengthLTEError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// DoesNotMatchPatternError is produced if a string does not match the defined pattern
|
||||
// ErrorDetails: pattern
|
||||
DoesNotMatchPatternError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// DoesNotMatchFormatError is produced if a string does not match the defined format
|
||||
// ErrorDetails: format
|
||||
DoesNotMatchFormatError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// MultipleOfError is produced if a number is not a multiple of the defined multipleOf
|
||||
// ErrorDetails: multiple
|
||||
MultipleOfError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// NumberGTEError is produced if a number is lower than the allowed minimum
|
||||
// ErrorDetails: min
|
||||
NumberGTEError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// NumberGTError is produced if a number is lower than, or equal to the specified minimum, and exclusiveMinimum is set
|
||||
// ErrorDetails: min
|
||||
NumberGTError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// NumberLTEError is produced if a number is higher than the allowed maximum
|
||||
// ErrorDetails: max
|
||||
NumberLTEError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// NumberLTError is produced if a number is higher than, or equal to the specified maximum, and exclusiveMaximum is set
|
||||
// ErrorDetails: max
|
||||
NumberLTError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// ConditionThenError is produced if a condition's "then" validation is invalid
|
||||
// ErrorDetails: -
|
||||
ConditionThenError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
|
||||
// ConditionElseError is produced if a condition's "else" condition is invalid
|
||||
// ErrorDetails: -
|
||||
ConditionElseError struct {
|
||||
ResultErrorFields
|
||||
}
|
||||
)
|
||||
|
||||
// newError takes a ResultError type and sets the type, context, description, details, value, and field
|
||||
func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) {
|
||||
var t string
|
||||
var d string
|
||||
switch err.(type) {
|
||||
case *FalseError:
|
||||
t = "false"
|
||||
d = locale.False()
|
||||
case *RequiredError:
|
||||
t = "required"
|
||||
d = locale.Required()
|
||||
case *InvalidTypeError:
|
||||
t = "invalid_type"
|
||||
d = locale.InvalidType()
|
||||
case *NumberAnyOfError:
|
||||
t = "number_any_of"
|
||||
d = locale.NumberAnyOf()
|
||||
case *NumberOneOfError:
|
||||
t = "number_one_of"
|
||||
d = locale.NumberOneOf()
|
||||
case *NumberAllOfError:
|
||||
t = "number_all_of"
|
||||
d = locale.NumberAllOf()
|
||||
case *NumberNotError:
|
||||
t = "number_not"
|
||||
d = locale.NumberNot()
|
||||
case *MissingDependencyError:
|
||||
t = "missing_dependency"
|
||||
d = locale.MissingDependency()
|
||||
case *InternalError:
|
||||
t = "internal"
|
||||
d = locale.Internal()
|
||||
case *ConstError:
|
||||
t = "const"
|
||||
d = locale.Const()
|
||||
case *EnumError:
|
||||
t = "enum"
|
||||
d = locale.Enum()
|
||||
case *ArrayNoAdditionalItemsError:
|
||||
t = "array_no_additional_items"
|
||||
d = locale.ArrayNoAdditionalItems()
|
||||
case *ArrayMinItemsError:
|
||||
t = "array_min_items"
|
||||
d = locale.ArrayMinItems()
|
||||
case *ArrayMaxItemsError:
|
||||
t = "array_max_items"
|
||||
d = locale.ArrayMaxItems()
|
||||
case *ItemsMustBeUniqueError:
|
||||
t = "unique"
|
||||
d = locale.Unique()
|
||||
case *ArrayContainsError:
|
||||
t = "contains"
|
||||
d = locale.ArrayContains()
|
||||
case *ArrayMinPropertiesError:
|
||||
t = "array_min_properties"
|
||||
d = locale.ArrayMinProperties()
|
||||
case *ArrayMaxPropertiesError:
|
||||
t = "array_max_properties"
|
||||
d = locale.ArrayMaxProperties()
|
||||
case *AdditionalPropertyNotAllowedError:
|
||||
t = "additional_property_not_allowed"
|
||||
d = locale.AdditionalPropertyNotAllowed()
|
||||
case *InvalidPropertyPatternError:
|
||||
t = "invalid_property_pattern"
|
||||
d = locale.InvalidPropertyPattern()
|
||||
case *InvalidPropertyNameError:
|
||||
t = "invalid_property_name"
|
||||
d = locale.InvalidPropertyName()
|
||||
case *StringLengthGTEError:
|
||||
t = "string_gte"
|
||||
d = locale.StringGTE()
|
||||
case *StringLengthLTEError:
|
||||
t = "string_lte"
|
||||
d = locale.StringLTE()
|
||||
case *DoesNotMatchPatternError:
|
||||
t = "pattern"
|
||||
d = locale.DoesNotMatchPattern()
|
||||
case *DoesNotMatchFormatError:
|
||||
t = "format"
|
||||
d = locale.DoesNotMatchFormat()
|
||||
case *MultipleOfError:
|
||||
t = "multiple_of"
|
||||
d = locale.MultipleOf()
|
||||
case *NumberGTEError:
|
||||
t = "number_gte"
|
||||
d = locale.NumberGTE()
|
||||
case *NumberGTError:
|
||||
t = "number_gt"
|
||||
d = locale.NumberGT()
|
||||
case *NumberLTEError:
|
||||
t = "number_lte"
|
||||
d = locale.NumberLTE()
|
||||
case *NumberLTError:
|
||||
t = "number_lt"
|
||||
d = locale.NumberLT()
|
||||
case *ConditionThenError:
|
||||
t = "condition_then"
|
||||
d = locale.ConditionThen()
|
||||
case *ConditionElseError:
|
||||
t = "condition_else"
|
||||
d = locale.ConditionElse()
|
||||
}
|
||||
|
||||
err.SetType(t)
|
||||
err.SetContext(context)
|
||||
err.SetValue(value)
|
||||
err.SetDetails(details)
|
||||
err.SetDescriptionFormat(d)
|
||||
details["field"] = err.Field()
|
||||
|
||||
if _, exists := details["context"]; !exists && context != nil {
|
||||
details["context"] = context.String()
|
||||
}
|
||||
|
||||
err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
|
||||
}
|
||||
|
||||
// formatErrorDescription takes a string in the default text/template
|
||||
// format and converts it to a string with replacements. The fields come
|
||||
// from the ErrorDetails struct and vary for each type of error.
|
||||
func formatErrorDescription(s string, details ErrorDetails) string {
|
||||
|
||||
var tpl *template.Template
|
||||
var descrAsBuffer bytes.Buffer
|
||||
var err error
|
||||
|
||||
errorTemplates.RLock()
|
||||
tpl = errorTemplates.Lookup(s)
|
||||
errorTemplates.RUnlock()
|
||||
|
||||
if tpl == nil {
|
||||
errorTemplates.Lock()
|
||||
tpl = errorTemplates.New(s)
|
||||
|
||||
if ErrorTemplateFuncs != nil {
|
||||
tpl.Funcs(ErrorTemplateFuncs)
|
||||
}
|
||||
|
||||
tpl, err = tpl.Parse(s)
|
||||
errorTemplates.Unlock()
|
||||
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
}
|
||||
|
||||
err = tpl.Execute(&descrAsBuffer, details)
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
return descrAsBuffer.String()
|
||||
}
|
368
vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
generated
vendored
Normal file
@ -0,0 +1,368 @@
|
||||
package gojsonschema
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/mail"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type (
|
||||
// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
|
||||
FormatChecker interface {
|
||||
// IsFormat checks if input has the correct format and type
|
||||
IsFormat(input interface{}) bool
|
||||
}
|
||||
|
||||
// FormatCheckerChain holds the formatters
|
||||
FormatCheckerChain struct {
|
||||
formatters map[string]FormatChecker
|
||||
}
|
||||
|
||||
// EmailFormatChecker verifies email address formats
|
||||
EmailFormatChecker struct{}
|
||||
|
||||
// IPV4FormatChecker verifies IP addresses in the IPv4 format
|
||||
IPV4FormatChecker struct{}
|
||||
|
||||
// IPV6FormatChecker verifies IP addresses in the IPv6 format
|
||||
IPV6FormatChecker struct{}
|
||||
|
||||
// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
|
||||
//
|
||||
// Valid formats:
|
||||
// Partial Time: HH:MM:SS
|
||||
// Full Date: YYYY-MM-DD
|
||||
// Full Time: HH:MM:SSZ-07:00
|
||||
// Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
|
||||
//
|
||||
// Where
|
||||
// YYYY = 4DIGIT year
|
||||
// MM = 2DIGIT month ; 01-12
|
||||
// DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
|
||||
// HH = 2DIGIT hour ; 00-23
|
||||
// MM = 2DIGIT ; 00-59
|
||||
// SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
|
||||
// T = Literal
|
||||
// Z = Literal
|
||||
//
|
||||
// Note: Nanoseconds are also supported in all formats
|
||||
//
|
||||
// http://tools.ietf.org/html/rfc3339#section-5.6
|
||||
DateTimeFormatChecker struct{}
|
||||
|
||||
// DateFormatChecker verifies date formats
|
||||
//
|
||||
// Valid format:
|
||||
// Full Date: YYYY-MM-DD
|
||||
//
|
||||
// Where
|
||||
// YYYY = 4DIGIT year
|
||||
// MM = 2DIGIT month ; 01-12
|
||||
// DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
|
||||
DateFormatChecker struct{}
|
||||
|
||||
// TimeFormatChecker verifies time formats
|
||||
//
|
||||
// Valid formats:
|
||||
// Partial Time: HH:MM:SS
|
||||
// Full Time: HH:MM:SSZ-07:00
|
||||
//
|
||||
// Where
|
||||
// HH = 2DIGIT hour ; 00-23
|
||||
// MM = 2DIGIT ; 00-59
|
||||
// SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
|
||||
// T = Literal
|
||||
// Z = Literal
|
||||
TimeFormatChecker struct{}
|
||||
|
||||
// URIFormatChecker validates a URI with a valid Scheme per RFC3986
|
||||
URIFormatChecker struct{}
|
||||
|
||||
// URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
|
||||
URIReferenceFormatChecker struct{}
|
||||
|
||||
// URITemplateFormatChecker validates a URI template per RFC6570
|
||||
URITemplateFormatChecker struct{}
|
||||
|
||||
// HostnameFormatChecker validates a hostname is in the correct format
|
||||
HostnameFormatChecker struct{}
|
||||
|
||||
// UUIDFormatChecker validates a UUID is in the correct format
|
||||
UUIDFormatChecker struct{}
|
||||
|
||||
// RegexFormatChecker validates a regex is in the correct format
|
||||
RegexFormatChecker struct{}
|
||||
|
||||
// JSONPointerFormatChecker validates a JSON Pointer per RFC6901
|
||||
JSONPointerFormatChecker struct{}
|
||||
|
||||
// RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format
|
||||
RelativeJSONPointerFormatChecker struct{}
|
||||
)
|
||||
|
||||
var (
|
||||
// FormatCheckers holds the valid formatters, and is a public variable
|
||||
// so library users can add custom formatters
|
||||
FormatCheckers = FormatCheckerChain{
|
||||
formatters: map[string]FormatChecker{
|
||||
"date": DateFormatChecker{},
|
||||
"time": TimeFormatChecker{},
|
||||
"date-time": DateTimeFormatChecker{},
|
||||
"hostname": HostnameFormatChecker{},
|
||||
"email": EmailFormatChecker{},
|
||||
"idn-email": EmailFormatChecker{},
|
||||
"ipv4": IPV4FormatChecker{},
|
||||
"ipv6": IPV6FormatChecker{},
|
||||
"uri": URIFormatChecker{},
|
||||
"uri-reference": URIReferenceFormatChecker{},
|
||||
"iri": URIFormatChecker{},
|
||||
"iri-reference": URIReferenceFormatChecker{},
|
||||
"uri-template": URITemplateFormatChecker{},
|
||||
"uuid": UUIDFormatChecker{},
|
||||
"regex": RegexFormatChecker{},
|
||||
"json-pointer": JSONPointerFormatChecker{},
|
||||
"relative-json-pointer": RelativeJSONPointerFormatChecker{},
|
||||
},
|
||||
}
|
||||
|
||||
// Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
|
||||
rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
|
||||
|
||||
// Use a regex to make sure curly brackets are balanced properly after validating it as a URI
|
||||
rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$")
|
||||
|
||||
rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
|
||||
|
||||
rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$")
|
||||
|
||||
rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$")
|
||||
|
||||
lock = new(sync.RWMutex)
|
||||
)
|
||||
|
||||
// Add adds a FormatChecker to the FormatCheckerChain
|
||||
// The name used will be the value used for the format key in your json schema
|
||||
func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
|
||||
lock.Lock()
|
||||
c.formatters[name] = f
|
||||
lock.Unlock()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
|
||||
func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
|
||||
lock.Lock()
|
||||
delete(c.formatters, name)
|
||||
lock.Unlock()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
|
||||
func (c *FormatCheckerChain) Has(name string) bool {
|
||||
lock.RLock()
|
||||
_, ok := c.formatters[name]
|
||||
lock.RUnlock()
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsFormat will check an input against a FormatChecker with the given name
|
||||
// to see if it is the correct format
|
||||
func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
|
||||
lock.RLock()
|
||||
f, ok := c.formatters[name]
|
||||
lock.RUnlock()
|
||||
|
||||
// If a format is unrecognized it should always pass validation
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return f.IsFormat(input)
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted e-mail address
|
||||
func (f EmailFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
_, err := mail.ParseAddress(asString)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted IPv4-address
|
||||
func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// Credit: https://github.com/asaskevich/govalidator
|
||||
ip := net.ParseIP(asString)
|
||||
return ip != nil && strings.Contains(asString, ".")
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted IPv6-address
|
||||
func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// Credit: https://github.com/asaskevich/govalidator
|
||||
ip := net.ParseIP(asString)
|
||||
return ip != nil && strings.Contains(asString, ":")
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6
|
||||
func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
formats := []string{
|
||||
"15:04:05",
|
||||
"15:04:05Z07:00",
|
||||
"2006-01-02",
|
||||
time.RFC3339,
|
||||
time.RFC3339Nano,
|
||||
}
|
||||
|
||||
for _, format := range formats {
|
||||
if _, err := time.Parse(format, asString); err == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD)
|
||||
func (f DateFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_, err := time.Parse("2006-01-02", asString)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00)
|
||||
func (f TimeFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if _, err := time.Parse("15:04:05Z07:00", asString); err == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
_, err := time.Parse("15:04:05", asString)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsFormat checks if input is correctly formatted URI with a valid Scheme per RFC3986
|
||||
func (f URIFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
u, err := url.Parse(asString)
|
||||
|
||||
if err != nil || u.Scheme == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
return !strings.Contains(asString, `\`)
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986
|
||||
func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
_, err := url.Parse(asString)
|
||||
return err == nil && !strings.Contains(asString, `\`)
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted URI template per RFC6570
|
||||
func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
u, err := url.Parse(asString)
|
||||
if err != nil || strings.Contains(asString, `\`) {
|
||||
return false
|
||||
}
|
||||
|
||||
return rxURITemplate.MatchString(u.Path)
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted hostname
|
||||
func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return rxHostname.MatchString(asString) && len(asString) < 256
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted UUID
|
||||
func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return rxUUID.MatchString(asString)
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted regular expression
|
||||
func (f RegexFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if asString == "" {
|
||||
return true
|
||||
}
|
||||
_, err := regexp.Compile(asString)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901
|
||||
func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return rxJSONPointer.MatchString(asString)
|
||||
}
|
||||
|
||||
// IsFormat checks if input is a correctly formatted relative JSON Pointer
|
||||
func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool {
|
||||
asString, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
return rxRelJSONPointer.MatchString(asString)
|
||||
}
|
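The `FormatCheckers` chain above is extensible at runtime, which is how a consumer could add project-specific `format` values to a schema. A minimal sketch, not part of this diff: the `k3d-name` format and `k3dNameChecker` type below are hypothetical and rely only on the exported `FormatCheckers.Add`/`Has` shown above.

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/xeipuuv/gojsonschema"
)

// k3dNameChecker is a hypothetical custom format checker that only accepts
// lowercase alphanumeric names with dashes.
type k3dNameChecker struct{}

var rxK3dName = regexp.MustCompile(`^[a-z0-9][a-z0-9-]*$`)

// IsFormat implements the checker signature used throughout this file.
func (k3dNameChecker) IsFormat(input interface{}) bool {
	s, ok := input.(string)
	if !ok {
		return false
	}
	return rxK3dName.MatchString(s)
}

func main() {
	// Register the checker under the name used by the schema's "format" keyword.
	gojsonschema.FormatCheckers.Add("k3d-name", k3dNameChecker{})
	fmt.Println(gojsonschema.FormatCheckers.Has("k3d-name")) // true
}
```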
13  vendor/github.com/xeipuuv/gojsonschema/glide.yaml  (generated, vendored, new file)
@@ -0,0 +1,13 @@
package: github.com/xeipuuv/gojsonschema
license: Apache 2.0
import:
- package: github.com/xeipuuv/gojsonschema

- package: github.com/xeipuuv/gojsonpointer

- package: github.com/xeipuuv/gojsonreference

testImport:
- package: github.com/stretchr/testify
  subpackages:
  - assert
7  vendor/github.com/xeipuuv/gojsonschema/go.mod  (generated, vendored, new file)
@@ -0,0 +1,7 @@
module github.com/xeipuuv/gojsonschema

require (
    github.com/stretchr/testify v1.3.0
    github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
    github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
)
11  vendor/github.com/xeipuuv/gojsonschema/go.sum  (generated, vendored, new file)
@@ -0,0 +1,11 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
37  vendor/github.com/xeipuuv/gojsonschema/internalLog.go  (generated, vendored, new file)
@@ -0,0 +1,37 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           xeipuuv
// author-github    https://github.com/xeipuuv
// author-mail      xeipuuv@gmail.com
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Very simple log wrapper.
//                  Used for debugging/testing purposes.
//
// created          01-01-2015

package gojsonschema

import (
    "log"
)

const internalLogEnabled = false

func internalLog(format string, v ...interface{}) {
    log.Printf(format, v...)
}
73  vendor/github.com/xeipuuv/gojsonschema/jsonContext.go  (generated, vendored, new file)
@@ -0,0 +1,73 @@
// Copyright 2013 MongoDB, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// author           tolsen
// author-github    https://github.com/tolsen
//
// repository-name  gojsonschema
// repository-desc  An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description      Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context
//
// created          04-09-2013

package gojsonschema

import "bytes"

// JsonContext implements a persistent linked-list of strings
type JsonContext struct {
    head string
    tail *JsonContext
}

// NewJsonContext creates a new JsonContext
func NewJsonContext(head string, tail *JsonContext) *JsonContext {
    return &JsonContext{head, tail}
}

// String displays the context in reverse.
// This plays well with the data structure's persistent nature with
// Cons and a json document's tree structure.
func (c *JsonContext) String(del ...string) string {
    byteArr := make([]byte, 0, c.stringLen())
    buf := bytes.NewBuffer(byteArr)
    c.writeStringToBuffer(buf, del)

    return buf.String()
}

func (c *JsonContext) stringLen() int {
    length := 0
    if c.tail != nil {
        length = c.tail.stringLen() + 1 // add 1 for "."
    }

    length += len(c.head)
    return length
}

func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
    if c.tail != nil {
        c.tail.writeStringToBuffer(buf, del)

        if len(del) > 0 {
            buf.WriteString(del[0])
        } else {
            buf.WriteString(".")
        }
    }

    buf.WriteString(c.head)
}
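`JsonContext` is what produces the dotted paths (e.g. `(root).options.k3d`) seen later in validation errors. A small usage sketch using only the exported `NewJsonContext` and `String` above; the path segments are made up:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Build (root) -> options -> k3d by consing onto the tail.
	root := gojsonschema.NewJsonContext("(root)", nil)
	options := gojsonschema.NewJsonContext("options", root)
	k3d := gojsonschema.NewJsonContext("k3d", options)

	fmt.Println(k3d.String())    // (root).options.k3d
	fmt.Println(k3d.String("/")) // (root)/options/k3d
}
```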
386  vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go  (generated, vendored, new file)
@@ -0,0 +1,386 @@
|
||||
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// author xeipuuv
|
||||
// author-github https://github.com/xeipuuv
|
||||
// author-mail xeipuuv@gmail.com
|
||||
//
|
||||
// repository-name gojsonschema
|
||||
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
|
||||
//
|
||||
// description Different strategies to load JSON files.
|
||||
// Includes References (file and HTTP), JSON strings and Go types.
|
||||
//
|
||||
// created 01-02-2015
|
||||
|
||||
package gojsonschema
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/xeipuuv/gojsonreference"
|
||||
)
|
||||
|
||||
var osFS = osFileSystem(os.Open)
|
||||
|
||||
// JSONLoader defines the JSON loader interface
|
||||
type JSONLoader interface {
|
||||
JsonSource() interface{}
|
||||
LoadJSON() (interface{}, error)
|
||||
JsonReference() (gojsonreference.JsonReference, error)
|
||||
LoaderFactory() JSONLoaderFactory
|
||||
}
|
||||
|
||||
// JSONLoaderFactory defines the JSON loader factory interface
|
||||
type JSONLoaderFactory interface {
|
||||
// New creates a new JSON loader for the given source
|
||||
New(source string) JSONLoader
|
||||
}
|
||||
|
||||
// DefaultJSONLoaderFactory is the default JSON loader factory
|
||||
type DefaultJSONLoaderFactory struct {
|
||||
}
|
||||
|
||||
// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem
|
||||
type FileSystemJSONLoaderFactory struct {
|
||||
fs http.FileSystem
|
||||
}
|
||||
|
||||
// New creates a new JSON loader for the given source
|
||||
func (d DefaultJSONLoaderFactory) New(source string) JSONLoader {
|
||||
return &jsonReferenceLoader{
|
||||
fs: osFS,
|
||||
source: source,
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new JSON loader for the given source
|
||||
func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader {
|
||||
return &jsonReferenceLoader{
|
||||
fs: f.fs,
|
||||
source: source,
|
||||
}
|
||||
}
|
||||
|
||||
// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem.
|
||||
type osFileSystem func(string) (*os.File, error)
|
||||
|
||||
// Opens a file with the given name
|
||||
func (o osFileSystem) Open(name string) (http.File, error) {
|
||||
return o(name)
|
||||
}
|
||||
|
||||
// JSON Reference loader
|
||||
// references are used to load JSONs from files and HTTP
|
||||
|
||||
type jsonReferenceLoader struct {
|
||||
fs http.FileSystem
|
||||
source string
|
||||
}
|
||||
|
||||
func (l *jsonReferenceLoader) JsonSource() interface{} {
|
||||
return l.source
|
||||
}
|
||||
|
||||
func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) {
|
||||
return gojsonreference.NewJsonReference(l.JsonSource().(string))
|
||||
}
|
||||
|
||||
func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory {
|
||||
return &FileSystemJSONLoaderFactory{
|
||||
fs: l.fs,
|
||||
}
|
||||
}
|
||||
|
||||
// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system.
|
||||
func NewReferenceLoader(source string) JSONLoader {
|
||||
return &jsonReferenceLoader{
|
||||
fs: osFS,
|
||||
source: source,
|
||||
}
|
||||
}
|
||||
|
||||
// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system.
|
||||
func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader {
|
||||
return &jsonReferenceLoader{
|
||||
fs: fs,
|
||||
source: source,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
|
||||
|
||||
var err error
|
||||
|
||||
reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
refToURL := reference
|
||||
refToURL.GetUrl().Fragment = ""
|
||||
|
||||
var document interface{}
|
||||
|
||||
if reference.HasFileScheme {
|
||||
|
||||
filename := strings.TrimPrefix(refToURL.String(), "file://")
|
||||
filename, err = url.QueryUnescape(filename)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// on Windows, a file URL may have an extra leading slash, use slashes
|
||||
// instead of backslashes, and have spaces escaped
|
||||
filename = strings.TrimPrefix(filename, "/")
|
||||
filename = filepath.FromSlash(filename)
|
||||
}
|
||||
|
||||
document, err = l.loadFromFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
document, err = l.loadFromHTTP(refToURL.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return document, nil
|
||||
|
||||
}
|
||||
|
||||
func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {
|
||||
|
||||
// return cached versions of the metaschemas for drafts 4, 6 and 7
// for performance and to allow easier offline use
|
||||
if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" {
|
||||
return decodeJSONUsingNumber(strings.NewReader(metaSchema))
|
||||
}
|
||||
|
||||
resp, err := http.Get(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// must return HTTP Status 200 OK
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status}))
|
||||
}
|
||||
|
||||
bodyBuff, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return decodeJSONUsingNumber(bytes.NewReader(bodyBuff))
|
||||
}
|
||||
|
||||
func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
|
||||
f, err := l.fs.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
bodyBuff, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return decodeJSONUsingNumber(bytes.NewReader(bodyBuff))
|
||||
|
||||
}
|
||||
|
||||
// JSON string loader
|
||||
|
||||
type jsonStringLoader struct {
|
||||
source string
|
||||
}
|
||||
|
||||
func (l *jsonStringLoader) JsonSource() interface{} {
|
||||
return l.source
|
||||
}
|
||||
|
||||
func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) {
|
||||
return gojsonreference.NewJsonReference("#")
|
||||
}
|
||||
|
||||
func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory {
|
||||
return &DefaultJSONLoaderFactory{}
|
||||
}
|
||||
|
||||
// NewStringLoader creates a new JSONLoader, taking a string as source
|
||||
func NewStringLoader(source string) JSONLoader {
|
||||
return &jsonStringLoader{source: source}
|
||||
}
|
||||
|
||||
func (l *jsonStringLoader) LoadJSON() (interface{}, error) {
|
||||
|
||||
return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string)))
|
||||
|
||||
}
|
||||
|
||||
// JSON bytes loader
|
||||
|
||||
type jsonBytesLoader struct {
|
||||
source []byte
|
||||
}
|
||||
|
||||
func (l *jsonBytesLoader) JsonSource() interface{} {
|
||||
return l.source
|
||||
}
|
||||
|
||||
func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) {
|
||||
return gojsonreference.NewJsonReference("#")
|
||||
}
|
||||
|
||||
func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory {
|
||||
return &DefaultJSONLoaderFactory{}
|
||||
}
|
||||
|
||||
// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source
|
||||
func NewBytesLoader(source []byte) JSONLoader {
|
||||
return &jsonBytesLoader{source: source}
|
||||
}
|
||||
|
||||
func (l *jsonBytesLoader) LoadJSON() (interface{}, error) {
|
||||
return decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte)))
|
||||
}
|
||||
|
||||
// JSON Go (types) loader
|
||||
// used to load JSONs from the code as maps, interface{}, structs ...
|
||||
|
||||
type jsonGoLoader struct {
|
||||
source interface{}
|
||||
}
|
||||
|
||||
func (l *jsonGoLoader) JsonSource() interface{} {
|
||||
return l.source
|
||||
}
|
||||
|
||||
func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) {
|
||||
return gojsonreference.NewJsonReference("#")
|
||||
}
|
||||
|
||||
func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory {
|
||||
return &DefaultJSONLoaderFactory{}
|
||||
}
|
||||
|
||||
// NewGoLoader creates a new JSONLoader from a given Go struct
|
||||
func NewGoLoader(source interface{}) JSONLoader {
|
||||
return &jsonGoLoader{source: source}
|
||||
}
|
||||
|
||||
func (l *jsonGoLoader) LoadJSON() (interface{}, error) {
|
||||
|
||||
// convert it to a compliant JSON first to avoid types "mismatches"
|
||||
|
||||
jsonBytes, err := json.Marshal(l.JsonSource())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return decodeJSONUsingNumber(bytes.NewReader(jsonBytes))
|
||||
|
||||
}
|
||||
|
||||
type jsonIOLoader struct {
|
||||
buf *bytes.Buffer
|
||||
}
|
||||
|
||||
// NewReaderLoader creates a new JSON loader using the provided io.Reader
|
||||
func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) {
|
||||
buf := &bytes.Buffer{}
|
||||
return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf)
|
||||
}
|
||||
|
||||
// NewWriterLoader creates a new JSON loader using the provided io.Writer
|
||||
func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) {
|
||||
buf := &bytes.Buffer{}
|
||||
return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf)
|
||||
}
|
||||
|
||||
func (l *jsonIOLoader) JsonSource() interface{} {
|
||||
return l.buf.String()
|
||||
}
|
||||
|
||||
func (l *jsonIOLoader) LoadJSON() (interface{}, error) {
|
||||
return decodeJSONUsingNumber(l.buf)
|
||||
}
|
||||
|
||||
func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) {
|
||||
return gojsonreference.NewJsonReference("#")
|
||||
}
|
||||
|
||||
func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory {
|
||||
return &DefaultJSONLoaderFactory{}
|
||||
}
|
||||
|
||||
// JSON raw loader
|
||||
// In case the JSON is already marshalled to interface{} use this loader
|
||||
// This is used for testing as otherwise there is no guarantee the JSON is marshalled
|
||||
// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
|
||||
type jsonRawLoader struct {
|
||||
source interface{}
|
||||
}
|
||||
|
||||
// NewRawLoader creates a new JSON raw loader for the given source
|
||||
func NewRawLoader(source interface{}) JSONLoader {
|
||||
return &jsonRawLoader{source: source}
|
||||
}
|
||||
func (l *jsonRawLoader) JsonSource() interface{} {
|
||||
return l.source
|
||||
}
|
||||
func (l *jsonRawLoader) LoadJSON() (interface{}, error) {
|
||||
return l.source, nil
|
||||
}
|
||||
func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) {
|
||||
return gojsonreference.NewJsonReference("#")
|
||||
}
|
||||
func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory {
|
||||
return &DefaultJSONLoaderFactory{}
|
||||
}
|
||||
|
||||
func decodeJSONUsingNumber(r io.Reader) (interface{}, error) {
|
||||
|
||||
var document interface{}
|
||||
|
||||
decoder := json.NewDecoder(r)
|
||||
decoder.UseNumber()
|
||||
|
||||
err := decoder.Decode(&document)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return document, nil
|
||||
|
||||
}
|
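All of the loaders above funnel into `decodeJSONUsingNumber`, so they can be used interchangeably. A hedged sketch comparing the string, byte and Go-value variants (the JSON literals are placeholders):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// From a raw JSON string.
	fromString := gojsonschema.NewStringLoader(`{"servers": 1, "agents": 2}`)

	// From bytes, e.g. a config file read from disk.
	fromBytes := gojsonschema.NewBytesLoader([]byte(`{"servers": 1}`))

	// From an already-unmarshalled Go value; it is re-marshalled internally
	// to avoid type mismatches (see jsonGoLoader.LoadJSON above).
	fromGo := gojsonschema.NewGoLoader(map[string]interface{}{"servers": 1})

	for _, l := range []gojsonschema.JSONLoader{fromString, fromBytes, fromGo} {
		doc, err := l.LoadJSON()
		if err != nil {
			panic(err)
		}
		fmt.Printf("%#v\n", doc) // numbers decode as json.Number due to UseNumber
	}
}
```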
472  vendor/github.com/xeipuuv/gojsonschema/locales.go  (generated, vendored, new file)
@@ -0,0 +1,472 @@
|
||||
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// author xeipuuv
|
||||
// author-github https://github.com/xeipuuv
|
||||
// author-mail xeipuuv@gmail.com
|
||||
//
|
||||
// repository-name gojsonschema
|
||||
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
|
||||
//
|
||||
// description Contains const string and messages.
|
||||
//
|
||||
// created 01-01-2015
|
||||
|
||||
package gojsonschema
|
||||
|
||||
type (
|
||||
// locale is an interface for defining custom error strings
|
||||
locale interface {
|
||||
|
||||
// False returns a format-string for "false" schema validation errors
|
||||
False() string
|
||||
|
||||
// Required returns a format-string for "required" schema validation errors
|
||||
Required() string
|
||||
|
||||
// InvalidType returns a format-string for "invalid type" schema validation errors
|
||||
InvalidType() string
|
||||
|
||||
// NumberAnyOf returns a format-string for "anyOf" schema validation errors
|
||||
NumberAnyOf() string
|
||||
|
||||
// NumberOneOf returns a format-string for "oneOf" schema validation errors
|
||||
NumberOneOf() string
|
||||
|
||||
// NumberAllOf returns a format-string for "allOf" schema validation errors
|
||||
NumberAllOf() string
|
||||
|
||||
// NumberNot returns a format-string to format a NumberNotError
|
||||
NumberNot() string
|
||||
|
||||
// MissingDependency returns a format-string for "missing dependency" schema validation errors
|
||||
MissingDependency() string
|
||||
|
||||
// Internal returns a format-string for internal errors
|
||||
Internal() string
|
||||
|
||||
// Const returns a format-string to format a ConstError
|
||||
Const() string
|
||||
|
||||
// Enum returns a format-string to format an EnumError
|
||||
Enum() string
|
||||
|
||||
// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema
|
||||
ArrayNotEnoughItems() string
|
||||
|
||||
// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError
|
||||
ArrayNoAdditionalItems() string
|
||||
|
||||
// ArrayMinItems returns a format-string to format an ArrayMinItemsError
|
||||
ArrayMinItems() string
|
||||
|
||||
// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError
|
||||
ArrayMaxItems() string
|
||||
|
||||
// Unique returns a format-string to format an ItemsMustBeUniqueError
|
||||
Unique() string
|
||||
|
||||
// ArrayContains returns a format-string to format an ArrayContainsError
|
||||
ArrayContains() string
|
||||
|
||||
// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError
|
||||
ArrayMinProperties() string
|
||||
|
||||
// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError
|
||||
ArrayMaxProperties() string
|
||||
|
||||
// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError
|
||||
AdditionalPropertyNotAllowed() string
|
||||
|
||||
// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError
|
||||
InvalidPropertyPattern() string
|
||||
|
||||
// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError
|
||||
InvalidPropertyName() string
|
||||
|
||||
// StringGTE returns a format-string to format an StringLengthGTEError
|
||||
StringGTE() string
|
||||
|
||||
// StringLTE returns a format-string to format an StringLengthLTEError
|
||||
StringLTE() string
|
||||
|
||||
// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError
|
||||
DoesNotMatchPattern() string
|
||||
|
||||
// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError
|
||||
DoesNotMatchFormat() string
|
||||
|
||||
// MultipleOf returns a format-string to format an MultipleOfError
|
||||
MultipleOf() string
|
||||
|
||||
// NumberGTE returns a format-string to format an NumberGTEError
|
||||
NumberGTE() string
|
||||
|
||||
// NumberGT returns a format-string to format an NumberGTError
|
||||
NumberGT() string
|
||||
|
||||
// NumberLTE returns a format-string to format an NumberLTEError
|
||||
NumberLTE() string
|
||||
|
||||
// NumberLT returns a format-string to format an NumberLTError
|
||||
NumberLT() string
|
||||
|
||||
// Schema validations
|
||||
|
||||
// RegexPattern returns a format-string to format a regex-pattern error
|
||||
RegexPattern() string
|
||||
|
||||
// GreaterThanZero returns a format-string to format an error where a number must be greater than zero
|
||||
GreaterThanZero() string
|
||||
|
||||
// MustBeOfA returns a format-string to format an error where a value is of the wrong type
|
||||
MustBeOfA() string
|
||||
|
||||
// MustBeOfAn returns a format-string to format an error where a value is of the wrong type
|
||||
MustBeOfAn() string
|
||||
|
||||
// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error
|
||||
CannotBeUsedWithout() string
|
||||
|
||||
// CannotBeGT returns a format-string to format an error where a value is greater than allowed
|
||||
CannotBeGT() string
|
||||
|
||||
// MustBeOfType returns a format-string to format an error where a value does not match the required type
|
||||
MustBeOfType() string
|
||||
|
||||
// MustBeValidRegex returns a format-string to format an error where a regex is invalid
|
||||
MustBeValidRegex() string
|
||||
|
||||
// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format
|
||||
MustBeValidFormat() string
|
||||
|
||||
// MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0
|
||||
MustBeGTEZero() string
|
||||
|
||||
// KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed
|
||||
KeyCannotBeGreaterThan() string
|
||||
|
||||
// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type
|
||||
KeyItemsMustBeOfType() string
|
||||
|
||||
// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique
|
||||
KeyItemsMustBeUnique() string
|
||||
|
||||
// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error
|
||||
ReferenceMustBeCanonical() string
|
||||
|
||||
// NotAValidType returns a format-string to format an invalid type error
|
||||
NotAValidType() string
|
||||
|
||||
// Duplicated returns a format-string to format an error where types are duplicated
|
||||
Duplicated() string
|
||||
|
||||
// HttpBadStatus returns a format-string for errors when loading a schema using HTTP
|
||||
HttpBadStatus() string
|
||||
|
||||
// ParseError returns a format-string for JSON parsing errors
|
||||
ParseError() string
|
||||
|
||||
// ConditionThen returns a format-string for ConditionThenError errors
|
||||
ConditionThen() string
|
||||
|
||||
// ConditionElse returns a format-string for ConditionElseError errors
|
||||
ConditionElse() string
|
||||
|
||||
// ErrorFormat returns a format string for errors
|
||||
ErrorFormat() string
|
||||
}
|
||||
|
||||
// DefaultLocale is the default locale for this package
|
||||
DefaultLocale struct{}
|
||||
)
|
||||
|
||||
// False returns a format-string for "false" schema validation errors
|
||||
func (l DefaultLocale) False() string {
|
||||
return "False always fails validation"
|
||||
}
|
||||
|
||||
// Required returns a format-string for "required" schema validation errors
|
||||
func (l DefaultLocale) Required() string {
|
||||
return `{{.property}} is required`
|
||||
}
|
||||
|
||||
// InvalidType returns a format-string for "invalid type" schema validation errors
|
||||
func (l DefaultLocale) InvalidType() string {
|
||||
return `Invalid type. Expected: {{.expected}}, given: {{.given}}`
|
||||
}
|
||||
|
||||
// NumberAnyOf returns a format-string for "anyOf" schema validation errors
|
||||
func (l DefaultLocale) NumberAnyOf() string {
|
||||
return `Must validate at least one schema (anyOf)`
|
||||
}
|
||||
|
||||
// NumberOneOf returns a format-string for "oneOf" schema validation errors
|
||||
func (l DefaultLocale) NumberOneOf() string {
|
||||
return `Must validate one and only one schema (oneOf)`
|
||||
}
|
||||
|
||||
// NumberAllOf returns a format-string for "allOf" schema validation errors
|
||||
func (l DefaultLocale) NumberAllOf() string {
|
||||
return `Must validate all the schemas (allOf)`
|
||||
}
|
||||
|
||||
// NumberNot returns a format-string to format a NumberNotError
|
||||
func (l DefaultLocale) NumberNot() string {
|
||||
return `Must not validate the schema (not)`
|
||||
}
|
||||
|
||||
// MissingDependency returns a format-string for "missing dependency" schema validation errors
|
||||
func (l DefaultLocale) MissingDependency() string {
|
||||
return `Has a dependency on {{.dependency}}`
|
||||
}
|
||||
|
||||
// Internal returns a format-string for internal errors
|
||||
func (l DefaultLocale) Internal() string {
|
||||
return `Internal Error {{.error}}`
|
||||
}
|
||||
|
||||
// Const returns a format-string to format a ConstError
|
||||
func (l DefaultLocale) Const() string {
|
||||
return `{{.field}} does not match: {{.allowed}}`
|
||||
}
|
||||
|
||||
// Enum returns a format-string to format an EnumError
|
||||
func (l DefaultLocale) Enum() string {
|
||||
return `{{.field}} must be one of the following: {{.allowed}}`
|
||||
}
|
||||
|
||||
// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError
|
||||
func (l DefaultLocale) ArrayNoAdditionalItems() string {
|
||||
return `No additional items allowed on array`
|
||||
}
|
||||
|
||||
// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema
|
||||
func (l DefaultLocale) ArrayNotEnoughItems() string {
|
||||
return `Not enough items on array to match positional list of schema`
|
||||
}
|
||||
|
||||
// ArrayMinItems returns a format-string to format an ArrayMinItemsError
|
||||
func (l DefaultLocale) ArrayMinItems() string {
|
||||
return `Array must have at least {{.min}} items`
|
||||
}
|
||||
|
||||
// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError
|
||||
func (l DefaultLocale) ArrayMaxItems() string {
|
||||
return `Array must have at most {{.max}} items`
|
||||
}
|
||||
|
||||
// Unique returns a format-string to format an ItemsMustBeUniqueError
|
||||
func (l DefaultLocale) Unique() string {
|
||||
return `{{.type}} items[{{.i}},{{.j}}] must be unique`
|
||||
}
|
||||
|
||||
// ArrayContains returns a format-string to format an ArrayContainsError
|
||||
func (l DefaultLocale) ArrayContains() string {
|
||||
return `At least one of the items must match`
|
||||
}
|
||||
|
||||
// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError
|
||||
func (l DefaultLocale) ArrayMinProperties() string {
|
||||
return `Must have at least {{.min}} properties`
|
||||
}
|
||||
|
||||
// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError
|
||||
func (l DefaultLocale) ArrayMaxProperties() string {
|
||||
return `Must have at most {{.max}} properties`
|
||||
}
|
||||
|
||||
// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError
|
||||
func (l DefaultLocale) AdditionalPropertyNotAllowed() string {
|
||||
return `Additional property {{.property}} is not allowed`
|
||||
}
|
||||
|
||||
// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError
|
||||
func (l DefaultLocale) InvalidPropertyPattern() string {
|
||||
return `Property "{{.property}}" does not match pattern {{.pattern}}`
|
||||
}
|
||||
|
||||
// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError
|
||||
func (l DefaultLocale) InvalidPropertyName() string {
|
||||
return `Property name of "{{.property}}" does not match`
|
||||
}
|
||||
|
||||
// StringGTE returns a format-string to format an StringLengthGTEError
|
||||
func (l DefaultLocale) StringGTE() string {
|
||||
return `String length must be greater than or equal to {{.min}}`
|
||||
}
|
||||
|
||||
// StringLTE returns a format-string to format an StringLengthLTEError
|
||||
func (l DefaultLocale) StringLTE() string {
|
||||
return `String length must be less than or equal to {{.max}}`
|
||||
}
|
||||
|
||||
// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError
|
||||
func (l DefaultLocale) DoesNotMatchPattern() string {
|
||||
return `Does not match pattern '{{.pattern}}'`
|
||||
}
|
||||
|
||||
// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError
|
||||
func (l DefaultLocale) DoesNotMatchFormat() string {
|
||||
return `Does not match format '{{.format}}'`
|
||||
}
|
||||
|
||||
// MultipleOf returns a format-string to format an MultipleOfError
|
||||
func (l DefaultLocale) MultipleOf() string {
|
||||
return `Must be a multiple of {{.multiple}}`
|
||||
}
|
||||
|
||||
// NumberGTE returns the format string to format a NumberGTEError
|
||||
func (l DefaultLocale) NumberGTE() string {
|
||||
return `Must be greater than or equal to {{.min}}`
|
||||
}
|
||||
|
||||
// NumberGT returns the format string to format a NumberGTError
|
||||
func (l DefaultLocale) NumberGT() string {
|
||||
return `Must be greater than {{.min}}`
|
||||
}
|
||||
|
||||
// NumberLTE returns the format string to format a NumberLTEError
|
||||
func (l DefaultLocale) NumberLTE() string {
|
||||
return `Must be less than or equal to {{.max}}`
|
||||
}
|
||||
|
||||
// NumberLT returns the format string to format a NumberLTError
|
||||
func (l DefaultLocale) NumberLT() string {
|
||||
return `Must be less than {{.max}}`
|
||||
}
|
||||
|
||||
// Schema validators
|
||||
|
||||
// RegexPattern returns a format-string to format a regex-pattern error
|
||||
func (l DefaultLocale) RegexPattern() string {
|
||||
return `Invalid regex pattern '{{.pattern}}'`
|
||||
}
|
||||
|
||||
// GreaterThanZero returns a format-string to format an error where a number must be greater than zero
|
||||
func (l DefaultLocale) GreaterThanZero() string {
|
||||
return `{{.number}} must be strictly greater than 0`
|
||||
}
|
||||
|
||||
// MustBeOfA returns a format-string to format an error where a value is of the wrong type
|
||||
func (l DefaultLocale) MustBeOfA() string {
|
||||
return `{{.x}} must be of a {{.y}}`
|
||||
}
|
||||
|
||||
// MustBeOfAn returns a format-string to format an error where a value is of the wrong type
|
||||
func (l DefaultLocale) MustBeOfAn() string {
|
||||
return `{{.x}} must be of an {{.y}}`
|
||||
}
|
||||
|
||||
// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error
|
||||
func (l DefaultLocale) CannotBeUsedWithout() string {
|
||||
return `{{.x}} cannot be used without {{.y}}`
|
||||
}
|
||||
|
||||
// CannotBeGT returns a format-string to format an error where a value is greater than allowed
|
||||
func (l DefaultLocale) CannotBeGT() string {
|
||||
return `{{.x}} cannot be greater than {{.y}}`
|
||||
}
|
||||
|
||||
// MustBeOfType returns a format-string to format an error where a value does not match the required type
|
||||
func (l DefaultLocale) MustBeOfType() string {
|
||||
return `{{.key}} must be of type {{.type}}`
|
||||
}
|
||||
|
||||
// MustBeValidRegex returns a format-string to format an error where a regex is invalid
|
||||
func (l DefaultLocale) MustBeValidRegex() string {
|
||||
return `{{.key}} must be a valid regex`
|
||||
}
|
||||
|
||||
// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format
|
||||
func (l DefaultLocale) MustBeValidFormat() string {
|
||||
return `{{.key}} must be a valid format {{.given}}`
|
||||
}
|
||||
|
||||
// MustBeGTEZero returns a format-string to format an error where a value must be greater or equal than 0
|
||||
func (l DefaultLocale) MustBeGTEZero() string {
|
||||
return `{{.key}} must be greater than or equal to 0`
|
||||
}
|
||||
|
||||
// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed
|
||||
func (l DefaultLocale) KeyCannotBeGreaterThan() string {
|
||||
return `{{.key}} cannot be greater than {{.y}}`
|
||||
}
|
||||
|
||||
// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type
|
||||
func (l DefaultLocale) KeyItemsMustBeOfType() string {
|
||||
return `{{.key}} items must be {{.type}}`
|
||||
}
|
||||
|
||||
// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique
|
||||
func (l DefaultLocale) KeyItemsMustBeUnique() string {
|
||||
return `{{.key}} items must be unique`
|
||||
}
|
||||
|
||||
// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error
|
||||
func (l DefaultLocale) ReferenceMustBeCanonical() string {
|
||||
return `Reference {{.reference}} must be canonical`
|
||||
}
|
||||
|
||||
// NotAValidType returns a format-string to format an invalid type error
|
||||
func (l DefaultLocale) NotAValidType() string {
|
||||
return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}`
|
||||
}
|
||||
|
||||
// Duplicated returns a format-string to format an error where types are duplicated
|
||||
func (l DefaultLocale) Duplicated() string {
|
||||
return `{{.type}} type is duplicated`
|
||||
}
|
||||
|
||||
// HttpBadStatus returns a format-string for errors when loading a schema using HTTP
|
||||
func (l DefaultLocale) HttpBadStatus() string {
|
||||
return `Could not read schema from HTTP, response status is {{.status}}`
|
||||
}
|
||||
|
||||
// ErrorFormat returns a format string for errors
|
||||
// Replacement options: field, description, context, value
|
||||
func (l DefaultLocale) ErrorFormat() string {
|
||||
return `{{.field}}: {{.description}}`
|
||||
}
|
||||
|
||||
// ParseError returns a format-string for JSON parsing errors
|
||||
func (l DefaultLocale) ParseError() string {
|
||||
return `Expected: {{.expected}}, given: Invalid JSON`
|
||||
}
|
||||
|
||||
// ConditionThen returns a format-string for ConditionThenError errors
|
||||
// If/Else
|
||||
func (l DefaultLocale) ConditionThen() string {
|
||||
return `Must validate "then" as "if" was valid`
|
||||
}
|
||||
|
||||
// ConditionElse returns a format-string for ConditionElseError errors
|
||||
func (l DefaultLocale) ConditionElse() string {
|
||||
return `Must validate "else" as "if" was not valid`
|
||||
}
|
||||
|
||||
// constants
|
||||
const (
|
||||
STRING_NUMBER = "number"
|
||||
STRING_ARRAY_OF_STRINGS = "array of strings"
|
||||
STRING_ARRAY_OF_SCHEMAS = "array of schemas"
|
||||
STRING_SCHEMA = "valid schema"
|
||||
STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
|
||||
STRING_PROPERTIES = "properties"
|
||||
STRING_DEPENDENCY = "dependency"
|
||||
STRING_PROPERTY = "property"
|
||||
STRING_UNDEFINED = "undefined"
|
||||
STRING_CONTEXT_ROOT = "(root)"
|
||||
STRING_ROOT_SCHEMA_PROPERTY = "(root)"
|
||||
)
|
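The strings above are `text/template` bodies keyed by names such as `{{.field}}` and `{{.max}}`. Assuming the exported `Locale` variable referenced from `result.go` below is an assignable package-level variable (its declaration sits outside the files shown here), a custom locale could be sketched by embedding `DefaultLocale`:

```go
package main

import "github.com/xeipuuv/gojsonschema"

// terseLocale embeds DefaultLocale so it satisfies the locale interface,
// overriding only the generic error format used when errors are printed.
type terseLocale struct {
	gojsonschema.DefaultLocale
}

// ErrorFormat drops the field prefix and keeps just the description.
func (terseLocale) ErrorFormat() string {
	return `{{.description}}`
}

func main() {
	// Assumption: Locale is an exported, assignable package variable.
	gojsonschema.Locale = terseLocale{}
}
```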
220  vendor/github.com/xeipuuv/gojsonschema/result.go  (generated, vendored, new file)
@@ -0,0 +1,220 @@
|
||||
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// author xeipuuv
|
||||
// author-github https://github.com/xeipuuv
|
||||
// author-mail xeipuuv@gmail.com
|
||||
//
|
||||
// repository-name gojsonschema
|
||||
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
|
||||
//
|
||||
// description Result and ResultError implementations.
|
||||
//
|
||||
// created 01-01-2015
|
||||
|
||||
package gojsonschema
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type (
|
||||
// ErrorDetails is a map of details specific to each error.
|
||||
// While the values will vary, every error will contain a "field" value
|
||||
ErrorDetails map[string]interface{}
|
||||
|
||||
// ResultError is the interface that library errors must implement
|
||||
ResultError interface {
|
||||
// Field returns the field name without the root context
|
||||
// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
|
||||
Field() string
|
||||
// SetType sets the error-type
|
||||
SetType(string)
|
||||
// Type returns the error-type
|
||||
Type() string
|
||||
// SetContext sets the JSON-context for the error
|
||||
SetContext(*JsonContext)
|
||||
// Context returns the JSON-context of the error
|
||||
Context() *JsonContext
|
||||
// SetDescription sets a description for the error
|
||||
SetDescription(string)
|
||||
// Description returns the description of the error
|
||||
Description() string
|
||||
// SetDescriptionFormat sets the format for the description in the default text/template format
|
||||
SetDescriptionFormat(string)
|
||||
// DescriptionFormat returns the format for the description in the default text/template format
|
||||
DescriptionFormat() string
|
||||
// SetValue sets the value related to the error
|
||||
SetValue(interface{})
|
||||
// Value returns the value related to the error
|
||||
Value() interface{}
|
||||
// SetDetails sets the details specific to the error
|
||||
SetDetails(ErrorDetails)
|
||||
// Details returns details about the error
|
||||
Details() ErrorDetails
|
||||
// String returns a string representation of the error
|
||||
String() string
|
||||
}
|
||||
|
||||
// ResultErrorFields holds the fields for each ResultError implementation.
|
||||
// ResultErrorFields implements the ResultError interface, so custom errors
|
||||
// can be defined by just embedding this type
|
||||
ResultErrorFields struct {
|
||||
errorType string // A string with the type of error (i.e. invalid_type)
|
||||
context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
|
||||
description string // A human readable error message
|
||||
descriptionFormat string // A format for human readable error message
|
||||
value interface{} // Value given by the JSON file that is the source of the error
|
||||
details ErrorDetails
|
||||
}
|
||||
|
||||
// Result holds the result of a validation
|
||||
Result struct {
|
||||
errors []ResultError
|
||||
// Scores how well the validation matched. Useful in generating
|
||||
// better error messages for anyOf and oneOf.
|
||||
score int
|
||||
}
|
||||
)
|
||||
|
||||
// Field returns the field name without the root context
|
||||
// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
|
||||
func (v *ResultErrorFields) Field() string {
|
||||
return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
|
||||
}
|
||||
|
||||
// SetType sets the error-type
|
||||
func (v *ResultErrorFields) SetType(errorType string) {
|
||||
v.errorType = errorType
|
||||
}
|
||||
|
||||
// Type returns the error-type
|
||||
func (v *ResultErrorFields) Type() string {
|
||||
return v.errorType
|
||||
}
|
||||
|
||||
// SetContext sets the JSON-context for the error
|
||||
func (v *ResultErrorFields) SetContext(context *JsonContext) {
|
||||
v.context = context
|
||||
}
|
||||
|
||||
// Context returns the JSON-context of the error
|
||||
func (v *ResultErrorFields) Context() *JsonContext {
|
||||
return v.context
|
||||
}
|
||||
|
||||
// SetDescription sets a description for the error
|
||||
func (v *ResultErrorFields) SetDescription(description string) {
|
||||
v.description = description
|
||||
}
|
||||
|
||||
// Description returns the description of the error
|
||||
func (v *ResultErrorFields) Description() string {
|
||||
return v.description
|
||||
}
|
||||
|
||||
// SetDescriptionFormat sets the format for the description in the default text/template format
|
||||
func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) {
|
||||
v.descriptionFormat = descriptionFormat
|
||||
}
|
||||
|
||||
// DescriptionFormat returns the format for the description in the default text/template format
|
||||
func (v *ResultErrorFields) DescriptionFormat() string {
|
||||
return v.descriptionFormat
|
||||
}
|
||||
|
||||
// SetValue sets the value related to the error
|
||||
func (v *ResultErrorFields) SetValue(value interface{}) {
|
||||
v.value = value
|
||||
}
|
||||
|
||||
// Value returns the value related to the error
|
||||
func (v *ResultErrorFields) Value() interface{} {
|
||||
return v.value
|
||||
}
|
||||
|
||||
// SetDetails sets the details specific to the error
|
||||
func (v *ResultErrorFields) SetDetails(details ErrorDetails) {
|
||||
v.details = details
|
||||
}
|
||||
|
||||
// Details returns details about the error
|
||||
func (v *ResultErrorFields) Details() ErrorDetails {
|
||||
return v.details
|
||||
}
|
||||
|
||||
// String returns a string representation of the error
|
||||
func (v ResultErrorFields) String() string {
|
||||
// as a fallback, the value is displayed go style
|
||||
valueString := fmt.Sprintf("%v", v.value)
|
||||
|
||||
// marshal the Go value to JSON
|
||||
if v.value == nil {
|
||||
valueString = TYPE_NULL
|
||||
} else {
|
||||
if vs, err := marshalToJSONString(v.value); err == nil {
|
||||
if vs == nil {
|
||||
valueString = TYPE_NULL
|
||||
} else {
|
||||
valueString = *vs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{
|
||||
"context": v.context.String(),
|
||||
"description": v.description,
|
||||
"value": valueString,
|
||||
"field": v.Field(),
|
||||
})
|
||||
}
|
||||
|
||||
// Valid indicates if no errors were found
|
||||
func (v *Result) Valid() bool {
|
||||
return len(v.errors) == 0
|
||||
}
|
||||
|
||||
// Errors returns the errors that were found
|
||||
func (v *Result) Errors() []ResultError {
|
||||
return v.errors
|
||||
}
|
||||
|
||||
// AddError appends a fully filled error to the error set
|
||||
// SetDescription() will be called with the result of the parsed err.DescriptionFormat()
|
||||
func (v *Result) AddError(err ResultError, details ErrorDetails) {
|
||||
if _, exists := details["context"]; !exists && err.Context() != nil {
|
||||
details["context"] = err.Context().String()
|
||||
}
|
||||
|
||||
err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
|
||||
|
||||
v.errors = append(v.errors, err)
|
||||
}
|
||||
|
||||
func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) {
|
||||
newError(err, context, value, Locale, details)
|
||||
v.errors = append(v.errors, err)
|
||||
v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
|
||||
}
|
||||
|
||||
// Used to copy errors from a sub-schema to the main one
|
||||
func (v *Result) mergeErrors(otherResult *Result) {
|
||||
v.errors = append(v.errors, otherResult.Errors()...)
|
||||
v.score += otherResult.score
|
||||
}
|
||||
|
||||
func (v *Result) incrementScore() {
|
||||
v.score++
|
||||
}
|
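`Result` is what callers walk after a validation run. A hedged usage sketch; `Validate` itself is defined outside the files shown in this excerpt, and the schema/document literals are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)
	document := gojsonschema.NewStringLoader(`{"kind": "Simple"}`)

	// Validate is part of the package's public API and returns the *Result type above.
	result, err := gojsonschema.Validate(schema, document)
	if err != nil {
		log.Fatal(err)
	}

	if result.Valid() {
		fmt.Println("document is valid")
		return
	}
	for _, e := range result.Errors() {
		// Field() strips the "(root)." prefix; Description() is the rendered template.
		fmt.Printf("- %s: %s\n", e.Field(), e.Description())
	}
}
```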
1087  vendor/github.com/xeipuuv/gojsonschema/schema.go  (generated, vendored, new file)
File diff suppressed because it is too large
206  vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go  (generated, vendored, new file)
@@ -0,0 +1,206 @@
|
||||
// Copyright 2018 johandorland ( https://github.com/johandorland )
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gojsonschema
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
|
||||
"github.com/xeipuuv/gojsonreference"
|
||||
)
|
||||
|
||||
// SchemaLoader is used to load schemas
|
||||
type SchemaLoader struct {
|
||||
pool *schemaPool
|
||||
AutoDetect bool
|
||||
Validate bool
|
||||
Draft Draft
|
||||
}
|
||||
|
||||
// NewSchemaLoader creates a new NewSchemaLoader
|
||||
func NewSchemaLoader() *SchemaLoader {
|
||||
|
||||
ps := &SchemaLoader{
|
||||
pool: &schemaPool{
|
||||
schemaPoolDocuments: make(map[string]*schemaPoolDocument),
|
||||
},
|
||||
AutoDetect: true,
|
||||
Validate: false,
|
||||
Draft: Hybrid,
|
||||
}
|
||||
ps.pool.autoDetect = &ps.AutoDetect
|
||||
|
||||
return ps
|
||||
}
|
||||
|
||||
func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error {
|
||||
|
||||
var (
|
||||
schema string
|
||||
err error
|
||||
)
|
||||
if sl.AutoDetect {
|
||||
schema, _, err = parseSchemaURL(documentNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// If no explicit "$schema" is used, use the default metaschema associated with the draft used
|
||||
if schema == "" {
|
||||
if sl.Draft == Hybrid {
|
||||
return nil
|
||||
}
|
||||
schema = drafts.GetSchemaURL(sl.Draft)
|
||||
}
|
||||
|
||||
// Disable validation when loading the metaschema to prevent an infinite recursive loop
|
||||
sl.Validate = false
|
||||
|
||||
metaSchema, err := sl.Compile(NewReferenceLoader(schema))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sl.Validate = true
|
||||
|
||||
result := metaSchema.validateDocument(documentNode)
|
||||
|
||||
if !result.Valid() {
|
||||
var res bytes.Buffer
|
||||
for _, err := range result.Errors() {
|
||||
res.WriteString(err.String())
|
||||
res.WriteString("\n")
|
||||
}
|
||||
return errors.New(res.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddSchemas adds an arbitrary number of schemas to the schema cache. As this function does not require
|
||||
// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema
|
||||
func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error {
|
||||
emptyRef, _ := gojsonreference.NewJsonReference("")
|
||||
|
||||
for _, loader := range loaders {
|
||||
doc, err := loader.LoadJSON()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sl.Validate {
|
||||
if err := sl.validateMetaschema(doc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Directly use the Recursive function, so that it only gets added to the schema pool by $id
|
||||
// and not by the ref of the document as it's empty
|
||||
if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//AddSchema adds a schema under the provided URL to the schema cache
|
||||
func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error {
|
||||
|
||||
ref, err := gojsonreference.NewJsonReference(url)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
doc, err := loader.LoadJSON()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sl.Validate {
|
||||
if err := sl.validateMetaschema(doc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return sl.pool.parseReferences(doc, ref, true)
|
||||
}
|
||||
|
||||
// Compile loads and compiles a schema
|
||||
func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) {
|
||||
|
||||
ref, err := rootSchema.JsonReference()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d := Schema{}
|
||||
d.pool = sl.pool
|
||||
d.pool.jsonLoaderFactory = rootSchema.LoaderFactory()
|
||||
d.documentReference = ref
|
||||
d.referencePool = newSchemaReferencePool()
|
||||
|
||||
var doc interface{}
|
||||
if ref.String() != "" {
|
||||
// Get document from schema pool
|
||||
spd, err := d.pool.GetDocument(d.documentReference)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
doc = spd.Document
|
||||
} else {
|
||||
// Load JSON directly
|
||||
doc, err = rootSchema.LoadJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// References need only be parsed if loading JSON directly
|
||||
// as pool.GetDocument already does this for us if loading by reference
|
||||
err = sl.pool.parseReferences(doc, ref, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if sl.Validate {
|
||||
if err := sl.validateMetaschema(doc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
draft := sl.Draft
|
||||
if sl.AutoDetect {
|
||||
_, detectedDraft, err := parseSchemaURL(doc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if detectedDraft != nil {
|
||||
draft = *detectedDraft
|
||||
}
|
||||
}
|
||||
|
||||
err = d.parse(doc, draft)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &d, nil
|
||||
}
|
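As a rough, self-contained sketch of how the vendored SchemaLoader API above is typically driven (the schema and document strings here are made up for illustration, not k3d's actual `Simple` schema), a consumer compiles a schema once and then validates documents against it:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Hypothetical schema and document, only to exercise the API surface shown above.
	schema := gojsonschema.NewStringLoader(`{"type": "object", "required": ["apiVersion"]}`)
	document := gojsonschema.NewStringLoader(`{"apiVersion": "k3d.io/v1alpha2"}`)

	sl := gojsonschema.NewSchemaLoader()
	sl.Validate = true // also validate the schema itself against its metaschema (when "$schema" is set)

	compiled, err := sl.Compile(schema)
	if err != nil {
		panic(err)
	}

	result, err := compiled.Validate(document)
	if err != nil {
		panic(err)
	}

	if !result.Valid() {
		for _, e := range result.Errors() {
			fmt.Println(e.String())
		}
	}
}
```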
215  vendor/github.com/xeipuuv/gojsonschema/schemaPool.go  (generated, vendored, new file)
@@ -0,0 +1,215 @@
// schemaPool: resource pooling and $id/$ref reference resolution, so the same schema document is never fetched twice (parseReferences, GetDocument).
File diff suppressed (generated vendor file)
68  vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go  (generated, vendored, new file)
@@ -0,0 +1,68 @@
// schemaReferencePool: cache of already-parsed referenced sub-schemas (Get, Add).
File diff suppressed (generated vendor file)
83  vendor/github.com/xeipuuv/gojsonschema/schemaType.go  (generated, vendored, new file)
@@ -0,0 +1,83 @@
// jsonSchemaType: helper for the "type" keyword and combinations of types (IsTyped, Add, Contains, String).
File diff suppressed (generated vendor file)
149  vendor/github.com/xeipuuv/gojsonschema/subSchema.go  (generated, vendored, new file)
@@ -0,0 +1,149 @@
// subSchema: the KEY_* keyword constants and the parsed representation of a (sub-)schema with all of its validation attributes.
File diff suppressed (generated vendor file)
62  vendor/github.com/xeipuuv/gojsonschema/types.go  (generated, vendored, new file)
@@ -0,0 +1,62 @@
// types: the TYPE_* constants plus the JSON_TYPES and SCHEMA_TYPES lists.
File diff suppressed (generated vendor file)
197  vendor/github.com/xeipuuv/gojsonschema/utils.go  (generated, vendored, new file)
@@ -0,0 +1,197 @@
// utils: reflection and json.Number helpers (isKind, checkJSONInteger, mustBeInteger, mustBeNumber, marshalWithoutNumber, convertDocumentNode).
File diff suppressed (generated vendor file)
858  vendor/github.com/xeipuuv/gojsonschema/validation.go  (generated, vendored, new file)
@@ -0,0 +1,858 @@
// validation: the validation phase (package-level Validate, Schema.Validate, validateRecursive and the per-kind validators for combinators, common keywords, arrays, objects, strings and numbers).
File diff suppressed (generated vendor file)
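The vendored validation.go above exposes a one-shot, package-level `Validate(schemaLoader, documentLoader)` helper on top of `Schema.Validate`. A minimal sketch of wiring it up for an already-decoded document (`schemaJSON` and `document` are placeholder names, not identifiers from this commit):

```go
package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/xeipuuv/gojsonschema"
)

// validateAgainstSchema validates a decoded document (e.g. the result of a
// yaml.Unmarshal into map[string]interface{}) against a JSON schema held as raw bytes.
func validateAgainstSchema(schemaJSON []byte, document interface{}) error {
	result, err := gojsonschema.Validate(
		gojsonschema.NewBytesLoader(schemaJSON),
		gojsonschema.NewGoLoader(document),
	)
	if err != nil {
		return err
	}
	if result.Valid() {
		return nil
	}
	var sb strings.Builder
	for _, e := range result.Errors() {
		sb.WriteString(e.String())
		sb.WriteString("\n")
	}
	return errors.New(sb.String())
}

func main() {
	schema := []byte(`{"type": "object", "required": ["kind"]}`)
	doc := map[string]interface{}{"apiVersion": "k3d.io/v1alpha2"}
	fmt.Println(validateAgainstSchema(schema, doc)) // fails: "kind" is required
}
```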
1  vendor/gopkg.in/yaml.v2/.travis.yml  (generated, vendored)
@@ -11,6 +11,7 @@ go:
- "1.11.x"
- "1.12.x"
- "1.13.x"
- "1.14.x"
- "tip"

go_import_path: gopkg.in/yaml.v2
6  vendor/gopkg.in/yaml.v2/apic.go  (generated, vendored)
@@ -79,6 +79,8 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
	parser.encoding = encoding
}

var disableLineWrapping = false

// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
	*emitter = yaml_emitter_t{
@@ -86,7 +88,9 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) {
		raw_buffer: make([]byte, 0, output_raw_buffer_size),
		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
		events:     make([]yaml_event_t, 0, initial_queue_size),
		best_width: -1,
	}
	if disableLineWrapping {
		emitter.best_width = -1
	}
}
8  vendor/gopkg.in/yaml.v2/go.mod  (generated, vendored)
@@ -1,5 +1,5 @@
module "gopkg.in/yaml.v2"
module gopkg.in/yaml.v2

require (
	"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
)
go 1.15

require gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405
14  vendor/gopkg.in/yaml.v2/yaml.go  (generated, vendored)

@@ -175,7 +175,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
//     Zero valued structs will be omitted if all their public
//     fields are zero, unless they implement an IsZero
//     method (see the IsZeroer interface type), in which
//     case the field will be included if that method returns true.
//     case the field will be excluded if IsZero returns true.
//
//  flow         Marshal using a flow style (useful for structs,
//               sequences and maps).
@@ -464,3 +464,15 @@ func isZero(v reflect.Value) bool {
	}
	return false
}

// FutureLineWrap globally disables line wrapping when encoding long strings.
// This is a temporary and thus deprecated method introduced to faciliate
// migration towards v3, which offers more control of line lengths on
// individual encodings, and has a default matching the behavior introduced
// by this function.
//
// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
// in v2.4.0, at which point this function was introduced to help migration.
func FutureLineWrap() {
	disableLineWrapping = true
}
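FutureLineWrap is new in yaml.v2 v2.4.0, which this commit pulls in. A small sketch of how a consumer could opt out of line wrapping for long scalar values; the struct-free map and its value below are invented for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v2"
)

func main() {
	// Opt out of line wrapping globally, before any Marshal call.
	yaml.FutureLineWrap()

	// Hypothetical value longer than the default wrap width.
	data := map[string]string{"token": strings.Repeat("x", 120)}

	out, err := yaml.Marshal(data)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // the long value stays on a single line
}
```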
13  vendor/modules.txt  (vendored)

@@ -281,6 +281,14 @@ github.com/subosito/gotenv
github.com/syndtr/gocapability/capability
# github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243
github.com/willf/bitset
# github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb
## explicit
github.com/xeipuuv/gojsonpointer
# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
github.com/xeipuuv/gojsonreference
# github.com/xeipuuv/gojsonschema v1.2.0
## explicit
github.com/xeipuuv/gojsonschema
# go.opencensus.io v0.22.0
go.opencensus.io
go.opencensus.io/internal
@@ -409,7 +417,7 @@ gopkg.in/inf.v0
# gopkg.in/ini.v1 v1.58.0
## explicit
gopkg.in/ini.v1
# gopkg.in/yaml.v2 v2.3.0
# gopkg.in/yaml.v2 v2.4.0
## explicit
gopkg.in/yaml.v2
# gotest.tools v2.2.0+incompatible
@@ -480,5 +488,6 @@ k8s.io/client-go/util/keyutil
k8s.io/klog
# k8s.io/utils v0.0.0-20191114184206-e782cd3c129f
k8s.io/utils/integer
# sigs.k8s.io/yaml v1.1.0
# sigs.k8s.io/yaml v1.2.0
## explicit
sigs.k8s.io/yaml
15  vendor/sigs.k8s.io/yaml/.travis.yml  (generated, vendored)

@@ -1,14 +1,13 @@
language: go
dist: xenial
go:
  - 1.9.x
  - 1.10.x
  - 1.11.x
  - 1.12.x
  - 1.13.x
script:
  - go get -t -v ./...
  - diff -u <(echo -n) <(gofmt -d .)
  - diff -u <(echo -n) <(gofmt -d *.go)
  - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
  - go tool vet .
  - go test -v -race ./...
  - GO111MODULE=on go vet .
  - GO111MODULE=on go test -v -race ./...
  - git diff --exit-code
install:
  - go get golang.org/x/lint/golint
  - GO111MODULE=off go get golang.org/x/lint/golint
2  vendor/sigs.k8s.io/yaml/OWNERS  (generated, vendored)

@@ -1,3 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
  - dims
  - lavalamp
14  vendor/sigs.k8s.io/yaml/README.md  (generated, vendored)

@@ -1,12 +1,14 @@
# YAML marshaling and unmarshaling support for Go

[](https://travis-ci.org/ghodss/yaml)
[](https://travis-ci.org/kubernetes-sigs/yaml)

kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml).

## Introduction

A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.

In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).

## Compatibility

@@ -32,13 +34,13 @@ GOOD:
To install, run:

```
$ go get github.com/ghodss/yaml
$ go get sigs.k8s.io/yaml
```

And import using:

```
import "github.com/ghodss/yaml"
import "sigs.k8s.io/yaml"
```

Usage is very similar to the JSON library:
@@ -49,7 +51,7 @@ package main
import (
	"fmt"

	"github.com/ghodss/yaml"
	"sigs.k8s.io/yaml"
)

type Person struct {
@@ -93,7 +95,7 @@ package main
import (
	"fmt"

	"github.com/ghodss/yaml"
	"sigs.k8s.io/yaml"
)

func main() {
vendor/sigs.k8s.io/yaml/go.mod
generated
vendored
Normal file
8
vendor/sigs.k8s.io/yaml/go.mod
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
module sigs.k8s.io/yaml
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
gopkg.in/yaml.v2 v2.2.8
|
||||
)
|
9  vendor/sigs.k8s.io/yaml/go.sum  (generated, vendored, new file)

@@ -0,0 +1,9 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
61  vendor/sigs.k8s.io/yaml/yaml.go  (generated, vendored)

@@ -317,3 +317,64 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in
		return yamlObj, nil
	}
}

// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice,
// without going through a byte representation. A nil or empty map[string]interface{} input is
// converted to an empty map, i.e. yaml.MapSlice(nil).
//
// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice.
//
// int64 and float64 are down casted following the logic of github.com/go-yaml/yaml:
// - float64s are down-casted as far as possible without data-loss to int, int64, uint64.
// - int64s are down-casted to int if possible without data-loss.
//
// Big int/int64/uint64 do not lose precision as in the json-yaml roundtripping case.
//
// string, bool and any other types are unchanged.
func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice {
	if len(j) == 0 {
		return nil
	}
	ret := make(yaml.MapSlice, 0, len(j))
	for k, v := range j {
		ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)})
	}
	return ret
}

func jsonToYAMLValue(j interface{}) interface{} {
	switch j := j.(type) {
	case map[string]interface{}:
		if j == nil {
			return interface{}(nil)
		}
		return JSONObjectToYAMLObject(j)
	case []interface{}:
		if j == nil {
			return interface{}(nil)
		}
		ret := make([]interface{}, len(j))
		for i := range j {
			ret[i] = jsonToYAMLValue(j[i])
		}
		return ret
	case float64:
		// replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151
		if i64 := int64(j); j == float64(i64) {
			if i := int(i64); i64 == int64(i) {
				return i
			}
			return i64
		}
		if ui64 := uint64(j); j == float64(ui64) {
			return ui64
		}
		return j
	case int64:
		if i := int(j); j == int64(i) {
			return i
		}
		return j
	}
	return j
}
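JSONObjectToYAMLObject is new in sigs.k8s.io/yaml v1.2.0 (the version bump recorded in vendor/modules.txt above). A short sketch of converting a JSON-style map into an ordered yaml.MapSlice without a byte round-trip; the input map here is hypothetical and the key names are not taken from this commit:

```go
package main

import (
	"fmt"

	yamlv2 "gopkg.in/yaml.v2"
	sigsyaml "sigs.k8s.io/yaml"
)

func main() {
	// Hypothetical JSON object, e.g. the result of json.Unmarshal into map[string]interface{}.
	obj := map[string]interface{}{
		"name":    "example",
		"servers": float64(1), // json.Unmarshal decodes numbers as float64
		"enabled": true,
	}

	// Convert in memory; numbers are down-casted (1.0 -> 1) following go-yaml's rules.
	slice := sigsyaml.JSONObjectToYAMLObject(obj)

	// A MapSlice preserves insertion order when marshalled with gopkg.in/yaml.v2.
	out, err := yamlv2.Marshal(slice)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```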