Added HANA database plugin (#2811)

* Added HANA dynamic secret backend

* Added acceptance tests for HANA secret backend

* Add HANA backend as a logical backend to server

* Added documentation to HANA secret backend

* Added vendored libraries

* Go fmt

* Migrate hana credential creation to plugin

* Removed deprecated hana logical backend

* Migrated documentation for HANA database plugin

* Updated HANA DB plugin to use role name in credential generation

* Update HANA plugin tests

* If env vars are not configured, tests will skip rather than succeed

* Fixed some improperly named string variables

* Removed unused import

* Import SAP hdb driver
This commit is contained in:
Tony Cai 2017-07-07 13:11:23 -07:00 committed by Brian Kassouf
parent 7d592ecbff
commit f92f4d4972
94 changed files with 14274 additions and 0 deletions

View File

@ -2,6 +2,7 @@ package builtinplugins
import (
"github.com/hashicorp/vault/plugins/database/cassandra"
"github.com/hashicorp/vault/plugins/database/hana"
"github.com/hashicorp/vault/plugins/database/mongodb"
"github.com/hashicorp/vault/plugins/database/mssql"
"github.com/hashicorp/vault/plugins/database/mysql"
@ -22,6 +23,7 @@ var plugins map[string]BuiltinFactory = map[string]BuiltinFactory{
"mssql-database-plugin": mssql.New,
"cassandra-database-plugin": cassandra.New,
"mongodb-database-plugin": mongodb.New,
"hana-database-plugin": hana.New,
}
func Get(name string) (BuiltinFactory, bool) {

View File

@ -0,0 +1,21 @@
package main
import (
"log"
"os"
"github.com/hashicorp/vault/helper/pluginutil"
"github.com/hashicorp/vault/plugins/database/hana"
)
// main launches the HANA database plugin as a standalone process,
// reading the TLS/API flags Vault passes to plugin binaries, then
// serving the plugin RPC server until exit.
func main() {
	apiClientMeta := &pluginutil.APIClientMeta{}
	flags := apiClientMeta.FlagSet()

	// FlagSet.Parse expects only the arguments, not the program name
	// (unlike the package-level flag.Parse), so skip os.Args[0].
	flags.Parse(os.Args[1:])

	err := hana.Run(apiClientMeta.GetTLSConfig())
	if err != nil {
		log.Println(err)
		os.Exit(1)
	}
}

View File

@ -0,0 +1,283 @@
package hana
import (
"database/sql"
"fmt"
"strings"
"time"
_ "github.com/SAP/go-hdb/driver"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/plugins"
"github.com/hashicorp/vault/plugins/helper/database/connutil"
"github.com/hashicorp/vault/plugins/helper/database/credsutil"
"github.com/hashicorp/vault/plugins/helper/database/dbutil"
)
const (
	// hanaTypeName is the database type reported by Type() and set on the
	// underlying SQL connection producer.
	hanaTypeName = "hdb"
)
// HANA is an implementation of Database interface.
// It composes a connection producer (manages the *sql.DB and its lock)
// with a credentials producer (generates usernames/passwords/expirations).
type HANA struct {
	connutil.ConnectionProducer
	credsutil.CredentialsProducer
}
// New implements builtinplugins.BuiltinFactory
func New() (interface{}, error) {
	conn := &connutil.SQLConnectionProducer{}
	conn.Type = hanaTypeName

	creds := &credsutil.SQLCredentialsProducer{
		DisplayNameLen: 32,
		RoleNameLen:    20,
		UsernameLen:    128,
		Separator:      "_",
	}

	return &HANA{
		ConnectionProducer:  conn,
		CredentialsProducer: creds,
	}, nil
}
// Run instantiates a HANA object, and runs the RPC server for the plugin
func Run(apiTLSConfig *api.TLSConfig) error {
	db, err := New()
	if err != nil {
		return err
	}

	// Serve blocks for the lifetime of the plugin process.
	plugins.Serve(db.(*HANA), apiTLSConfig)
	return nil
}
// Type returns the TypeName for this backend ("hdb").
func (h *HANA) Type() (string, error) {
	return hanaTypeName, nil
}
// getConnection returns the live *sql.DB held by the embedded connection
// producer, establishing the connection if necessary.
func (h *HANA) getConnection() (*sql.DB, error) {
	conn, err := h.Connection()
	if err != nil {
		return nil, err
	}

	return conn.(*sql.DB), nil
}
// CreateUser generates the username/password on the underlying HANA secret backend
// as instructed by the CreationStatement provided.
//
// The creation statements are split on ';' and executed inside a single
// transaction, with {{name}}, {{password}} and {{expiration}} substituted.
// Returns the generated username and password on success.
func (h *HANA) CreateUser(statements dbplugin.Statements, usernameConfig dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) {
	// Grab the lock
	h.Lock()
	defer h.Unlock()

	if statements.CreationStatements == "" {
		return "", "", dbutil.ErrEmptyCreationStatement
	}

	// Get the connection
	db, err := h.getConnection()
	if err != nil {
		return "", "", err
	}

	// Generate username
	username, err = h.GenerateUsername(usernameConfig)
	if err != nil {
		return "", "", err
	}
	// HANA does not allow hyphens in usernames, and highly prefers capital letters
	username = strings.Replace(username, "-", "_", -1)
	username = strings.ToUpper(username)

	// Generate password
	password, err = h.GeneratePassword()
	if err != nil {
		return "", "", err
	}
	// Most HANA configurations have password constraints
	// Prefix with A1a to satisfy these constraints. User will be forced to change upon login
	password = strings.Replace(password, "-", "_", -1)
	password = "A1a" + password

	// If expiration is in the role SQL, HANA will deactivate the user when time is up,
	// regardless of whether vault is alive to revoke lease
	expirationStr, err := h.GenerateExpiration(expiration)
	if err != nil {
		return "", "", err
	}

	// Start a transaction
	tx, err := db.Begin()
	if err != nil {
		return "", "", err
	}
	defer tx.Rollback()

	// Execute each query
	for _, query := range strutil.ParseArbitraryStringSlice(statements.CreationStatements, ";") {
		query = strings.TrimSpace(query)
		if len(query) == 0 {
			continue
		}

		stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
			"name":       username,
			"password":   password,
			"expiration": expirationStr,
		}))
		if err != nil {
			return "", "", err
		}
		// Close each statement as soon as it has run: a defer inside the
		// loop would keep every prepared statement open until the function
		// returns.
		if _, err := stmt.Exec(); err != nil {
			stmt.Close()
			return "", "", err
		}
		stmt.Close()
	}

	// Commit the transaction
	if err := tx.Commit(); err != nil {
		return "", "", err
	}

	return username, password, nil
}
// RenewUser extends an existing user's lease by altering the user's
// VALID UNTIL property to the new expiration time.
func (h *HANA) RenewUser(statements dbplugin.Statements, username string, expiration time.Time) error {
	// Grab the lock, mirroring CreateUser: the shared connection producer
	// state is not safe for concurrent use.
	h.Lock()
	defer h.Unlock()

	// Get connection
	db, err := h.getConnection()
	if err != nil {
		return err
	}

	// If expiration is in the role SQL, HANA will deactivate the user when time is up,
	// regardless of whether vault is alive to revoke lease
	expirationStr, err := h.GenerateExpiration(expiration)
	if err != nil {
		return err
	}

	// Start a transaction
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Renew user's valid until property field. The username is generated by
	// this plugin (uppercased, underscores only), so direct interpolation is
	// acceptable here; identifiers cannot be bound as SQL parameters.
	stmt, err := tx.Prepare(fmt.Sprintf("ALTER USER %s VALID UNTIL '%s'", username, expirationStr))
	if err != nil {
		return err
	}
	defer stmt.Close()
	if _, err := stmt.Exec(); err != nil {
		return err
	}

	// Commit the transaction
	if err := tx.Commit(); err != nil {
		return err
	}

	return nil
}
// RevokeUser deactivates the user and performs a soft drop, or runs the
// role's custom RevocationStatements when provided.
func (h *HANA) RevokeUser(statements dbplugin.Statements, username string) error {
	// default revoke will be a soft drop on user
	if statements.RevocationStatements == "" {
		return h.revokeUserDefault(username)
	}

	// Get connection
	db, err := h.getConnection()
	if err != nil {
		return err
	}

	// Start a transaction
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Execute each query
	for _, query := range strutil.ParseArbitraryStringSlice(statements.RevocationStatements, ";") {
		query = strings.TrimSpace(query)
		if len(query) == 0 {
			continue
		}

		stmt, err := tx.Prepare(dbutil.QueryHelper(query, map[string]string{
			"name": username,
		}))
		if err != nil {
			return err
		}
		// Close statements eagerly; a defer here would accumulate one open
		// statement per query until the function returns.
		if _, err := stmt.Exec(); err != nil {
			stmt.Close()
			return err
		}
		stmt.Close()
	}

	// Commit the transaction
	if err := tx.Commit(); err != nil {
		return err
	}

	return nil
}
// revokeUserDefault disables server login for the user, then soft-drops it.
// DROP ... RESTRICT fails if the user still owns dependent objects; roles
// needing a hard drop should supply custom revocation statements instead.
func (h *HANA) revokeUserDefault(username string) error {
	db, err := h.getConnection()
	if err != nil {
		return err
	}

	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Deactivate first (kills server login), then invalidate current
	// sessions and perform the soft drop.
	for _, q := range []string{
		fmt.Sprintf("ALTER USER %s DEACTIVATE USER NOW", username),
		fmt.Sprintf("DROP USER %s RESTRICT", username),
	} {
		stmt, err := tx.Prepare(q)
		if err != nil {
			return err
		}
		if _, err := stmt.Exec(); err != nil {
			stmt.Close()
			return err
		}
		stmt.Close()
	}

	return tx.Commit()
}

View File

@ -0,0 +1,167 @@
package hana
import (
"database/sql"
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
"github.com/hashicorp/vault/plugins/helper/database/connutil"
)
// TestHANA_Initialize is an acceptance test: it needs a live HANA instance
// (HANA_URL) and VAULT_ACC=1, otherwise it is skipped.
func TestHANA_Initialize(t *testing.T) {
	if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
		t.SkipNow()
	}
	connURL := os.Getenv("HANA_URL")

	connectionDetails := map[string]interface{}{
		"connection_url": connURL,
	}

	dbRaw, _ := New()
	db := dbRaw.(*HANA)
	// verifyConnection=true forces an immediate connection attempt.
	err := db.Initialize(connectionDetails, true)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
	if !connProducer.Initialized {
		t.Fatal("Database should be initialized")
	}

	err = db.Close()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
}
// this test will leave a lingering user on the system
// (acceptance test: requires HANA_URL and VAULT_ACC=1, otherwise skipped)
func TestHANA_CreateUser(t *testing.T) {
	if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
		t.SkipNow()
	}
	connURL := os.Getenv("HANA_URL")

	connectionDetails := map[string]interface{}{
		"connection_url": connURL,
	}

	dbRaw, _ := New()
	db := dbRaw.(*HANA)
	err := db.Initialize(connectionDetails, true)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	usernameConfig := dbplugin.UsernameConfig{
		DisplayName: "test-test",
		RoleName:    "test-test",
	}

	// Test with no configured Creation Statement: CreateUser must fail
	_, _, err = db.CreateUser(dbplugin.Statements{}, usernameConfig, time.Now().Add(time.Hour))
	if err == nil {
		t.Fatal("Expected error when no creation statement is provided")
	}

	statements := dbplugin.Statements{
		CreationStatements: testHANARole,
	}

	username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour))
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Verify the generated credentials can actually log in
	if err = testCredsExist(t, connURL, username, password); err != nil {
		t.Fatalf("Could not connect with new credentials: %s", err)
	}
}
// TestHANA_RevokeUser exercises both the default (soft-drop) revocation
// path and a custom revocation statement.
// (acceptance test: requires HANA_URL and VAULT_ACC=1, otherwise skipped)
func TestHANA_RevokeUser(t *testing.T) {
	if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" {
		t.SkipNow()
	}
	connURL := os.Getenv("HANA_URL")

	connectionDetails := map[string]interface{}{
		"connection_url": connURL,
	}

	dbRaw, _ := New()
	db := dbRaw.(*HANA)
	err := db.Initialize(connectionDetails, true)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	statements := dbplugin.Statements{
		CreationStatements: testHANARole,
	}
	usernameConfig := dbplugin.UsernameConfig{
		DisplayName: "test-test",
		RoleName:    "test-test",
	}

	// Test default revoke statements
	username, password, err := db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour))
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if err = testCredsExist(t, connURL, username, password); err != nil {
		t.Fatalf("Could not connect with new credentials: %s", err)
	}

	err = db.RevokeUser(statements, username)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := testCredsExist(t, connURL, username, password); err == nil {
		t.Fatal("Credentials were not revoked")
	}

	// Test custom revoke statement
	username, password, err = db.CreateUser(statements, usernameConfig, time.Now().Add(time.Hour))
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if err = testCredsExist(t, connURL, username, password); err != nil {
		t.Fatalf("Could not connect with new credentials: %s", err)
	}

	statements.RevocationStatements = testHANADrop
	err = db.RevokeUser(statements, username)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := testCredsExist(t, connURL, username, password); err == nil {
		t.Fatal("Credentials were not revoked")
	}
}
// testCredsExist attempts to log in to the database behind connURL with the
// supplied credentials; a nil return means the credentials are valid.
func testCredsExist(t testing.TB, connURL, username, password string) error {
	// Replace the credential portion of the DSN (user:pass@host:port) with
	// the newly generated creds. Guard against a malformed URL so we return
	// an error instead of panicking on a missing '@'.
	parts := strings.SplitN(connURL, "@", 2)
	if len(parts) != 2 {
		return fmt.Errorf("expected connection URL of the form user:pass@host, got %q", connURL)
	}
	connURL = fmt.Sprintf("hdb://%s:%s@%s", username, password, parts[1])
	db, err := sql.Open("hdb", connURL)
	if err != nil {
		return err
	}
	defer db.Close()
	return db.Ping()
}
// testHANARole is a minimal creation statement: it creates the user with an
// expiring password ({{name}}/{{password}}/{{expiration}} are templated).
const testHANARole = `
CREATE USER {{name}} PASSWORD {{password}} VALID UNTIL '{{expiration}}';`

// testHANADrop is a custom revocation statement exercising the
// RevocationStatements path (CASCADE drops dependent objects too).
const testHANADrop = `
DROP USER {{name}} CASCADE;`

201
vendor/github.com/SAP/go-hdb/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

5
vendor/github.com/SAP/go-hdb/NOTICE generated vendored Normal file
View File

@ -0,0 +1,5 @@
SAP HANA Database driver for the Go Programming Language
Copyright 2014 SAP SE
This product includes software developed at
SAP SE (http://www.sap.com).

43
vendor/github.com/SAP/go-hdb/driver/bytes.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
/*
Copyright 2017 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"database/sql/driver"
)
// NullBytes represents an []byte that may be null.
// NullBytes implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullBytes struct {
	Bytes []byte
	Valid bool // Valid is true if Bytes is not NULL
}

// Scan implements the Scanner interface.
// A non-[]byte (including nil) source leaves Bytes nil and Valid false.
func (n *NullBytes) Scan(value interface{}) error {
	b, ok := value.([]byte)
	n.Bytes = b
	n.Valid = ok
	return nil
}

// Value implements the driver Valuer interface.
func (n NullBytes) Value() (driver.Value, error) {
	if n.Valid {
		return n.Bytes, nil
	}
	return nil, nil
}

335
vendor/github.com/SAP/go-hdb/driver/converter.go generated vendored Normal file
View File

@ -0,0 +1,335 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"database/sql/driver"
"errors"
"fmt"
"math"
"reflect"
"time"
p "github.com/SAP/go-hdb/internal/protocol"
)
const (
minTinyint = 0
maxTinyint = math.MaxUint8
minSmallint = math.MinInt16
maxSmallint = math.MaxInt16
minInteger = math.MinInt32
maxInteger = math.MaxInt32
minBigint = math.MinInt64
maxBigint = math.MaxInt64
maxReal = math.MaxFloat32
maxDouble = math.MaxFloat64
)
// ErrorIntegerOutOfRange means that an integer exceeds the size of the hdb integer field.
var ErrIntegerOutOfRange = errors.New("integer out of range error")
// ErrorIntegerOutOfRange means that a float exceeds the size of the hdb float field.
var ErrFloatOutOfRange = errors.New("float out of range error")
var typeOfTime = reflect.TypeOf((*time.Time)(nil)).Elem()
var typeOfBytes = reflect.TypeOf((*[]byte)(nil)).Elem()
// columnConverter returns the driver.ValueConverter used to validate and
// convert Go values bound for a column of wire data type dt. Types without
// a dedicated converter fall back to dbUnknownType, whose conversion
// always errors.
func columnConverter(dt p.DataType) driver.ValueConverter {
	switch dt {
	default:
		return dbUnknownType{}
	case p.DtTinyint:
		return dbTinyint
	case p.DtSmallint:
		return dbSmallint
	case p.DtInt:
		return dbInt
	case p.DtBigint:
		return dbBigint
	case p.DtReal:
		return dbReal
	case p.DtDouble:
		return dbDouble
	case p.DtTime:
		return dbTime
	case p.DtDecimal:
		return dbDecimal
	case p.DtString:
		return dbString
	case p.DtBytes:
		return dbBytes
	case p.DtLob:
		return dbLob
	}
}
// unknown type

// dbUnknownType is the fallback converter for column types this driver
// does not understand; every conversion through it fails.
type dbUnknownType struct{}

var _ driver.ValueConverter = dbUnknownType{} //check that type implements interface

// ConvertValue always returns an error describing the unsupported value.
func (dbUnknownType) ConvertValue(v interface{}) (driver.Value, error) {
	return nil, fmt.Errorf("column converter for data %v type %T is not implemented", v, v)
}
// int types

// dbTinyint..dbBigint validate integers bound for the corresponding hdb
// integer columns, range-checking against each column type's bounds.
var dbTinyint = dbIntType{min: minTinyint, max: maxTinyint}
var dbSmallint = dbIntType{min: minSmallint, max: maxSmallint}
var dbInt = dbIntType{min: minInteger, max: maxInteger}
var dbBigint = dbIntType{min: minBigint, max: maxBigint}

// dbIntType converts signed/unsigned Go integers (and pointers to them)
// to int64, rejecting values outside [min, max].
type dbIntType struct {
	min int64
	max int64
}

var _ driver.ValueConverter = dbIntType{} //check that type implements interface

// ConvertValue converts v to an int64 driver.Value.
// nil passes through; pointers are dereferenced (nil pointers convert to
// nil); out-of-range values yield ErrIntegerOutOfRange.
func (i dbIntType) ConvertValue(v interface{}) (driver.Value, error) {
	if v == nil {
		return v, nil
	}

	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		i64 := rv.Int()
		if i64 > i.max || i64 < i.min {
			return nil, ErrIntegerOutOfRange
		}
		return i64, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		u64 := rv.Uint()
		// NOTE(review): uint64(i.max) assumes i.max >= 0, which holds for
		// all converters defined above; unsigned values cannot be < min.
		if u64 > uint64(i.max) {
			return nil, ErrIntegerOutOfRange
		}
		return int64(u64), nil
	case reflect.Ptr:
		// indirect pointers
		if rv.IsNil() {
			return nil, nil
		}
		return i.ConvertValue(rv.Elem().Interface())
	}

	return nil, fmt.Errorf("unsupported integer conversion type error %T %v", v, v)
}
//float types

// dbReal / dbDouble validate floats for the REAL and DOUBLE columns,
// checking the magnitude against each column type's maximum.
var dbReal = dbFloatType{max: maxReal}
var dbDouble = dbFloatType{max: maxDouble}

type dbFloatType struct {
	max float64
}

var _ driver.ValueConverter = dbFloatType{} //check that type implements interface

// ConvertValue converts float32/float64 (and pointers to them) to float64.
// nil passes through; values with |v| > max yield ErrFloatOutOfRange.
func (f dbFloatType) ConvertValue(v interface{}) (driver.Value, error) {
	if v == nil {
		return v, nil
	}

	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Float32, reflect.Float64:
		f64 := rv.Float()
		if math.Abs(f64) > f.max {
			return nil, ErrFloatOutOfRange
		}
		return f64, nil
	case reflect.Ptr:
		// indirect pointers
		if rv.IsNil() {
			return nil, nil
		}
		return f.ConvertValue(rv.Elem().Interface())
	}

	return nil, fmt.Errorf("unsupported float conversion type error %T %v", v, v)
}
//time

var dbTime = dbTimeType{}

type dbTimeType struct{}

var _ driver.ValueConverter = dbTimeType{} //check that type implements interface

// ConvertValue converts time.Time values — including named types
// convertible to time.Time, and pointers — for time columns.
// nil and nil pointers pass through as nil.
func (t dbTimeType) ConvertValue(v interface{}) (driver.Value, error) {
	if v == nil {
		return nil, nil
	}

	switch v := v.(type) {
	case time.Time:
		return v, nil
	}

	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Ptr:
		// indirect pointers
		if rv.IsNil() {
			return nil, nil
		}
		return t.ConvertValue(rv.Elem().Interface())
	}

	// Named types whose underlying type is time.Time are converted.
	if rv.Type().ConvertibleTo(typeOfTime) {
		tv := rv.Convert(typeOfTime)
		return tv.Interface().(time.Time), nil
	}

	return nil, fmt.Errorf("unsupported time conversion type error %T %v", v, v)
}
//decimal

// dbDecimal validates values bound for decimal columns: only nil and raw
// []byte (the packed decimal wire representation) are accepted.
var dbDecimal = dbDecimalType{}

type dbDecimalType struct{}

var _ driver.ValueConverter = dbDecimalType{} //check that type implements interface

// ConvertValue passes nil and []byte through; anything else is rejected.
func (dbDecimalType) ConvertValue(v interface{}) (driver.Value, error) {
	switch b := v.(type) {
	case nil:
		return nil, nil
	case []byte:
		return b, nil
	}
	return nil, fmt.Errorf("unsupported decimal conversion type error %T %v", v, v)
}
//string

var dbString = dbStringType{}

type dbStringType struct{}

var _ driver.ValueConverter = dbStringType{} //check that type implements interface

// ConvertValue converts values for character columns: string and []byte
// pass through; string-kinded and []byte-convertible values are converted;
// pointers are dereferenced; nil passes through.
func (d dbStringType) ConvertValue(v interface{}) (driver.Value, error) {
	if v == nil {
		return v, nil
	}

	switch v := v.(type) {
	case string, []byte:
		return v, nil
	}

	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.String:
		return rv.String(), nil
	case reflect.Slice:
		if rv.Type() == typeOfBytes {
			return rv.Bytes(), nil
		}
	case reflect.Ptr:
		// indirect pointers
		if rv.IsNil() {
			return nil, nil
		}
		return d.ConvertValue(rv.Elem().Interface())
	}

	// Named types convertible to []byte (e.g. custom byte-slice types).
	if rv.Type().ConvertibleTo(typeOfBytes) {
		bv := rv.Convert(typeOfBytes)
		return bv.Interface().([]byte), nil
	}

	return nil, fmt.Errorf("unsupported character conversion type error %T %v", v, v)
}
//bytes

var dbBytes = dbBytesType{}

type dbBytesType struct{}

var _ driver.ValueConverter = dbBytesType{} //check that type implements interface

// ConvertValue converts values for binary columns: []byte passes through;
// []byte-convertible values are converted; pointers are dereferenced;
// nil passes through.
func (d dbBytesType) ConvertValue(v interface{}) (driver.Value, error) {
	if v == nil {
		return v, nil
	}

	if v, ok := v.([]byte); ok {
		return v, nil
	}

	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Slice:
		if rv.Type() == typeOfBytes {
			return rv.Bytes(), nil
		}
	case reflect.Ptr:
		// indirect pointers
		if rv.IsNil() {
			return nil, nil
		}
		return d.ConvertValue(rv.Elem().Interface())
	}

	// Named types convertible to []byte.
	if rv.Type().ConvertibleTo(typeOfBytes) {
		bv := rv.Convert(typeOfBytes)
		return bv.Interface().([]byte), nil
	}

	return nil, fmt.Errorf("unsupported bytes conversion type error %T %v", v, v)
}
//lob

// dbLob validates values bound for lob columns; only int64 is accepted.
var dbLob = dbLobType{}

type dbLobType struct{}

var _ driver.ValueConverter = dbLobType{} //check that type implements interface

// ConvertValue passes int64 values through; anything else is rejected.
func (dbLobType) ConvertValue(v interface{}) (driver.Value, error) {
	switch id := v.(type) {
	case int64:
		return id, nil
	}
	return nil, fmt.Errorf("unsupported lob conversion type error %T %v", v, v)
}

377
vendor/github.com/SAP/go-hdb/driver/decimal.go generated vendored Normal file
View File

@ -0,0 +1,377 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"database/sql/driver"
"errors"
"fmt"
"math"
"math/big"
"sync"
)
//bigint word size (*--> src/pkg/math/big/arith.go)
const (
	// Compute the size _S of a Word in bytes (4 on 32 bit, 8 on 64 bit platforms).
	_m    = ^big.Word(0)
	_logS = _m>>8&1 + _m>>16&1 + _m>>32&1
	_S    = 1 << _logS
)

const (
	// Limits of the decimal128 format used for hdb decimal fields.
	// http://en.wikipedia.org/wiki/Decimal128_floating-point_format
	dec128Digits = 34    // maximum number of significand digits
	dec128Bias   = 6176  // exponent bias: stored exponent = exp + bias
	dec128MinExp = -6176 // smallest unbiased exponent
	dec128MaxExp = 6111  // largest unbiased exponent
)

const (
	decimalSize = 16 //number of bytes of the decimal wire representation
)

// Frequently used big.Int constants; must never be mutated.
var natZero = big.NewInt(0)
var natOne = big.NewInt(1)
var natTen = big.NewInt(10)

// nat caches small powers of ten (10^0 .. 10^10) for exp10.
// Cached values are shared and must not be mutated by callers.
var nat = []*big.Int{
	natOne,                  //10^0
	natTen,                  //10^1
	big.NewInt(100),         //10^2
	big.NewInt(1000),        //10^3
	big.NewInt(10000),       //10^4
	big.NewInt(100000),      //10^5
	big.NewInt(1000000),     //10^6
	big.NewInt(10000000),    //10^7
	big.NewInt(100000000),   //10^8
	big.NewInt(1000000000),  //10^9
	big.NewInt(10000000000), //10^10
}

const lg10 = math.Ln10 / math.Ln2 // ~log2(10)

// maxDecimal is the largest decimal128 significand (10^34 - 1).
var maxDecimal = new(big.Int).SetBytes([]byte{0x01, 0xED, 0x09, 0xBE, 0xAD, 0x87, 0xC0, 0x37, 0x8D, 0x8E, 0x63, 0xFF, 0xFF, 0xFF, 0xFF})

// decFlags reports conditions detected while converting a big.Rat to a decimal.
type decFlags byte

const (
	dfNotExact  decFlags = 1 << iota // precision was lost (rounding occurred)
	dfOverflow                       // value too large for the target format
	dfUnderflow                      // value too small for the target format
)

// ErrDecimalOutOfRange means that a big.Rat exceeds the size of hdb decimal fields.
var ErrDecimalOutOfRange = errors.New("decimal out of range error")

// big.Int free list
var bigIntFree = sync.Pool{
	New: func() interface{} { return new(big.Int) },
}

// big.Rat free list
var bigRatFree = sync.Pool{
	New: func() interface{} { return new(big.Rat) },
}

// A Decimal is the driver representation of a database decimal field value as big.Rat.
type Decimal big.Rat
// Scan implements the database/sql/Scanner interface.
// src must be the 16 byte wire representation of an hdb decimal
// (little endian significand with sign and biased exponent in the top
// two bytes - see decodeDecimal).
func (d *Decimal) Scan(src interface{}) error {
	b, ok := src.([]byte)
	if !ok {
		return fmt.Errorf("decimal: invalid data type %T", src)
	}
	if len(b) != decimalSize {
		return fmt.Errorf("decimal: invalid size %d of %v - %d expected", len(b), b, decimalSize)
	}
	// 0x60 in the top byte flags special values (infinity, NaN, ...),
	// which are not supported.
	if (b[15] & 0x60) == 0x60 {
		return fmt.Errorf("decimal: format (infinity, nan, ...) not supported : %v", b)
	}
	v := (*big.Rat)(d)
	p := v.Num()
	q := v.Denom()
	neg, exp := decodeDecimal(b, p)
	// Fold the decimal exponent into the rational: value = p * 10^exp.
	switch {
	case exp < 0:
		q.Set(exp10(exp * -1))
	case exp == 0:
		q.Set(natOne)
	case exp > 0:
		p.Mul(p, exp10(exp))
		q.Set(natOne)
	}
	if neg {
		v.Neg(v)
	}
	return nil
}

// Value implements the database/sql/Valuer interface.
// The value is rounded to the decimal128 parameters (34 digits).
// Underflow is silently converted to zero; overflow yields ErrDecimalOutOfRange.
func (d Decimal) Value() (driver.Value, error) {
	m := bigIntFree.Get().(*big.Int)
	neg, exp, df := convertRatToDecimal((*big.Rat)(&d), m, dec128Digits, dec128MinExp, dec128MaxExp)
	var v driver.Value
	var err error
	switch {
	default:
		v, err = encodeDecimal(m, neg, exp)
	case df&dfUnderflow != 0: // set to zero
		m.Set(natZero)
		v, err = encodeDecimal(m, false, 0)
	case df&dfOverflow != 0:
		err = ErrDecimalOutOfRange
	}
	// performance (avoid expensive defer)
	bigIntFree.Put(m)
	return v, err
}
// convertRatToDecimal converts the rational x into a decimal significand m
// and exponent exp such that x ≈ sign * m * 10^exp, where m has at most
// digits decimal digits and minExp <= exp <= maxExp. It returns the sign
// (true = negative), the exponent and flags reporting rounding
// (dfNotExact), overflow (dfOverflow) and underflow (dfUnderflow).
// m is an out parameter provided by the caller (pooled big.Int).
func convertRatToDecimal(x *big.Rat, m *big.Int, digits, minExp, maxExp int) (bool, int, decFlags) {
	neg := x.Sign() < 0 //store sign
	if x.Num().Cmp(natZero) == 0 { // zero
		m.Set(natZero)
		return neg, 0, 0
	}
	c := bigRatFree.Get().(*big.Rat).Abs(x) // copy && abs
	a := c.Num()
	b := c.Denom()
	exp, shift := 0, 0
	if c.IsInt() {
		exp = digits10(a) - 1
	} else {
		// Scale numerator or denominator to equal digit counts to
		// estimate the decimal exponent of a/b.
		shift = digits10(a) - digits10(b)
		switch {
		case shift < 0:
			a.Mul(a, exp10(shift*-1))
		case shift > 0:
			b.Mul(b, exp10(shift))
		}
		if a.Cmp(b) == -1 {
			exp = shift - 1
		} else {
			exp = shift
		}
	}
	var df decFlags
	switch {
	default:
		exp = max(exp-digits+1, minExp)
	case exp < minExp:
		df |= dfUnderflow
		exp = exp - digits + 1
	}
	if exp > maxExp {
		df |= dfOverflow
	}
	shift = exp - shift
	// Scale a/b so that the integer quotient m = a/b carries the target exponent.
	switch {
	case shift < 0:
		a.Mul(a, exp10(shift*-1))
	// NOTE(review): this condition looks like it was meant to be shift > 0;
	// since exp <= shift appears to hold here the branch is effectively a
	// no-op (exp10(0) multiply at most) - confirm against upstream go-hdb.
	case exp > 0:
		b.Mul(b, exp10(shift))
	}
	m.QuoRem(a, b, a) // reuse a as rest
	if a.Cmp(natZero) != 0 {
		// round (business >= 0.5 up)
		df |= dfNotExact
		if a.Add(a, a).Cmp(b) >= 0 {
			m.Add(m, natOne)
			// Rounding may carry into an extra digit (m == 10^digits):
			// shift the exponent up and drop the surplus digits.
			if m.Cmp(exp10(digits)) == 0 {
				shift := min(digits, maxExp-exp)
				if shift < 1 { // overflow -> shift one at minimum
					df |= dfOverflow
					shift = 1
				}
				m.Set(exp10(digits - shift))
				exp += shift
			}
		}
	}
	// norm: strip trailing decimal zeros of m while the exponent range allows it.
	for exp < maxExp {
		a.QuoRem(m, natTen, b) // reuse a, b
		if b.Cmp(natZero) != 0 {
			break
		}
		m.Set(a)
		exp++
	}
	// performance (avoid expensive defer)
	bigRatFree.Put(c)
	return neg, exp, df
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// performance: tested with reference work variable
// - but int.Set is expensive, so let's live with big.Int creation for n >= len(nat)
// exp10 returns 10^n. For n < len(nat) a shared cached value is returned
// which must not be mutated by the caller.
func exp10(n int) *big.Int {
	if n < len(nat) {
		return nat[n]
	}
	r := big.NewInt(int64(n))
	return r.Exp(natTen, r, nil)
}

// digits10 returns the number of decimal digits of p
// (assumes p > 0 - TODO confirm all callers).
func digits10(p *big.Int) int {
	k := p.BitLen() // 2^k <= p < 2^(k+1) - 1
	//i := int(float64(k) / lg10) //minimal digits base 10
	i := k * 100 / 332 // integer lower bound of k/log2(10)
	if i < 1 {
		i = 1
	}
	// Increase the lower bound until 10^i exceeds p.
	for ; ; i++ {
		if p.Cmp(exp10(i)) < 0 {
			return i
		}
	}
}
// decodeDecimal extracts sign and significand (into m) from the 16 byte
// little endian wire format and returns the sign (true = negative) and the
// unbiased exponent. Bit 127 is the sign, the following bits hold the
// biased exponent and the low bits the significand.
// b is temporarily modified (b[14] masked) and restored before returning.
func decodeDecimal(b []byte, m *big.Int) (bool, int) {
	neg := (b[15] & 0x80) != 0
	exp := int((((uint16(b[15])<<8)|uint16(b[14]))<<1)>>2) - dec128Bias
	b14 := b[14]  // save b[14]
	b[14] &= 0x01 // keep the mantissa bit (rest: sign and exp)
	//most significand byte
	msb := 14
	for msb > 0 {
		if b[msb] != 0 {
			break
		}
		msb--
	}
	//calc number of words
	numWords := (msb / _S) + 1
	w := make([]big.Word, numWords)
	k := numWords - 1
	d := big.Word(0)
	// Assemble little endian bytes into big.Words, most significant byte first.
	for i := msb; i >= 0; i-- {
		d |= big.Word(b[i])
		if k*_S == i {
			w[k] = d
			k--
			d = 0
		}
		d <<= 8
	}
	b[14] = b14 // restore b[14]
	m.SetBits(w)
	return neg, exp
}
// encodeDecimal packs significand m, sign and unbiased exponent into the
// 16 byte little endian wire format (inverse of decodeDecimal).
// m is expected to fit into the low 14 bytes (callers limit it to
// dec128Digits digits) and exp into the biased exponent range.
func encodeDecimal(m *big.Int, neg bool, exp int) (driver.Value, error) {
	b := make([]byte, decimalSize)
	// little endian bigint words (significand) -> little endian db decimal format
	// Bug fix: the byte count per word was hard-coded to 8; on 32 bit
	// platforms big.Word is 4 bytes, so a large significand (4 words)
	// would overrun the 16 byte buffer. Use the platform word size _S.
	j := 0
	for _, d := range m.Bits() {
		for i := 0; i < _S; i++ {
			b[j] = byte(d)
			d >>= 8
			j++
		}
	}
	exp += dec128Bias
	b[14] |= (byte(exp) << 1)
	b[15] = byte(uint16(exp) >> 7)
	if neg {
		b[15] |= 0x80
	}
	return b, nil
}
// NullDecimal represents an Decimal that may be null.
// NullDecimal implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullDecimal struct {
	Decimal *Decimal
	Valid   bool // Valid is true if Decimal is not NULL
}

// Scan implements the Scanner interface.
func (n *NullDecimal) Scan(value interface{}) error {
	b, ok := value.([]byte)
	n.Valid = ok
	if !ok {
		return nil
	}
	if n.Decimal == nil {
		return fmt.Errorf("invalid decimal value %v", n.Decimal)
	}
	return n.Decimal.Scan(b)
}

// Value implements the driver Valuer interface.
func (n NullDecimal) Value() (driver.Value, error) {
	switch {
	case !n.Valid:
		return nil, nil
	case n.Decimal == nil:
		return nil, fmt.Errorf("invalid decimal value %v", n.Decimal)
	}
	return n.Decimal.Value()
}

18
vendor/github.com/SAP/go-hdb/driver/doc.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package driver is a native Go SAP HANA driver implementation for the database/sql package.
package driver

617
vendor/github.com/SAP/go-hdb/driver/driver.go generated vendored Normal file
View File

@ -0,0 +1,617 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"database/sql"
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"io"
"regexp"
"sync"
"github.com/SAP/go-hdb/driver/sqltrace"
p "github.com/SAP/go-hdb/internal/protocol"
)
// DriverVersion is the version number of the hdb driver.
const DriverVersion = "0.9"
// DriverName is the driver name to use with sql.Open for hdb databases.
const DriverName = "hdb"
func init() {
sql.Register(DriverName, &drv{})
}
// reBulk detects a leading (case-insensitive) "bulk" pseudo keyword.
var reBulk = regexp.MustCompile(`(?i)^(\s)*(bulk +)(.*)`)

// checkBulkInsert reports whether query carries the "bulk" pseudo keyword
// prefix and, if so, returns the query with that prefix (and any leading
// whitespace) stripped.
func checkBulkInsert(sql string) (string, bool) {
	if !reBulk.MatchString(sql) {
		return sql, false
	}
	return reBulk.ReplaceAllString(sql, "${3}"), true
}
// reCall detects a leading (case-insensitive) "call" keyword.
var reCall = regexp.MustCompile(`(?i)^(\s)*(call +)(.*)`)

// checkCallProcedure reports whether sql is a stored procedure call.
func checkCallProcedure(sql string) bool {
	return reCall.FindStringIndex(sql) != nil
}
// errProcTableQuery flags malformed internal table queries (see decodeTableQuery).
var errProcTableQuery = errors.New("Invalid procedure table query")

// driver
type drv struct{}

// Open implements the database/sql/driver.Driver interface.
func (d *drv) Open(dsn string) (driver.Conn, error) {
	return newConn(dsn)
}

// database connection
type conn struct {
	session *p.Session
}

// newConn parses the DSN and opens a new database session.
func newConn(dsn string) (driver.Conn, error) {
	sessionPrm, err := parseDSN(dsn)
	if err != nil {
		return nil, err
	}
	session, err := p.NewSession(sessionPrm)
	if err != nil {
		return nil, err
	}
	return &conn{session: session}, nil
}
// Prepare implements the database/sql/driver.Conn interface.
// Statements prefixed with the pseudo keyword "bulk" are turned into
// client side buffered bulk insert statements.
func (c *conn) Prepare(query string) (driver.Stmt, error) {
	if c.session.IsBad() {
		return nil, driver.ErrBadConn
	}
	prepareQuery, bulkInsert := checkBulkInsert(query)
	qt, id, parameterFieldSet, resultFieldSet, err := c.session.Prepare(prepareQuery)
	if err != nil {
		return nil, err
	}
	if bulkInsert {
		return newBulkInsertStmt(c.session, prepareQuery, id, parameterFieldSet)
	}
	return newStmt(qt, c.session, prepareQuery, id, parameterFieldSet, resultFieldSet)
}
// Close implements the database/sql/driver.Conn interface.
func (c *conn) Close() error {
	c.session.Close()
	return nil
}

// Begin implements the database/sql/driver.Conn interface.
// Nested transactions are rejected.
func (c *conn) Begin() (driver.Tx, error) {
	if c.session.IsBad() {
		return nil, driver.ErrBadConn
	}
	if c.session.InTx() {
		return nil, fmt.Errorf("nested transactions are not supported")
	}
	c.session.SetInTx(true)
	return newTx(c.session), nil
}

// Exec implements the database/sql/driver/Execer interface.
// Only argument-less queries take this direct path; everything else
// returns driver.ErrSkip so database/sql falls back to prepare/execute.
func (c *conn) Exec(query string, args []driver.Value) (driver.Result, error) {
	if c.session.IsBad() {
		return nil, driver.ErrBadConn
	}
	if len(args) != 0 {
		return nil, driver.ErrSkip //fast path not possible (prepare needed)
	}
	sqltrace.Traceln(query)
	return c.session.ExecDirect(query)
}
// bug?: check args is performed independently of queryer raising ErrSkip or not
// - leads to different behavior to prepare - stmt - execute default logic
// - seems to be the same for Execer interface
// Query implements the database/sql/driver/Queryer interface.
// Argument-less selects are executed directly; stored procedure calls and
// parameterized queries fall back (driver.ErrSkip) to prepare/execute.
func (c *conn) Query(query string, args []driver.Value) (driver.Rows, error) {
	if c.session.IsBad() {
		return nil, driver.ErrBadConn
	}
	if len(args) != 0 {
		return nil, driver.ErrSkip //fast path not possible (prepare needed)
	}
	// direct execution of call procedure
	// - returns no parameter metadata (sps 82) but only field values
	// --> let's take the 'prepare way' for stored procedures
	if checkCallProcedure(query) {
		return nil, driver.ErrSkip
	}
	sqltrace.Traceln(query)
	// Internal table queries (issued to fetch procedure table output
	// parameters) are answered from the procedure call result store.
	id, idx, ok := decodeTableQuery(query)
	if ok {
		r := procedureCallResultStore.get(id)
		if r == nil {
			return nil, fmt.Errorf("invalid procedure table query %s", query)
		}
		return r.tableRows(int(idx))
	}
	id, meta, values, attributes, err := c.session.QueryDirect(query)
	if err != nil {
		return nil, err
	}
	if id == 0 { // non select query
		return noResult, nil
	}
	return newQueryResult(c.session, id, meta, values, attributes)
}
//transaction
type tx struct {
	session *p.Session
}

func newTx(session *p.Session) *tx {
	return &tx{
		session: session,
	}
}

// Commit implements the database/sql/driver.Tx interface.
func (t *tx) Commit() error {
	if t.session.IsBad() {
		return driver.ErrBadConn
	}
	return t.session.Commit()
}

// Rollback implements the database/sql/driver.Tx interface.
func (t *tx) Rollback() error {
	if t.session.IsBad() {
		return driver.ErrBadConn
	}
	return t.session.Rollback()
}
//statement
type stmt struct {
	qt             p.QueryType
	session        *p.Session
	query          string
	id             uint64
	prmFieldSet    *p.FieldSet
	resultFieldSet *p.FieldSet
}

func newStmt(qt p.QueryType, session *p.Session, query string, id uint64, prmFieldSet *p.FieldSet, resultFieldSet *p.FieldSet) (*stmt, error) {
	return &stmt{qt: qt, session: session, query: query, id: id, prmFieldSet: prmFieldSet, resultFieldSet: resultFieldSet}, nil
}

// Close implements the driver.Stmt interface (drops the prepared statement).
func (s *stmt) Close() error {
	return s.session.DropStatementID(s.id)
}

// NumInput implements the driver.Stmt interface.
func (s *stmt) NumInput() int {
	return s.prmFieldSet.NumInputField()
}

// Exec implements the driver.Stmt interface.
func (s *stmt) Exec(args []driver.Value) (driver.Result, error) {
	if s.session.IsBad() {
		return nil, driver.ErrBadConn
	}
	numField := s.prmFieldSet.NumInputField()
	if len(args) != numField {
		return nil, fmt.Errorf("invalid number of arguments %d - %d expected", len(args), numField)
	}
	sqltrace.Tracef("%s %v", s.query, args)
	return s.session.Exec(s.id, s.prmFieldSet, args)
}
// Query implements the driver.Stmt interface. Stored procedure calls are
// dispatched to procedureCall, everything else to defaultQuery.
func (s *stmt) Query(args []driver.Value) (driver.Rows, error) {
	if s.session.IsBad() {
		return nil, driver.ErrBadConn
	}
	if s.qt == p.QtProcedureCall {
		return s.procedureCall(args)
	}
	return s.defaultQuery(args)
}
// defaultQuery executes a prepared (non procedure) statement.
func (s *stmt) defaultQuery(args []driver.Value) (driver.Rows, error) {
	sqltrace.Tracef("%s %v", s.query, args)
	rid, values, attributes, err := s.session.Query(s.id, s.prmFieldSet, s.resultFieldSet, args)
	if err != nil {
		return nil, err
	}
	if rid == 0 { // non select query
		return noResult, nil
	}
	return newQueryResult(s.session, rid, s.resultFieldSet, values, attributes)
}

// procedureCall executes a prepared stored procedure call.
func (s *stmt) procedureCall(args []driver.Value) (driver.Rows, error) {
	sqltrace.Tracef("%s %v", s.query, args)
	fieldValues, tableResults, err := s.session.Call(s.id, s.prmFieldSet, args)
	if err != nil {
		return nil, err
	}
	return newProcedureCallResult(s.session, s.prmFieldSet, fieldValues, tableResults)
}

// ColumnConverter implements the driver.ColumnConverter interface.
func (s *stmt) ColumnConverter(idx int) driver.ValueConverter {
	return columnConverter(s.prmFieldSet.DataType(idx))
}
// bulk insert statement
// bulkInsertStmt buffers insert rows client side and sends them to the
// database in one round trip when flushed (Exec with no arguments).
type bulkInsertStmt struct {
	session           *p.Session
	query             string
	id                uint64
	parameterFieldSet *p.FieldSet
	numArg            int
	args              []driver.Value
}

func newBulkInsertStmt(session *p.Session, query string, id uint64, parameterFieldSet *p.FieldSet) (*bulkInsertStmt, error) {
	return &bulkInsertStmt{session: session, query: query, id: id, parameterFieldSet: parameterFieldSet, args: make([]driver.Value, 0)}, nil
}

// Close implements the driver.Stmt interface (drops the prepared statement).
func (s *bulkInsertStmt) Close() error {
	return s.session.DropStatementID(s.id)
}

// NumInput implements the driver.Stmt interface.
// -1 disables the argument count check of database/sql
// (flush calls pass no arguments at all).
func (s *bulkInsertStmt) NumInput() int {
	return -1
}
// Exec implements the driver.Stmt interface.
// Called with an empty argument list it flushes the buffered rows to the
// database; otherwise the row is buffered for a later flush.
func (s *bulkInsertStmt) Exec(args []driver.Value) (driver.Result, error) {
	if s.session.IsBad() {
		return nil, driver.ErrBadConn
	}
	sqltrace.Tracef("%s %v", s.query, args)
	// len() of a nil slice is 0, so the former "args == nil ||" check was redundant.
	if len(args) == 0 {
		return s.execFlush()
	}
	return s.execBuffer(args)
}
// execFlush sends all buffered rows to the database and resets the buffer.
// Without buffered rows it is a no-op returning driver.ResultNoRows.
func (s *bulkInsertStmt) execFlush() (driver.Result, error) {
	if s.numArg == 0 {
		return driver.ResultNoRows, nil
	}
	result, err := s.session.Exec(s.id, s.parameterFieldSet, s.args)
	s.args = s.args[:0]
	s.numArg = 0
	return result, err
}

// execBuffer appends one row of arguments to the client side buffer,
// flushing first when the buffer already holds maxSmallint rows.
func (s *bulkInsertStmt) execBuffer(args []driver.Value) (driver.Result, error) {
	numField := s.parameterFieldSet.NumInputField()
	if len(args) != numField {
		return nil, fmt.Errorf("invalid number of arguments %d - %d expected", len(args), numField)
	}
	var result driver.Result = driver.ResultNoRows
	var err error
	if s.numArg == maxSmallint { // TODO: check why bigArgument count does not work
		result, err = s.execFlush()
	}
	s.args = append(s.args, args...)
	s.numArg++
	return result, err
}
// Query is not supported on bulk insert statements.
func (s *bulkInsertStmt) Query(args []driver.Value) (driver.Rows, error) {
	return nil, fmt.Errorf("query not allowed in context of bulk insert statement %s", s.query)
}

// ColumnConverter implements the driver.ColumnConverter interface.
func (s *bulkInsertStmt) ColumnConverter(idx int) driver.ValueConverter {
	return columnConverter(s.parameterFieldSet.DataType(idx))
}
// driver.Rows drop-in replacement if driver Query or QueryRow is used for statements that doesn't return rows
var noColumns = []string{}
var noResult = new(noResultType)

type noResultType struct{}

func (r *noResultType) Columns() []string              { return noColumns }
func (r *noResultType) Close() error                   { return nil }
func (r *noResultType) Next(dest []driver.Value) error { return io.EOF }
// query result
// queryResult implements driver.Rows for select statements, fetching
// further result packets from the session on demand (see Next).
type queryResult struct {
	session     *p.Session
	id          uint64
	fieldSet    *p.FieldSet
	fieldValues *p.FieldValues
	pos         int
	attrs       p.PartAttributes
	columns     []string
	lastErr     error
}

func newQueryResult(session *p.Session, id uint64, fieldSet *p.FieldSet, fieldValues *p.FieldValues, attrs p.PartAttributes) (driver.Rows, error) {
	columns := make([]string, fieldSet.NumOutputField())
	if err := fieldSet.OutputNames(columns); err != nil {
		return nil, err
	}
	return &queryResult{
		session:     session,
		id:          id,
		fieldSet:    fieldSet,
		fieldValues: fieldValues,
		attrs:       attrs,
		columns:     columns,
	}, nil
}
// Columns implements the driver.Rows interface.
func (r *queryResult) Columns() []string {
	return r.columns
}

// Close implements the driver.Rows interface.
// The server side resultset is only closed when still open.
func (r *queryResult) Close() error {
	// if lastError is set, attrs are nil
	if r.lastErr != nil {
		return r.lastErr
	}
	if !r.attrs.ResultsetClosed() {
		return r.session.CloseResultsetID(r.id)
	}
	return nil
}

// Next implements the driver.Rows interface.
// When the buffered rows are exhausted the next packet is fetched from
// the session until the server signals the last packet.
func (r *queryResult) Next(dest []driver.Value) error {
	if r.session.IsBad() {
		return driver.ErrBadConn
	}
	if r.pos >= r.fieldValues.NumRow() {
		if r.attrs.LastPacket() {
			return io.EOF
		}
		var err error
		if r.fieldValues, r.attrs, err = r.session.FetchNext(r.id, r.fieldSet); err != nil {
			r.lastErr = err //fieldValues and attrs are nil
			return err
		}
		if r.attrs.NoRows() {
			return io.EOF
		}
		r.pos = 0
	}
	r.fieldValues.Row(r.pos, dest)
	r.pos++
	return nil
}
//call result store
// callResultStore registers procedure call results under numeric keys so
// their table output parameters can later be fetched via encoded table
// queries (see encodeTableQuery). Keys of deleted entries are recycled.
type callResultStore struct {
	mu    sync.RWMutex
	store map[uint64]*procedureCallResult
	cnt   uint64
	free  []uint64
}

// get returns the result stored under key k, or nil if not present.
func (s *callResultStore) get(k uint64) *procedureCallResult {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if r, ok := s.store[k]; ok {
		return r
	}
	return nil
}
// add stores v under a fresh (or recycled) key and returns that key.
func (s *callResultStore) add(v *procedureCallResult) uint64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	var k uint64
	// len() of a nil slice is 0, so the former "s.free == nil ||" check was redundant.
	if len(s.free) == 0 {
		s.cnt++
		k = s.cnt
	} else {
		size := len(s.free)
		k = s.free[size-1]
		s.free = s.free[:size-1]
	}
	if s.store == nil {
		s.store = make(map[uint64]*procedureCallResult)
	}
	s.store[k] = v
	return k
}
// del removes the result registered under key k and recycles the key.
func (s *callResultStore) del(k uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.store, k)
	// append handles a nil slice, so no explicit nil check is required.
	s.free = append(s.free, k)
}
// procedureCallResultStore is the package global registry of pending
// procedure call results keyed for later table output retrieval.
var procedureCallResultStore = new(callResultStore)

//procedure call result
type procedureCallResult struct {
	id          uint64
	session     *p.Session
	fieldSet    *p.FieldSet
	fieldValues *p.FieldValues
	_tableRows  []driver.Rows
	columns     []string
	eof         error
}

// newProcedureCallResult wraps the output parameters and table results of
// a stored procedure call into a single one-row driver.Rows and registers
// it in procedureCallResultStore.
func newProcedureCallResult(session *p.Session, fieldSet *p.FieldSet, fieldValues *p.FieldValues, tableResults []*p.TableResult) (driver.Rows, error) {
	fieldIdx := fieldSet.NumOutputField()
	columns := make([]string, fieldIdx+len(tableResults))
	if err := fieldSet.OutputNames(columns); err != nil {
		return nil, err
	}
	tableRows := make([]driver.Rows, len(tableResults))
	// Each table output parameter is exposed as an own driver.Rows and gets
	// a synthetic column named "table <i>".
	for i, tableResult := range tableResults {
		var err error
		if tableRows[i], err = newQueryResult(session, tableResult.ID(), tableResult.FieldSet(), tableResult.FieldValues(), tableResult.Attrs()); err != nil {
			return nil, err
		}
		columns[fieldIdx] = fmt.Sprintf("table %d", i)
		fieldIdx++
	}
	result := &procedureCallResult{
		session:     session,
		fieldSet:    fieldSet,
		fieldValues: fieldValues,
		_tableRows:  tableRows,
		columns:     columns,
	}
	id := procedureCallResultStore.add(result)
	result.id = id
	return result, nil
}
// Columns implements the driver.Rows interface.
func (r *procedureCallResult) Columns() []string {
	return r.columns
}

// Close implements the driver.Rows interface and unregisters the result
// from the global store.
func (r *procedureCallResult) Close() error {
	procedureCallResultStore.del(r.id)
	return nil
}

// Next implements the driver.Rows interface. A procedure call result has
// at most one row: the output field values followed by one encoded table
// query string per table output parameter.
func (r *procedureCallResult) Next(dest []driver.Value) error {
	if r.session.IsBad() {
		return driver.ErrBadConn
	}
	if r.eof != nil {
		return r.eof
	}
	if r.fieldValues.NumRow() == 0 && len(r._tableRows) == 0 {
		r.eof = io.EOF
		return r.eof
	}
	if r.fieldValues.NumRow() != 0 {
		r.fieldValues.Row(0, dest)
	}
	i := r.fieldSet.NumOutputField()
	for j := range r._tableRows {
		dest[i] = encodeTableQuery(r.id, uint64(j))
		i++
	}
	r.eof = io.EOF
	return nil
}
// tableRows returns the driver.Rows of the idx-th table output parameter.
// idx stems from an int(uint64) conversion in conn.Query and may therefore
// be negative, so both bounds are checked.
func (r *procedureCallResult) tableRows(idx int) (driver.Rows, error) {
	if idx < 0 || idx >= len(r._tableRows) {
		// typo fix: "maximun" -> "maximum"
		return nil, fmt.Errorf("table row index %d exceeds maximum %d", idx, len(r._tableRows)-1)
	}
	return r._tableRows[idx], nil
}
// helper
// tableQueryPrefix marks internal queries that address a table output
// parameter of a registered procedure call result.
const tableQueryPrefix = "@tq"

// encodeTableQuery packs a result store key and a table index into an
// internal query string: prefix + 8 bytes id + 8 bytes idx (little endian).
func encodeTableQuery(id, idx uint64) string {
	prefixLen := len(tableQueryPrefix)
	buf := make([]byte, prefixLen+16)
	copy(buf, tableQueryPrefix)
	binary.LittleEndian.PutUint64(buf[prefixLen:prefixLen+8], id)
	binary.LittleEndian.PutUint64(buf[prefixLen+8:], idx)
	return string(buf)
}

// decodeTableQuery is the inverse of encodeTableQuery. ok is false when
// query is not a well-formed internal table query.
func decodeTableQuery(query string) (uint64, uint64, bool) {
	prefixLen := len(tableQueryPrefix)
	if len(query) != prefixLen+16 || query[:prefixLen] != tableQueryPrefix {
		return 0, 0, false
	}
	payload := []byte(query[prefixLen:])
	return binary.LittleEndian.Uint64(payload[:8]), binary.LittleEndian.Uint64(payload[8:]), true
}

87
vendor/github.com/SAP/go-hdb/driver/dsn.go generated vendored Normal file
View File

@ -0,0 +1,87 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"net/url"
"strconv"
p "github.com/SAP/go-hdb/internal/protocol"
)
// DSN query parameters. For parameter client locale see http://help.sap.com/hana/SAP_HANA_SQL_Command_Network_Protocol_Reference_en.pdf.
const (
DSNLocale = "locale" // Client locale as described in the protocol reference.
DSNTimeout = "timeout" // Driver side connection timeout in seconds.
)
const (
dsnBufferSize = "bufferSize"
dsnFetchSize = "fetchSize"
)
// DSN query default values.
const (
DSNDefaultTimeout = 300 // Default value connection timeout (300 seconds = 5 minutes).
)
const (
dsnDefaultFetchSize = 128
)
/*
DSN is here for the purposes of documentation only. A DSN string is an URL string with the following format
hdb://<username>:<password>@<host address>:<port number>
and optional query parameters (see DSN query parameters and DSN query default values).
Example:
hdb://myuser:mypassword@localhost:30015?timeout=60
*/
type DSN string
// parseDSN parses the DSN URL string into session parameters.
// Missing or malformed numeric query parameters fall back to their
// defaults (fetchSize, timeout); bufferSize defaults to 0.
func parseDSN(dsn string) (*p.SessionPrm, error) {
	// local renamed from "url" to stop shadowing the net/url package
	u, err := url.Parse(dsn)
	if err != nil {
		return nil, err
	}
	prm := &p.SessionPrm{Host: u.Host}
	if u.User != nil {
		prm.Username = u.User.Username()
		prm.Password, _ = u.User.Password()
	}
	values := u.Query()
	prm.BufferSize, _ = strconv.Atoi(values.Get(dsnBufferSize)) // deliberately 0 on absence/parse error
	prm.FetchSize, err = strconv.Atoi(values.Get(dsnFetchSize))
	if err != nil {
		prm.FetchSize = dsnDefaultFetchSize
	}
	prm.Timeout, err = strconv.Atoi(values.Get(DSNTimeout))
	if err != nil {
		prm.Timeout = DSNDefaultTimeout
	}
	prm.Locale = values.Get(DSNLocale)
	return prm, nil
}

32
vendor/github.com/SAP/go-hdb/driver/error.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
p "github.com/SAP/go-hdb/internal/protocol"
)
// Error represents errors send by the database server.
type Error interface {
Code() int // Code return the database error code.
Position() int // Position returns the start position of erroneous sql statements sent to the database server.
Level() p.ErrorLevel // Level return one of the database server predefined error levels.
Text() string // Text return the error description sent from database server.
IsWarning() bool // IsWarning returns true if the HDB error level equals 0.
IsError() bool // IsError returns true if the HDB error level equals 1.
IsFatal() bool // IsFatal returns true if the HDB error level equals 2.
}

49
vendor/github.com/SAP/go-hdb/driver/identifier.go generated vendored Normal file
View File

@ -0,0 +1,49 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"crypto/rand"
"fmt"
"io"
"regexp"
"strconv"
)
// reSimple matches identifiers that may be used unquoted in hdb SQL.
var reSimple = regexp.MustCompile("^[_A-Z][_#$A-Z0-9]*$")

// Identifier in hdb SQL statements like schema or table name.
type Identifier string

// RandomIdentifier returns a random Identifier prefixed by the prefix parameter.
// This function is used to generate database objects with random names for test and example code.
func RandomIdentifier(prefix string) Identifier {
	b := make([]byte, 16)
	if _, err := io.ReadFull(rand.Reader, b); err != nil {
		panic(err.Error()) // rand should never fail
	}
	// Bug fix: the result was formatted twice (fmt.Sprintf(fmt.Sprintf(...))),
	// which corrupts the identifier whenever prefix contains a '%' verb.
	return Identifier(fmt.Sprintf("%s%x", prefix, b))
}

// String implements the Stringer interface.
// Simple identifiers are returned as is; everything else is quoted.
func (i Identifier) String() string {
	s := string(i)
	if reSimple.MatchString(s) {
		return s
	}
	return strconv.Quote(s)
}

83
vendor/github.com/SAP/go-hdb/driver/lob.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"database/sql/driver"
"errors"
"fmt"
"io"
p "github.com/SAP/go-hdb/internal/protocol"
)
// A Lob is the driver representation of a database large object field.
// A Lob object uses an io.Reader object as source for writing content to a database lob field.
// A Lob object uses an io.Writer object as destination for reading content from a database lob field.
// A Lob can be created by contructor method NewLob with io.Reader and io.Writer as parameters or
// created by new, setting io.Reader and io.Writer by SetReader and SetWriter methods.
type Lob struct {
	rd         io.Reader
	wr         io.Writer
	writeDescr *p.LobWriteDescr // reused across Value calls
}

// NewLob creates a new Lob instance with the io.Reader and io.Writer given as parameters.
func NewLob(rd io.Reader, wr io.Writer) *Lob {
	return &Lob{rd: rd, wr: wr}
}

// SetReader sets the io.Reader source for a lob field to be written to database.
func (l *Lob) SetReader(rd io.Reader) {
	l.rd = rd
}

// SetWriter sets the io.Writer destination for a lob field to be read from database.
func (l *Lob) SetWriter(wr io.Writer) {
	l.wr = wr
}

// Scan implements the database/sql/Scanner interface.
// src is the int64 lob locator delivered by the protocol layer (see
// dbLobType.ConvertValue); the content is streamed into the configured writer.
func (l *Lob) Scan(src interface{}) error {
	if l.wr == nil {
		return errors.New("lob error: initial writer")
	}
	ptr, ok := src.(int64)
	if !ok {
		return fmt.Errorf("lob: invalid pointer type %T", src)
	}
	descr := p.PointerToLobReadDescr(ptr)
	if err := descr.SetWriter(l.wr); err != nil {
		return err
	}
	return nil
}

// Value implements the database/sql/Valuer interface.
// It hands the configured reader to the protocol layer as a write descriptor.
func (l *Lob) Value() (driver.Value, error) {
	if l.rd == nil {
		return nil, errors.New("lob error: initial reader")
	}
	if l.writeDescr == nil {
		l.writeDescr = new(p.LobWriteDescr)
	}
	l.writeDescr.SetReader(l.rd)
	return p.LobWriteDescrToPointer(l.writeDescr), nil
}

18
vendor/github.com/SAP/go-hdb/driver/sqltrace/doc.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package sqltrace implements driver sql trace functions.
package sqltrace

View File

@ -0,0 +1,78 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sqltrace
import (
"flag"
"log"
"os"
"sync"
)
// sqlTrace is a switchable logger writing sql trace output to stdout.
type sqlTrace struct {
	mu sync.RWMutex //protects field on
	on bool
	*log.Logger
}

func newSqlTrace() *sqlTrace {
	return &sqlTrace{
		Logger: log.New(os.Stdout, "hdb ", log.Ldate|log.Ltime|log.Lshortfile),
	}
}

// tracer is the package global trace instance; tracing is off by default
// and can be enabled via SetOn or the hdb.sqlTrace command line flag.
var tracer = newSqlTrace()

func init() {
	flag.BoolVar(&tracer.on, "hdb.sqlTrace", false, "enabling hdb sql trace")
}

// On returns if tracing methods output is active.
func On() bool {
	tracer.mu.RLock()
	on := tracer.on
	tracer.mu.RUnlock()
	return on
}

// SetOn sets tracing methods output active or inactive.
func SetOn(on bool) {
	tracer.mu.Lock()
	tracer.on = on
	tracer.mu.Unlock()
}

// Trace calls trace logger Print method to print to the trace logger.
func Trace(v ...interface{}) {
	if On() {
		tracer.Print(v...)
	}
}

// Tracef calls trace logger Printf method to print to the trace logger.
// (doc comment fixed: it previously referred to Trace)
func Tracef(format string, v ...interface{}) {
	if On() {
		tracer.Printf(format, v...)
	}
}

// Traceln calls trace logger Println method to print to the trace logger.
func Traceln(v ...interface{}) {
	if On() {
		tracer.Println(v...)
	}
}

44
vendor/github.com/SAP/go-hdb/driver/time.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"database/sql/driver"
"time"
)
// NullTime represents an time.Time that may be null.
// NullTime implements the Scanner interface so
// it can be used as a scan destination, similar to NullString.
type NullTime struct {
Time time.Time
Valid bool // Valid is true if Time is not NULL
}
// Scan implements the Scanner interface.
func (n *NullTime) Scan(value interface{}) error {
n.Time, n.Valid = value.(time.Time)
return nil
}
// Value implements the driver Valuer interface.
func (n NullTime) Value() (driver.Value, error) {
if !n.Valid {
return nil, nil
}
return n.Time, nil
}

329
vendor/github.com/SAP/go-hdb/internal/bufio/bufio.go generated vendored Normal file
View File

@ -0,0 +1,329 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package bufio implements buffered I/O for database read and writes on basis of the standard Go bufio package.
package bufio
import (
"bufio"
"encoding/binary"
"io"
"math"
"github.com/SAP/go-hdb/internal/unicode"
"golang.org/x/text/transform"
)
const (
	// bufferSize is the size of the scratch buffer shared by the numeric
	// read/write helpers; it must be at least 8 bytes (largest fixed-size value).
	bufferSize = 128
)

// Reader is a bufio.Reader extended by methods needed for hdb protocol.
type Reader struct {
	*bufio.Reader
	b  []byte                // scratch buffer (min 8 Bytes)
	tr transform.Transformer // CESU-8 to UTF-8 transformer used by ReadCesu8
}

// NewReader creates a new Reader instance.
func NewReader(r io.Reader) *Reader {
	return &Reader{
		Reader: bufio.NewReader(r),
		b:      make([]byte, bufferSize),
		tr:     unicode.Cesu8ToUtf8Transformer,
	}
}

// NewReaderSize creates a new Reader instance with given size for bufio.Reader.
func NewReaderSize(r io.Reader, size int) *Reader {
	return &Reader{
		Reader: bufio.NewReaderSize(r, size),
		b:      make([]byte, bufferSize),
		tr:     unicode.Cesu8ToUtf8Transformer,
	}
}
// Skip skips cnt bytes from reading.
// It consumes the bytes in scratch-buffer-sized chunks and returns the
// first read error encountered.
func (r *Reader) Skip(cnt int) error {
	remaining := cnt
	for remaining > 0 {
		chunk := remaining
		if chunk > len(r.b) {
			chunk = len(r.b)
		}
		n, err := io.ReadFull(r.Reader, r.b[:chunk])
		remaining -= n
		if err != nil {
			return err
		}
	}
	return nil
}
// ReadFull implements io.ReadFull on Reader.
func (r *Reader) ReadFull(p []byte) error {
	_, err := io.ReadFull(r.Reader, p)
	return err
}

// ReadBool reads and returns a boolean.
// Any non-zero byte is reported as true.
func (r *Reader) ReadBool() (bool, error) {
	c, err := r.Reader.ReadByte()
	if err != nil {
		return false, err
	}
	if c == 0 {
		return false, nil
	}
	return true, nil
}

// ReadInt8 reads and returns an int8.
func (r *Reader) ReadInt8() (int8, error) {
	c, err := r.Reader.ReadByte()
	if err != nil {
		return 0, err
	}
	return int8(c), nil
}

// ReadInt16 reads and returns an int16 (little-endian wire order).
func (r *Reader) ReadInt16() (int16, error) {
	if _, err := io.ReadFull(r.Reader, r.b[:2]); err != nil {
		return 0, err
	}
	return int16(binary.LittleEndian.Uint16(r.b[:2])), nil
}

// ReadUint16 reads and returns an uint16 (little-endian wire order).
func (r *Reader) ReadUint16() (uint16, error) {
	if _, err := io.ReadFull(r.Reader, r.b[:2]); err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint16(r.b[:2]), nil
}

// ReadInt32 reads and returns an int32 (little-endian wire order).
func (r *Reader) ReadInt32() (int32, error) {
	if _, err := io.ReadFull(r.Reader, r.b[:4]); err != nil {
		return 0, err
	}
	return int32(binary.LittleEndian.Uint32(r.b[:4])), nil
}

// ReadUint32 reads and returns an uint32 (little-endian wire order).
func (r *Reader) ReadUint32() (uint32, error) {
	if _, err := io.ReadFull(r.Reader, r.b[:4]); err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint32(r.b[:4]), nil
}

// ReadInt64 reads and returns an int64 (little-endian wire order).
func (r *Reader) ReadInt64() (int64, error) {
	if _, err := io.ReadFull(r.Reader, r.b[:8]); err != nil {
		return 0, err
	}
	return int64(binary.LittleEndian.Uint64(r.b[:8])), nil
}

// ReadUint64 reads and returns an uint64 (little-endian wire order).
func (r *Reader) ReadUint64() (uint64, error) {
	if _, err := io.ReadFull(r.Reader, r.b[:8]); err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint64(r.b[:8]), nil
}

// ReadFloat32 reads and returns a float32 (IEEE 754 bits, little-endian).
func (r *Reader) ReadFloat32() (float32, error) {
	if _, err := io.ReadFull(r.Reader, r.b[:4]); err != nil {
		return 0, err
	}
	bits := binary.LittleEndian.Uint32(r.b[:4])
	return math.Float32frombits(bits), nil
}

// ReadFloat64 reads and returns a float64 (IEEE 754 bits, little-endian).
func (r *Reader) ReadFloat64() (float64, error) {
	if _, err := io.ReadFull(r.Reader, r.b[:8]); err != nil {
		return 0, err
	}
	bits := binary.LittleEndian.Uint64(r.b[:8])
	return math.Float64frombits(bits), nil
}

// ReadCesu8 reads a size CESU-8 encoded byte sequence and returns an UTF-8 byte slice.
func (r *Reader) ReadCesu8(size int) ([]byte, error) {
	p := make([]byte, size)
	if _, err := io.ReadFull(r.Reader, p); err != nil {
		return nil, err
	}
	r.tr.Reset()
	// NOTE(review): relies on UTF-8 output never being longer than the
	// CESU-8 input for the in-place transform to be safe — confirm with
	// the transformer's contract.
	n, _, err := r.tr.Transform(p, p, true) // inplace transformation
	if err != nil {
		return nil, err
	}
	return p[:n], nil
}
// Writer is a bufio.Writer extended by methods needed for hdb protocol.
type Writer struct {
	*bufio.Writer
	b  []byte                // scratch buffer (min 8 Bytes)
	tr transform.Transformer // UTF-8 to CESU-8 transformer used by WriteCesu8
}

// NewWriter creates a new Writer instance.
func NewWriter(w io.Writer) *Writer {
	return &Writer{
		Writer: bufio.NewWriter(w),
		b:      make([]byte, bufferSize),
		tr:     unicode.Utf8ToCesu8Transformer,
	}
}

// NewWriterSize creates a new Writer instance with given size for bufio.Writer.
func NewWriterSize(w io.Writer, size int) *Writer {
	return &Writer{
		Writer: bufio.NewWriterSize(w, size),
		b:      make([]byte, bufferSize),
		tr:     unicode.Utf8ToCesu8Transformer,
	}
}
// WriteZeroes writes cnt zero byte values.
// It zeroes the scratch buffer once and writes it out in chunks.
func (w *Writer) WriteZeroes(cnt int) error {
	// zero out only as much of the scratch area as will be used
	zeroed := cnt
	if zeroed > len(w.b) {
		zeroed = len(w.b)
	}
	for i := range w.b[:zeroed] {
		w.b[i] = 0
	}

	for written := 0; written < cnt; {
		chunk := cnt - written
		if chunk > len(w.b) {
			chunk = len(w.b)
		}
		n, err := w.Writer.Write(w.b[:chunk])
		written += n
		if err != nil {
			return err
		}
	}
	return nil
}
// WriteBool writes a boolean.
// true is encoded as byte 1, false as byte 0.
func (w *Writer) WriteBool(v bool) error {
	if v {
		return w.Writer.WriteByte(1)
	}
	return w.Writer.WriteByte(0)
}

// WriteInt8 writes an int8.
func (w *Writer) WriteInt8(i int8) error {
	return w.Writer.WriteByte(byte(i))
}

// WriteInt16 writes an int16 (little-endian wire order).
func (w *Writer) WriteInt16(i int16) error {
	binary.LittleEndian.PutUint16(w.b[:2], uint16(i))
	_, err := w.Writer.Write(w.b[:2])
	return err
}

// WriteUint16 writes an uint16 (little-endian wire order).
func (w *Writer) WriteUint16(i uint16) error {
	binary.LittleEndian.PutUint16(w.b[:2], i)
	_, err := w.Writer.Write(w.b[:2])
	return err
}

// WriteInt32 writes an int32 (little-endian wire order).
func (w *Writer) WriteInt32(i int32) error {
	binary.LittleEndian.PutUint32(w.b[:4], uint32(i))
	_, err := w.Writer.Write(w.b[:4])
	return err
}

// WriteUint32 writes an uint32 (little-endian wire order).
func (w *Writer) WriteUint32(i uint32) error {
	binary.LittleEndian.PutUint32(w.b[:4], i)
	_, err := w.Writer.Write(w.b[:4])
	return err
}

// WriteInt64 writes an int64 (little-endian wire order).
func (w *Writer) WriteInt64(i int64) error {
	binary.LittleEndian.PutUint64(w.b[:8], uint64(i))
	_, err := w.Writer.Write(w.b[:8])
	return err
}

// WriteUint64 writes an uint64 (little-endian wire order).
func (w *Writer) WriteUint64(i uint64) error {
	binary.LittleEndian.PutUint64(w.b[:8], i)
	_, err := w.Writer.Write(w.b[:8])
	return err
}

// WriteFloat32 writes a float32 (IEEE 754 bits, little-endian).
func (w *Writer) WriteFloat32(f float32) error {
	bits := math.Float32bits(f)
	binary.LittleEndian.PutUint32(w.b[:4], bits)
	_, err := w.Writer.Write(w.b[:4])
	return err
}

// WriteFloat64 writes a float64 (IEEE 754 bits, little-endian).
func (w *Writer) WriteFloat64(f float64) error {
	bits := math.Float64bits(f)
	binary.LittleEndian.PutUint64(w.b[:8], bits)
	_, err := w.Writer.Write(w.b[:8])
	return err
}
// WriteCesu8 writes an UTF-8 byte slice as CESU-8 and returns the CESU-8 bytes written.
// The input is transformed through the scratch buffer in chunks;
// ErrShortDst from a chunk only means the scratch buffer is full and the
// loop continues with the remaining input.
func (w *Writer) WriteCesu8(p []byte) (int, error) {
	w.tr.Reset()
	cnt := 0 // CESU-8 bytes written so far
	i := 0   // UTF-8 bytes consumed so far
	for i < len(p) {
		m, n, err := w.tr.Transform(w.b, p[i:], true)
		if err != nil && err != transform.ErrShortDst {
			return cnt, err
		}
		// no progress at all would loop forever — report short destination
		if m == 0 {
			return cnt, transform.ErrShortDst
		}
		o, err := w.Writer.Write(w.b[:m])
		cnt += o
		if err != nil {
			return cnt, err
		}
		i += n
	}
	return cnt, nil
}

// WriteStringCesu8 is like WriteCesu8 with an UTF-8 string as parameter.
func (w *Writer) WriteStringCesu8(s string) (int, error) {
	return w.WriteCesu8([]byte(s))
}

View File

@ -0,0 +1,56 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"os"
"strconv"
"strings"
"github.com/SAP/go-hdb/internal/bufio"
)
// clientID identifies the client to the server as "<pid>@<hostname>",
// falling back to "<pid>" when the hostname cannot be determined.
type clientID []byte

// newClientID builds the client identifier from the current process id
// and host name.
func newClientID() clientID {
	pid := strconv.Itoa(os.Getpid())
	h, err := os.Hostname()
	if err != nil {
		return clientID(pid)
	}
	return clientID(pid + "@" + h)
}
// kind returns the part kind for the client id part.
func (id clientID) kind() partKind {
	return partKind(35) //TODO: extend part kind
}

// size returns the part size in bytes.
func (id clientID) size() (int, error) {
	return len(id), nil
}

// numArg returns the number of arguments in this part (always one).
func (id clientID) numArg() int {
	return 1
}

// write writes the raw client id bytes to the protocol stream.
func (id clientID) write(wr *bufio.Writer) error {
	if _, err := wr.Write(id); err != nil {
		return err
	}
	if trace {
		outLogger.Printf("client id: %s", id)
	}
	return nil
}

View File

@ -0,0 +1,49 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"github.com/SAP/go-hdb/internal/bufio"
"github.com/SAP/go-hdb/internal/unicode/cesu8"
)
// cesu8 command
// command is a command-text part, transmitted CESU-8 encoded.
type command []byte

// kind returns the part kind for the command part.
func (c command) kind() partKind {
	return pkCommand
}

// size returns the CESU-8 encoded size of the command in bytes.
func (c command) size() (int, error) {
	return cesu8.Size(c), nil
}

// numArg returns the number of arguments in this part (always one).
func (c command) numArg() int {
	return 1
}

// write writes the command CESU-8 encoded to the protocol stream.
func (c command) write(wr *bufio.Writer) error {
	if _, err := wr.WriteCesu8(c); err != nil {
		return err
	}
	if trace {
		outLogger.Printf("command: %s", c)
	}
	return nil
}

View File

@ -0,0 +1,49 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=connectOption

// connectOption enumerates the option keys exchanged in the connect-options
// part. The numeric values are defined by the hdb protocol and must not be
// changed.
type connectOption int8

const (
	coConnectionID                connectOption = 1
	coCompleteArrayExecution      connectOption = 2
	coClientLocale                connectOption = 3
	coSupportsLargeBulkOperations connectOption = 4

	// docu: error field mentioned twice
	//coDataFormatVersion2 connectOption = 5

	coLargeNumberOfParameterSupport connectOption = 10
	coSystemID                      connectOption = 11

	// missing in docu
	coDataFormatVersion connectOption = 12

	coAbapVarcharMode              connectOption = 13
	coSelectForUpdateSupported     connectOption = 14
	coClientDistributionMode       connectOption = 15
	coEngineDataFormatVersion      connectOption = 16
	coDistributionProtocolVersion  connectOption = 17
	coSplitBatchCommands           connectOption = 18
	coUseTransactionFlagsOnly      connectOption = 19
	coRowAndColumnOptimizedFormat  connectOption = 20
	coIgnoreUnknownParts           connectOption = 21
	coTableOutputParameter         connectOption = 22
	coDataFormatVersion2           connectOption = 23
	coItabParameter                connectOption = 24
	coDescribeTableOutputParameter connectOption = 25
	coScrollablResultSet           connectOption = 27
	// docu? connectOption = 28 //boolean
)

View File

@ -0,0 +1,32 @@
// generated by stringer -type=connectOption; DO NOT EDIT
package protocol
import "fmt"
// Generated lookup tables: names are concatenated per contiguous value
// range, with index slices delimiting each name. Keep in sync with the
// constants in connectoption.go by re-running `stringer -type=connectOption`.
const (
	_connectOption_name_0 = "coConnectionIDcoCompleteArrayExecutioncoClientLocalecoSupportsLargeBulkOperations"
	_connectOption_name_1 = "coLargeNumberOfParameterSupportcoSystemIDcoDataFormatVersioncoAbapVarcharModecoSelectForUpdateSupportedcoClientDistributionModecoEngineDataFormatVersioncoDistributionProtocolVersioncoSplitBatchCommandscoUseTransactionFlagsOnlycoRowAndColumnOptimizedFormatcoIgnoreUnknownPartscoTableOutputParametercoDataFormatVersion2coItabParametercoDescribeTableOutputParameter"
	_connectOption_name_2 = "coScrollablResultSet"
)

var (
	_connectOption_index_0 = [...]uint8{0, 14, 38, 52, 81}
	_connectOption_index_1 = [...]uint16{0, 31, 41, 60, 77, 103, 127, 152, 181, 201, 226, 255, 275, 297, 317, 332, 362}
	_connectOption_index_2 = [...]uint8{0, 20}
)

// String returns the constant name for i, or "connectOption(N)" for
// values outside the known ranges (1-4, 10-25, 27).
func (i connectOption) String() string {
	switch {
	case 1 <= i && i <= 4:
		i -= 1
		return _connectOption_name_0[_connectOption_index_0[i]:_connectOption_index_0[i+1]]
	case 10 <= i && i <= 25:
		i -= 10
		return _connectOption_name_1[_connectOption_index_1[i]:_connectOption_index_1[i+1]]
	case i == 27:
		return _connectOption_name_2
	default:
		return fmt.Sprintf("connectOption(%d)", i)
	}
}

View File

@ -0,0 +1,113 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
// data format version
// Values sent/received via the coDataFormatVersion* connect options.
const (
	dfvBaseline intType = 1
	dfvDoNotUse         = 3
	dfvSPS06            = 4 //see docu
	dfvBINTEXT          = 6
)

// client distribution mode
// Values for the coClientDistributionMode connect option.
const (
	cdmOff                 intType = 0
	cdmConnection                  = 1
	cdmStatement                   = 2
	cdmConnectionStatement         = 3
)

// distribution protocol version
// Values for the coDistributionProtocolVersion connect option.
const (
	dpvBaseline                       = 0
	dpvClientHandlesStatementSequence = 1
)
// connectOptions represents the connect-options part of the protocol:
// a set of option key/value pairs stored in a plainOptions map.
type connectOptions struct {
	po plainOptions
	// _numArg holds the argument count announced by the server; it is set
	// via setNumArg before read is called.
	_numArg int
}

// newConnectOptions creates an empty connect-options part.
func newConnectOptions() *connectOptions {
	return &connectOptions{
		po: plainOptions{},
	}
}
// String implements the Stringer interface.
// The options are copied into a connectOption-keyed map so the option
// names (via connectOption.String) show up in trace output.
func (o *connectOptions) String() string {
	m := make(map[connectOption]interface{})
	for k, v := range o.po {
		m[connectOption(k)] = v
	}
	// %v formats the mixed-type option values with their default formats;
	// the previous %s produced "%!s(bool=true)"-style noise for
	// non-string values.
	return fmt.Sprintf("%v", m)
}
// kind returns the part kind for the connect-options part.
func (o *connectOptions) kind() partKind {
	return pkConnectOptions
}

// size returns the encoded size of all options in bytes.
func (o *connectOptions) size() (int, error) {
	return o.po.size(), nil
}

// numArg returns the number of options to be written.
func (o *connectOptions) numArg() int {
	return len(o.po)
}

// setNumArg stores the option count announced by the server; read uses it.
func (o *connectOptions) setNumArg(numArg int) {
	o._numArg = numArg
}

// set stores option value v under key k.
func (o *connectOptions) set(k connectOption, v interface{}) {
	o.po[int8(k)] = v
}

// get returns the value stored under key k and whether it is present.
func (o *connectOptions) get(k connectOption) (interface{}, bool) {
	v, ok := o.po[int8(k)]
	return v, ok
}

// read reads _numArg options from the protocol stream.
func (o *connectOptions) read(rd *bufio.Reader) error {
	if err := o.po.read(rd, o._numArg); err != nil {
		return err
	}

	if trace {
		outLogger.Printf("connect options: %v", o)
	}

	return nil
}

// write writes all options to the protocol stream.
func (o *connectOptions) write(wr *bufio.Writer) error {
	if err := o.po.write(wr); err != nil {
		return err
	}

	if trace {
		outLogger.Printf("connect options: %v", o)
	}

	return nil
}

View File

@ -0,0 +1,38 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=DataType

// DataType is the type definition for data types supported by this package.
type DataType byte

// Data type constants.
const (
	DtUnknown DataType = iota // unknown data type
	DtTinyint
	DtSmallint
	DtInt
	DtBigint
	DtReal
	DtDouble
	DtDecimal
	DtTime
	DtString
	DtBytes
	DtLob
)

View File

@ -0,0 +1,16 @@
// generated by stringer -type=DataType; DO NOT EDIT
package protocol
import "fmt"
// NOTE(review): tables regenerated to match the DataType constants in
// datatype.go (DtString/DtBytes) — the previous generated table still
// listed the obsolete DtVarchar/DtNvarchar names, so DtString.String()
// returned "DtVarchar".
const _DataType_name = "DtUnknownDtTinyintDtSmallintDtIntDtBigintDtRealDtDoubleDtDecimalDtTimeDtStringDtBytesDtLob"

var _DataType_index = [...]uint8{0, 9, 18, 28, 33, 41, 47, 55, 64, 70, 78, 85, 90}

// String returns the constant name for i, or "DataType(N)" for values
// outside the generated table.
func (i DataType) String() string {
	if i >= DataType(len(_DataType_index)-1) {
		return fmt.Sprintf("DataType(%d)", i)
	}
	return _DataType_name[_DataType_index[i]:_DataType_index[i+1]]
}

20
vendor/github.com/SAP/go-hdb/internal/protocol/doc.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package protocol implements the hdb command network protocol.
//
// http://help.sap.com/hana/SAP_HANA_SQL_Command_Network_Protocol_Reference_en.pdf
package protocol

View File

@ -0,0 +1,26 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=endianess

// endianess describes the byte order announced in the protocol handshake.
type endianess int8

const (
	bigEndian    endianess = 0
	littleEndian endianess = 1
)

View File

@ -0,0 +1,16 @@
// generated by stringer -type=endianess; DO NOT EDIT
package protocol
import "fmt"
// Generated lookup tables — regenerate with `stringer -type=endianess`
// when the constants change.
const _endianess_name = "bigEndianlittleEndian"

var _endianess_index = [...]uint8{0, 9, 21}

// String returns the constant name for i, or "endianess(N)" for values
// outside the generated table.
func (i endianess) String() string {
	if i < 0 || i >= endianess(len(_endianess_index)-1) {
		return fmt.Sprintf("endianess(%d)", i)
	}
	return _endianess_name[_endianess_index[i]:_endianess_index[i+1]]
}

146
vendor/github.com/SAP/go-hdb/internal/protocol/error.go generated vendored Normal file
View File

@ -0,0 +1,146 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
const (
	// sqlStateSize is the fixed length of the SQL state field on the wire.
	sqlStateSize = 5
)

// sqlState holds the five-character SQL state of a server error.
type sqlState [sqlStateSize]byte

// hdbError represents one error entry of the error part sent by the server.
type hdbError struct {
	errorCode       int32
	errorPosition   int32
	errorTextLength int32
	errorLevel      ErrorLevel
	sqlState        sqlState
	errorText       []byte
}

// newHdbError returns an empty hdbError ready to be filled by read.
func newHdbError() *hdbError {
	return &hdbError{}
}
// String implements the Stringer interface.
func (e *hdbError) String() string {
	// sqlState is a byte array: slice it so %s renders it as text — applying
	// %s to the array would format each element ("[%!s(uint8=...) ...]").
	// Format string also fixed: "%d" instead of the stray "% d" and
	// consistent comma separation.
	return fmt.Sprintf("errorCode %d, errorPosition %d, errorTextLength %d, errorLevel %s, sqlState %s, errorText %s",
		e.errorCode,
		e.errorPosition,
		e.errorTextLength,
		e.errorLevel,
		e.sqlState[:],
		e.errorText,
	)
}
// Error implements the Error interface.
func (e *hdbError) Error() string {
	return fmt.Sprintf("SQL %s %d - %s", e.errorLevel, e.errorCode, e.errorText)
}

// Code implements the hdb.Error interface.
func (e *hdbError) Code() int {
	return int(e.errorCode)
}

// Position implements the hdb.Error interface.
func (e *hdbError) Position() int {
	return int(e.errorPosition)
}

// Level implements the hdb.Error interface.
func (e *hdbError) Level() ErrorLevel {
	return e.errorLevel
}

// Text implements the hdb.Error interface.
func (e *hdbError) Text() string {
	return string(e.errorText)
}

// IsWarning implements the hdb.Error interface.
func (e *hdbError) IsWarning() bool {
	return e.errorLevel == HdbWarning
}

// IsError implements the hdb.Error interface.
func (e *hdbError) IsError() bool {
	return e.errorLevel == HdbError
}

// IsFatal implements the hdb.Error interface.
func (e *hdbError) IsFatal() bool {
	return e.errorLevel == HdbFatalError
}

// kind returns the part kind for the error part.
func (e *hdbError) kind() partKind {
	return pkError
}

// setNumArg is part of the part interface; the error part ignores it.
func (e *hdbError) setNumArg(int) {
	// not needed
}
// read reads one error entry from the protocol stream: three int32 fields
// (code, position, text length), the error level byte, the five-byte SQL
// state, the error text, and one filler byte.
func (e *hdbError) read(rd *bufio.Reader) error {
	var err error

	if e.errorCode, err = rd.ReadInt32(); err != nil {
		return err
	}
	if e.errorPosition, err = rd.ReadInt32(); err != nil {
		return err
	}
	if e.errorTextLength, err = rd.ReadInt32(); err != nil {
		return err
	}

	el, err := rd.ReadInt8()
	if err != nil {
		return err
	}
	e.errorLevel = ErrorLevel(el)

	if err := rd.ReadFull(e.sqlState[:]); err != nil {
		return err
	}

	// read error text as ASCII data as some errors return invalid CESU-8 characters
	// e.g: SQL HdbError 7 - feature not supported: invalid character encoding: <invaid CESU-8 characters>
	// if e.errorText, err = rd.ReadCesu8(int(e.errorTextLength)); err != nil {
	// 	return err
	// }
	e.errorText = make([]byte, int(e.errorTextLength))
	// use ReadFull: a single Read on the buffered stream may return fewer
	// than errorTextLength bytes, leaving the text truncated and the
	// stream position misaligned
	if err = rd.ReadFull(e.errorText); err != nil {
		return err
	}

	// part bufferlength is by one greater than real error length? --> read filler byte
	if _, err := rd.ReadByte(); err != nil {
		return err
	}

	if trace {
		outLogger.Printf("error: %s", e)
	}

	return nil
}

View File

@ -0,0 +1,29 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=ErrorLevel

// ErrorLevel send from database server.
type ErrorLevel int8

// HDB error level constants.
const (
	HdbWarning    ErrorLevel = 0
	HdbError      ErrorLevel = 1
	HdbFatalError ErrorLevel = 2
)

View File

@ -0,0 +1,16 @@
// generated by stringer -type=ErrorLevel; DO NOT EDIT
package protocol
import "fmt"
// Generated lookup tables — regenerate with `stringer -type=ErrorLevel`
// when the constants change.
const _ErrorLevel_name = "HdbWarningHdbErrorHdbFatalError"

var _ErrorLevel_index = [...]uint8{0, 10, 18, 31}

// String returns the constant name for i, or "ErrorLevel(N)" for values
// outside the generated table.
func (i ErrorLevel) String() string {
	if i < 0 || i >= ErrorLevel(len(_ErrorLevel_index)-1) {
		return fmt.Sprintf("ErrorLevel(%d)", i)
	}
	return _ErrorLevel_name[_ErrorLevel_index[i]:_ErrorLevel_index[i+1]]
}

View File

@ -0,0 +1,48 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"github.com/SAP/go-hdb/internal/bufio"
)
//fetch size
// fetchsize is the fetch-size part: the number of rows requested per fetch.
type fetchsize int32

// kind returns the part kind for the fetch-size part.
func (s fetchsize) kind() partKind {
	return pkFetchSize
}

// size returns the encoded size in bytes (one int32).
func (s fetchsize) size() (int, error) {
	return 4, nil
}

// numArg returns the number of arguments in this part (always one).
func (s fetchsize) numArg() int {
	return 1
}

// write writes the fetch size as int32 to the protocol stream.
func (s fetchsize) write(wr *bufio.Writer) error {
	if err := wr.WriteInt32(int32(s)); err != nil {
		return err
	}

	if trace {
		outLogger.Printf("fetchsize: %d", s)
	}

	return nil
}

909
vendor/github.com/SAP/go-hdb/internal/protocol/field.go generated vendored Normal file
View File

@ -0,0 +1,909 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"database/sql/driver"
"fmt"
"math"
"sort"
"time"
"github.com/SAP/go-hdb/internal/bufio"
"github.com/SAP/go-hdb/internal/unicode/cesu8"
)
const (
	// Wire representations of NULL for real and double columns:
	// all bits set.
	realNullValue   uint32 = ^uint32(0)
	doubleNullValue uint64 = ^uint64(0)
)

// uint32Slice attaches sort.Interface to []uint32 so name offsets can be
// sorted in ascending order.
type uint32Slice []uint32

// Len implements sort.Interface.
func (p uint32Slice) Len() int {
	return len(p)
}

// Less implements sort.Interface (ascending numeric order).
func (p uint32Slice) Less(i, j int) bool {
	return p[i] < p[j]
}

// Swap implements sort.Interface.
func (p uint32Slice) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}

// sort sorts the slice in place in ascending order.
func (p uint32Slice) sort() {
	sort.Sort(p)
}
// field is the common interface of result/parameter field metadata:
// type code, input/output direction, and name lookup via the shared
// offset→name map.
type field interface {
	typeCode() typeCode
	in() bool
	out() bool
	name(map[uint32]string) string
	nameOffsets() []uint32
	String() string
}
// FieldSet contains database field metadata.
type FieldSet struct {
	fields []field
	// names maps a name-part offset to the resolved field name; populated
	// lazily (keys by nameOffsets, values when names are read).
	names map[uint32]string
}

// newFieldSet creates a FieldSet with room for size fields.
func newFieldSet(size int) *FieldSet {
	return &FieldSet{
		fields: make([]field, size),
		names:  make(map[uint32]string),
	}
}

// String implements the Stringer interface.
func (f *FieldSet) String() string {
	a := make([]string, len(f.fields))
	for i, f := range f.fields {
		a[i] = f.String()
	}
	return fmt.Sprintf("%v", a)
}

// nameOffsets registers all valid name offsets of all fields in f.names
// (0xFFFFFFFF marks "no name") and returns them in ascending order.
func (f *FieldSet) nameOffsets() []uint32 {
	for _, field := range f.fields {
		for _, offset := range field.nameOffsets() {
			if offset != 0xFFFFFFFF {
				f.names[offset] = ""
			}
		}
	}
	// sort offsets (not sure if offsets are monotonically increasing in any case)
	offsets := make([]uint32, len(f.names))
	i := 0
	for offset := range f.names {
		offsets[i] = offset
		i++
	}
	uint32Slice(offsets).sort()
	return offsets
}
// NumInputField returns the number of input fields in a database statement.
func (f *FieldSet) NumInputField() int {
	n := 0
	for _, fld := range f.fields {
		if fld.in() {
			n++
		}
	}
	return n
}

// NumOutputField returns the number of output fields of a query or stored procedure.
func (f *FieldSet) NumOutputField() int {
	n := 0
	for _, fld := range f.fields {
		if fld.out() {
			n++
		}
	}
	return n
}

// DataType returns the datatype of the field at index idx.
func (f *FieldSet) DataType(idx int) DataType {
	return f.fields[idx].typeCode().dataType()
}

// OutputNames fills the names parameter with field names of all output fields. The size of the names slice must be at least
// NumOutputField big.
func (f *FieldSet) OutputNames(names []string) error {
	next := 0
	for _, fld := range f.fields {
		if !fld.out() {
			continue
		}
		if next >= len(names) { // assert names size
			return fmt.Errorf("names size too short %d - expected min %d", len(names), next)
		}
		names[next] = fld.name(f.names)
		next++
	}
	return nil
}
// FieldValues contains rows read from database.
type FieldValues struct {
	s *Session

	rows    int
	cols    int            // number of output columns per row
	lobCols int            // number of lob-typed output columns per row
	values  []driver.Value // row-major: values[row*cols+col]

	descrs  []*LobReadDescr // Caution: store descriptor to guarantee valid addresses
	writers []lobWriter
}

// newFieldValues creates an empty FieldValues bound to session s.
func newFieldValues(s *Session) *FieldValues {
	return &FieldValues{s: s}
}

// String implements the Stringer interface.
func (f *FieldValues) String() string {
	return fmt.Sprintf("rows %d columns %d lob columns %d", f.rows, f.cols, f.lobCols)
}

// read reads rows result rows for the output fields of fieldSet from the
// protocol stream, recording a LobReadDescr for each lob column.
func (f *FieldValues) read(rows int, fieldSet *FieldSet, rd *bufio.Reader) error {
	f.rows = rows
	f.descrs = make([]*LobReadDescr, 0)

	f.cols, f.lobCols = 0, 0
	for _, field := range fieldSet.fields {
		if field.out() {
			if field.typeCode().isLob() {
				f.descrs = append(f.descrs, &LobReadDescr{col: f.cols})
				f.lobCols++
			}
			f.cols++
		}
	}
	f.values = make([]driver.Value, f.rows*f.cols)
	f.writers = make([]lobWriter, f.lobCols)

	for i := 0; i < f.rows; i++ {
		j := 0
		for _, field := range fieldSet.fields {
			if !field.out() {
				continue
			}
			var err error
			if f.values[i*f.cols+j], err = readField(rd, field.typeCode()); err != nil {
				return err
			}
			j++
		}
	}
	return nil
}

// NumRow returns the number of rows available in FieldValues.
func (f *FieldValues) NumRow() int {
	return f.rows
}

// Row fills the dest value slice with row data at index idx.
// For lob columns the stored lobWriter is wrapped in its LobReadDescr;
// the last descriptor carries the callback that streams all lobs of the
// row from the session.
func (f *FieldValues) Row(idx int, dest []driver.Value) {
	copy(dest, f.values[idx*f.cols:(idx+1)*f.cols])

	if f.lobCols == 0 {
		return
	}

	for i, descr := range f.descrs {
		col := descr.col
		writer := dest[col].(lobWriter)
		f.writers[i] = writer
		descr.w = writer
		dest[col] = lobReadDescrToPointer(descr)
	}

	// last descriptor triggers lob read
	f.descrs[f.lobCols-1].fn = func() error {
		return f.s.readLobStream(f.writers)
	}
}
// Fixed wire sizes (in bytes) per type code.
const (
	tinyintFieldSize       = 1
	smallintFieldSize      = 2
	intFieldSize           = 4
	bigintFieldSize        = 8
	realFieldSize          = 4
	doubleFieldSize        = 8
	dateFieldSize          = 4
	timeFieldSize          = 4
	timestampFieldSize     = dateFieldSize + timeFieldSize
	decimalFieldSize       = 16
	lobInputDescriptorSize = 9
)

// fieldSize returns the encoded size in bytes of value v when written as
// type code tc. A nil value has size 0. Type mismatches and unimplemented
// type codes terminate via outLogger.Fatalf.
func fieldSize(tc typeCode, v driver.Value) (int, error) {

	if v == nil {
		return 0, nil
	}

	switch tc {
	case tcTinyint:
		return tinyintFieldSize, nil
	case tcSmallint:
		return smallintFieldSize, nil
	case tcInt:
		return intFieldSize, nil
	case tcBigint:
		return bigintFieldSize, nil
	case tcReal:
		return realFieldSize, nil
	case tcDouble:
		return doubleFieldSize, nil
	case tcDate:
		return dateFieldSize, nil
	case tcTime:
		return timeFieldSize, nil
	case tcTimestamp:
		return timestampFieldSize, nil
	case tcDecimal:
		return decimalFieldSize, nil
	case tcChar, tcVarchar, tcString:
		switch v := v.(type) {
		case []byte:
			return bytesSize(len(v))
		case string:
			return bytesSize(len(v))
		default:
			outLogger.Fatalf("data type %s mismatch %T", tc, v)
		}
	case tcNchar, tcNvarchar, tcNstring:
		// unicode types are sized by their CESU-8 encoding
		switch v := v.(type) {
		case []byte:
			return bytesSize(cesu8.Size(v))
		case string:
			return bytesSize(cesu8.StringSize(v))
		default:
			outLogger.Fatalf("data type %s mismatch %T", tc, v)
		}
	case tcBinary, tcVarbinary:
		v, ok := v.([]byte)
		if !ok {
			outLogger.Fatalf("data type %s mismatch %T", tc, v)
		}
		return bytesSize(len(v))
	case tcBlob, tcClob, tcNclob:
		// lobs are written as a fixed-size input descriptor
		return lobInputDescriptorSize, nil
	}
	outLogger.Fatalf("data type %s not implemented", tc)
	return 0, nil
}
// readField reads one value of type code tc from the protocol stream and
// returns it as a Go value (nil for database NULL). Integer types carry a
// leading validity byte; real/double encode NULL as an all-bits-set
// sentinel; lob columns return a lobWriter instead of the data itself.
func readField(rd *bufio.Reader, tc typeCode) (interface{}, error) {

	switch tc {

	case tcTinyint, tcSmallint, tcInt, tcBigint:

		valid, err := rd.ReadBool()
		if err != nil {
			return nil, err
		}
		if !valid { //null value
			return nil, nil
		}

		switch tc {
		case tcTinyint:
			if v, err := rd.ReadByte(); err == nil {
				return int64(v), nil
			}
			return nil, err
		case tcSmallint:
			if v, err := rd.ReadInt16(); err == nil {
				return int64(v), nil
			}
			return nil, err
		case tcInt:
			if v, err := rd.ReadInt32(); err == nil {
				return int64(v), nil
			}
			return nil, err
		case tcBigint:
			if v, err := rd.ReadInt64(); err == nil {
				return v, nil
			}
			return nil, err
		}

	case tcReal:
		v, err := rd.ReadUint32()
		if err != nil {
			return nil, err
		}
		if v == realNullValue {
			return nil, nil
		}
		return float64(math.Float32frombits(v)), nil

	case tcDouble:
		v, err := rd.ReadUint64()
		if err != nil {
			return nil, err
		}
		if v == doubleNullValue {
			return nil, nil
		}
		return math.Float64frombits(v), nil

	case tcDate:
		year, month, day, null, err := readDate(rd)
		if err != nil {
			return nil, err
		}
		if null {
			return nil, nil
		}
		return time.Date(year, month, day, 0, 0, 0, 0, time.UTC), nil

	// time read gives only seconds (cut), no milliseconds
	case tcTime:
		hour, minute, nanosecs, null, err := readTime(rd)
		if err != nil {
			return nil, err
		}
		if null {
			return nil, nil
		}
		return time.Date(1, 1, 1, hour, minute, 0, nanosecs, time.UTC), nil

	case tcTimestamp:
		year, month, day, dateNull, err := readDate(rd)
		if err != nil {
			return nil, err
		}
		hour, minute, nanosecs, timeNull, err := readTime(rd)
		if err != nil {
			return nil, err
		}
		if dateNull || timeNull {
			return nil, nil
		}
		return time.Date(year, month, day, hour, minute, 0, nanosecs, time.UTC), nil

	case tcDecimal:
		b, null, err := readDecimal(rd)
		switch {
		case err != nil:
			return nil, err
		case null:
			return nil, nil
		default:
			return b, nil
		}

	case tcChar, tcVarchar:
		value, null, err := readBytes(rd)
		if err != nil {
			return nil, err
		}
		if null {
			return nil, nil
		}
		return value, nil

	case tcNchar, tcNvarchar:
		// unicode columns are CESU-8 on the wire; readUtf8 converts
		value, null, err := readUtf8(rd)
		if err != nil {
			return nil, err
		}
		if null {
			return nil, nil
		}
		return value, nil

	case tcBinary, tcVarbinary:
		value, null, err := readBytes(rd)
		if err != nil {
			return nil, err
		}
		if null {
			return nil, nil
		}
		return value, nil

	case tcBlob, tcClob, tcNclob:
		null, writer, err := readLob(rd, tc)
		if err != nil {
			return nil, err
		}
		if null {
			return nil, nil
		}
		return writer, nil
	}

	outLogger.Fatalf("read field: type code %s not implemented", tc)
	return nil, nil
}
// writeField writes a single field value v with type code tc to wr.
// A nil value is encoded as the type code byte with the high bit set;
// otherwise the plain type code byte is followed by the type specific
// encoding of v. An unimplemented type code terminates the process via
// outLogger.Fatalf.
func writeField(wr *bufio.Writer, tc typeCode, v driver.Value) error {
	// null value
	if v == nil {
		if err := wr.WriteByte(byte(tc) | 0x80); err != nil { //set high bit
			return err
		}
		return nil
	}
	// type code
	if err := wr.WriteByte(byte(tc)); err != nil {
		return err
	}
	switch tc {
	// TODO: char, ...
	case tcTinyint, tcSmallint, tcInt, tcBigint:
		i64, ok := v.(int64)
		if !ok {
			return fmt.Errorf("invalid argument type %T", v)
		}
		switch tc {
		case tcTinyint:
			return wr.WriteByte(byte(i64))
		case tcSmallint:
			return wr.WriteInt16(int16(i64))
		case tcInt:
			return wr.WriteInt32(int32(i64))
		case tcBigint:
			return wr.WriteInt64(i64)
		}
	case tcReal:
		f64, ok := v.(float64)
		if !ok {
			return fmt.Errorf("invalid argument type %T", v)
		}
		return wr.WriteFloat32(float32(f64))
	case tcDouble:
		f64, ok := v.(float64)
		if !ok {
			return fmt.Errorf("invalid argument type %T", v)
		}
		return wr.WriteFloat64(f64)
	case tcDate:
		t, ok := v.(time.Time)
		if !ok {
			return fmt.Errorf("invalid argument type %T", v)
		}
		return writeDate(wr, t)
	case tcTime:
		t, ok := v.(time.Time)
		if !ok {
			return fmt.Errorf("invalid argument type %T", v)
		}
		return writeTime(wr, t)
	case tcTimestamp:
		t, ok := v.(time.Time)
		if !ok {
			return fmt.Errorf("invalid argument type %T", v)
		}
		if err := writeDate(wr, t); err != nil {
			return err
		}
		return writeTime(wr, t)
	case tcDecimal:
		b, ok := v.([]byte)
		if !ok {
			return fmt.Errorf("invalid argument type %T", v)
		}
		if len(b) != 16 {
			return fmt.Errorf("invalid argument length %d of type %T - expected %d", len(b), v, 16)
		}
		_, err := wr.Write(b)
		return err
	case tcChar, tcVarchar, tcString:
		switch v := v.(type) {
		case []byte:
			return writeBytes(wr, v)
		case string:
			return writeString(wr, v)
		default:
			return fmt.Errorf("invalid argument type %T", v)
		}
	case tcNchar, tcNvarchar, tcNstring:
		switch v := v.(type) {
		case []byte:
			return writeUtf8Bytes(wr, v)
		case string:
			return writeUtf8String(wr, v)
		default:
			return fmt.Errorf("invalid argument type %T", v)
		}
	case tcBinary, tcVarbinary:
		// Fixed: use a fresh name for the assertion result. The original
		// shadowed v (a nil []byte on failure), so the %T verb below reported
		// []uint8 instead of the caller's actual argument type.
		b, ok := v.([]byte)
		if !ok {
			return fmt.Errorf("invalid argument type %T", v)
		}
		return writeBytes(wr, b)
	case tcBlob, tcClob, tcNclob:
		return writeLob(wr)
	}
	outLogger.Fatalf("write field: type code %s not implemented", tc)
	return nil
}
// null values: most sig bit unset
// year: unset second most sig bit (subtract 2^15)
// --> read year as unsigned
// month is 0-based
// day is 1 byte

// readDate reads a wire encoded date and returns year, month (1-based), day,
// a null indicator and any read error. For a null date the remaining 2 date
// bytes are skipped so the reader stays positioned correctly.
func readDate(rd *bufio.Reader) (int, time.Month, int, bool, error) {
	year, err := rd.ReadUint16()
	if err != nil {
		return 0, 0, 0, false, err
	}
	if (year & 0x8000) == 0 { //null value
		if err := rd.Skip(2); err != nil {
			return 0, 0, 0, false, err
		}
		return 0, 0, 0, true, nil
	}
	// NOTE(review): the comment above says "subtract 2^15" (mask 0x7fff) but
	// the code masks 0x3fff, clearing bit 14 as well — confirm against the
	// protocol specification.
	year &= 0x3fff
	month, err := rd.ReadInt8()
	if err != nil {
		return 0, 0, 0, false, err
	}
	month++ // wire month is 0-based
	day, err := rd.ReadInt8()
	if err != nil {
		return 0, 0, 0, false, err
	}
	return int(year), time.Month(month), int(day), false, nil
}
// year: set most sig bit
// month 0 based

// writeDate writes t (converted to UTC) in the wire date format:
// uint16 year with the high bit set (non-null marker), 0-based month, day.
func writeDate(wr *bufio.Writer, t time.Time) error {
	//store in utc
	utc := t.In(time.UTC)

	year, month, day := utc.Date()

	if err := wr.WriteUint16(uint16(year) | 0x8000); err != nil {
		return err
	}
	if err := wr.WriteInt8(int8(month) - 1); err != nil {
		return err
	}
	if err := wr.WriteInt8(int8(day)); err != nil {
		return err
	}
	return nil
}
// readTime reads a wire encoded time and returns hour, minute, nanoseconds,
// a null indicator and any read error. A cleared high bit in the hour byte
// marks a null time (the remaining 3 bytes are skipped). The uint16
// millisecond field is scaled to nanoseconds; per writeTime it encodes
// seconds*1000 + milliseconds, so the value may exceed one second —
// time.Date normalizes the overflow.
func readTime(rd *bufio.Reader) (int, int, int, bool, error) {
	hour, err := rd.ReadByte()
	if err != nil {
		return 0, 0, 0, false, err
	}
	if (hour & 0x80) == 0 { //null value
		if err := rd.Skip(3); err != nil {
			return 0, 0, 0, false, err
		}
		return 0, 0, 0, true, nil
	}
	hour &= 0x7f // drop the non-null marker bit
	minute, err := rd.ReadInt8()
	if err != nil {
		return 0, 0, 0, false, err
	}
	millisecs, err := rd.ReadUint16()
	if err != nil {
		return 0, 0, 0, false, err
	}

	nanosecs := int(millisecs) * 1000000

	return int(hour), int(minute), nanosecs, false, nil
}
// writeTime writes t (converted to UTC) in the wire time format: hour byte
// with the high bit set (non-null marker), minute, and a uint16 holding
// seconds*1000 + milliseconds.
func writeTime(wr *bufio.Writer, t time.Time) error {
	//store in utc
	utc := t.UTC()

	if err := wr.WriteByte(byte(utc.Hour()) | 0x80); err != nil {
		return err
	}
	if err := wr.WriteInt8(int8(utc.Minute())); err != nil {
		return err
	}

	// NOTE(review): Round(time.Millisecond) can carry into the next second
	// (Nanosecond() becomes 0 while Second() here is taken unrounded) —
	// confirm rounding rather than truncation is intended.
	millisecs := utc.Second()*1000 + utc.Round(time.Millisecond).Nanosecond()/1000000

	if err := wr.WriteUint16(uint16(millisecs)); err != nil {
		return err
	}

	return nil
}
// readDecimal reads a raw 16 byte decimal value; a value with bits 4-6 of
// the last byte set signals a database NULL.
func readDecimal(rd *bufio.Reader) ([]byte, bool, error) {
	buf := make([]byte, 16)
	err := rd.ReadFull(buf)
	switch {
	case err != nil:
		return nil, false, err
	case buf[15]&0x70 == 0x70: // null value (bit 4,5,6 set)
		return nil, true, nil
	default:
		return buf, false, nil
	}
}
// string / binary length indicators: values up to bytesLenIndSmall encode
// the payload length directly in the indicator byte; bytesLenIndMedium and
// bytesLenIndBig announce a following int16 / int32 length field;
// bytesLenIndNullValue marks a database NULL.
const (
	bytesLenIndNullValue byte = 255
	bytesLenIndSmall     byte = 245
	bytesLenIndMedium    byte = 246
	bytesLenIndBig       byte = 247
)
// bytesSize returns the total wire size of a byte/string field with the
// given payload size: the payload plus its length indicator encoding
// (1, 3 or 5 bytes).
func bytesSize(size int) (int, error) { //size + length indicator
	if size <= int(bytesLenIndSmall) {
		return size + 1, nil // length fits into the indicator byte
	}
	if size <= math.MaxInt16 {
		return size + 3, nil // indicator byte + int16 length
	}
	if size <= math.MaxInt32 {
		return size + 5, nil // indicator byte + int32 length
	}
	return 0, fmt.Errorf("max string length %d exceeded %d", math.MaxInt32, size)
}
// readBytesSize reads a field length indicator byte and returns the payload
// size and whether the field is a database NULL. Medium and big fields carry
// their length in a following int16 / int32 respectively.
func readBytesSize(rd *bufio.Reader) (int, bool, error) {
	ind, err := rd.ReadByte() //length indicator
	if err != nil {
		return 0, false, err
	}
	switch {
	default:
		return 0, false, fmt.Errorf("invalid length indicator %d", ind)
	case ind == bytesLenIndNullValue:
		return 0, true, nil
	case ind <= bytesLenIndSmall:
		// small: the indicator byte is the length itself
		return int(ind), false, nil
	case ind == bytesLenIndMedium:
		// Fixed: the original used `if size, err := rd.ReadInt16(); err == nil`
		// followed by `return 0, false, err`; the if-scoped err shadowed the
		// outer (nil) err, so a failed read was silently reported as a
		// zero-length field instead of an error.
		size, err := rd.ReadInt16()
		if err != nil {
			return 0, false, err
		}
		return int(size), false, nil
	case ind == bytesLenIndBig:
		// Fixed: same err-shadowing defect as the medium case above.
		size, err := rd.ReadInt32()
		if err != nil {
			return 0, false, err
		}
		return int(size), false, nil
	}
}
// writeBytesSize writes the length indicator for a field of the given size:
// a single byte for small sizes, an indicator byte plus int16 / int32 for
// medium / big sizes. Sizes beyond MaxInt32 yield an error.
func writeBytesSize(wr *bufio.Writer, size int) error {
	switch {

	default:
		return fmt.Errorf("max argument length %d of string exceeded", size)

	case size <= int(bytesLenIndSmall):
		if err := wr.WriteByte(byte(size)); err != nil {
			return err
		}
	case size <= math.MaxInt16:
		if err := wr.WriteByte(bytesLenIndMedium); err != nil {
			return err
		}
		if err := wr.WriteInt16(int16(size)); err != nil {
			return err
		}
	case size <= math.MaxInt32:
		if err := wr.WriteByte(bytesLenIndBig); err != nil {
			return err
		}
		if err := wr.WriteInt32(int32(size)); err != nil {
			return err
		}
	}
	return nil
}
// readBytes reads a length-prefixed byte field; the bool result reports a
// database NULL.
func readBytes(rd *bufio.Reader) ([]byte, bool, error) {
	size, null, err := readBytesSize(rd)
	switch {
	case err != nil:
		return nil, false, err
	case null:
		return nil, true, nil
	}
	buf := make([]byte, size)
	if err := rd.ReadFull(buf); err != nil {
		return nil, false, err
	}
	return buf, false, nil
}
// readUtf8 reads a length-prefixed cesu8 encoded field and returns its
// decoded bytes; the bool result reports a database NULL.
func readUtf8(rd *bufio.Reader) ([]byte, bool, error) {
	size, null, err := readBytesSize(rd)
	switch {
	case err != nil:
		return nil, false, err
	case null:
		return nil, true, nil
	}
	data, err := rd.ReadCesu8(size)
	if err != nil {
		return nil, false, err
	}
	return data, false, nil
}
// readShortUtf8 reads a cesu8 encoded string whose length fits into a single
// byte and returns the decoded bytes together with the wire length.
func readShortUtf8(rd *bufio.Reader) ([]byte, int, error) {
	sizeByte, err := rd.ReadByte()
	if err != nil {
		return nil, 0, err
	}
	size := int(sizeByte)
	data, err := rd.ReadCesu8(size)
	if err != nil {
		return nil, 0, err
	}
	return data, size, nil
}
// writeBytes writes b prefixed by its length indicator.
func writeBytes(wr *bufio.Writer, b []byte) error {
	err := writeBytesSize(wr, len(b))
	if err == nil {
		_, err = wr.Write(b)
	}
	return err
}
// writeString writes s prefixed by its byte length indicator.
func writeString(wr *bufio.Writer, s string) error {
	err := writeBytesSize(wr, len(s))
	if err == nil {
		_, err = wr.WriteString(s)
	}
	return err
}
// writeUtf8Bytes writes b cesu8 encoded, prefixed by the cesu8 byte length.
func writeUtf8Bytes(wr *bufio.Writer, b []byte) error {
	if err := writeBytesSize(wr, cesu8.Size(b)); err != nil {
		return err
	}
	_, err := wr.WriteCesu8(b)
	return err
}
// writeUtf8String writes s cesu8 encoded, prefixed by the cesu8 byte length.
func writeUtf8String(wr *bufio.Writer, s string) error {
	if err := writeBytesSize(wr, cesu8.StringSize(s)); err != nil {
		return err
	}
	_, err := wr.WriteStringCesu8(s)
	return err
}
// readLob reads a lob field descriptor together with its first data chunk
// and returns a null indicator plus a lobWriter holding the received data.
// Character based type codes get a cesu8 decoding writer, binary ones a
// pass-through writer. The reads are order sensitive (wire layout).
func readLob(rd *bufio.Reader, tc typeCode) (bool, lobWriter, error) {

	if _, err := rd.ReadInt8(); err != nil { // type code (is int here)
		return false, nil, err
	}

	opt, err := rd.ReadInt8()
	if err != nil {
		return false, nil, err
	}

	if err := rd.Skip(2); err != nil {
		return false, nil, err
	}

	charLen, err := rd.ReadInt64()
	if err != nil {
		return false, nil, err
	}
	byteLen, err := rd.ReadInt64()
	if err != nil {
		return false, nil, err
	}
	id, err := rd.ReadUint64()
	if err != nil {
		return false, nil, err
	}
	chunkLen, err := rd.ReadInt32()
	if err != nil {
		return false, nil, err
	}

	null := (lobOptions(opt) & loNullindicator) != 0
	eof := (lobOptions(opt) & loLastdata) != 0 // whole lob fits into this chunk

	var writer lobWriter
	if tc.isCharBased() {
		writer = newCharLobWriter(locatorID(id), charLen, byteLen)
	} else {
		writer = newBinaryLobWriter(locatorID(id), charLen, byteLen)
	}
	// consume the first chunk even for null values, keeping rd positioned
	if err := writer.write(rd, int(chunkLen), eof); err != nil {
		return null, writer, err
	}
	return null, writer, nil
}
// writeLob writes a lob field placeholder: options byte 0 followed by two
// zero int32 values — no lob data is transferred with the statement itself.
// TODO: first write: add content? - actually no data transferred
func writeLob(wr *bufio.Writer) error {
	if err := wr.WriteByte(0); err != nil {
		return err
	}
	for i := 0; i < 2; i++ {
		if err := wr.WriteInt32(0); err != nil {
			return err
		}
	}
	return nil
}

View File

@ -0,0 +1,60 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=functionCode

// functionCode identifies the kind of statement a server reply refers to.
type functionCode int16

const (
	fcNil                       functionCode = 0
	fcDDL                       functionCode = 1
	fcInsert                    functionCode = 2
	fcUpdate                    functionCode = 3
	fcDelete                    functionCode = 4
	fcSelect                    functionCode = 5
	fcSelectForUpdate           functionCode = 6
	fcExplain                   functionCode = 7
	fcDBProcedureCall           functionCode = 8
	fcDBProcedureCallWithResult functionCode = 9
	fcFetch                     functionCode = 10
	fcCommit                    functionCode = 11
	fcRollback                  functionCode = 12
	fcSavepoint                 functionCode = 13
	fcConnect                   functionCode = 14
	fcWriteLob                  functionCode = 15
	fcReadLob                   functionCode = 16
	fcPing                      functionCode = 17 //reserved: do not use
	fcDisconnect                functionCode = 18
	fcCloseCursor               functionCode = 19
	fcFindLob                   functionCode = 20
	fcAbapStream                functionCode = 21
	fcXAStart                   functionCode = 22
	fcXAJoin                    functionCode = 23
)
// queryType maps a function code to the query type of the statement that
// produced it; codes without a dedicated mapping yield QtNone.
func (k functionCode) queryType() QueryType {
	if k == fcSelect || k == fcSelectForUpdate {
		return QtSelect
	}
	if k == fcDBProcedureCall {
		return QtProcedureCall
	}
	return QtNone
}

View File

@ -0,0 +1,16 @@
// generated by stringer -type=functionCode; DO NOT EDIT
package protocol
import "fmt"
const _functionCode_name = "fcNilfcDDLfcInsertfcUpdatefcDeletefcSelectfcSelectForUpdatefcExplainfcDBProcedureCallfcDBProcedureCallWithResultfcFetchfcCommitfcRollbackfcSavepointfcConnectfcWriteLobfcReadLobfcPingfcDisconnectfcCloseCursorfcFindLobfcAbapStreamfcXAStartfcXAJoin"

var _functionCode_index = [...]uint8{0, 5, 10, 18, 26, 34, 42, 59, 68, 85, 112, 119, 127, 137, 148, 157, 167, 176, 182, 194, 207, 216, 228, 237, 245}

// String returns the constant name of i via the generated name/index tables;
// out-of-range values render as "functionCode(%d)".
func (i functionCode) String() string {
	if i < 0 || i >= functionCode(len(_functionCode_index)-1) {
		return fmt.Sprintf("functionCode(%d)", i)
	}
	return _functionCode_name[_functionCode_index[i]:_functionCode_index[i+1]]
}

268
vendor/github.com/SAP/go-hdb/internal/protocol/init.go generated vendored Normal file
View File

@ -0,0 +1,268 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
const (
	// okEndianess is written before the endianess option value; the read
	// side requires this counter to be exactly 1 (see initRequest.read).
	okEndianess int8 = 1
)

const (
	initRequestFillerSize = 4
)

// initRequestFiller is the fixed 4 byte preamble of an init request.
var initRequestFiller uint32 = 0xffffffff

// NOTE(review): productVersion and protocolVersion are declared here but the
// handshake structs below use the plain version type — confirm whether these
// two are still referenced elsewhere.
type productVersion struct {
	major int8
	minor int16
}

func (v *productVersion) String() string {
	return fmt.Sprintf("%d.%d", v.major, v.minor)
}

type protocolVersion struct {
	major int8
	minor int16
}

func (v *protocolVersion) String() string {
	return fmt.Sprintf("%d.%d", v.major, v.minor)
}

// version is a major.minor version pair used for both product and protocol
// version in the handshake messages.
type version struct {
	major int8
	minor int16
}

func (v *version) String() string {
	return fmt.Sprintf("%d.%d", v.major, v.minor)
}

// initRequest is the client side handshake message carrying product and
// protocol version plus an optional endianess option.
type initRequest struct {
	product    *version
	protocol   *version
	numOptions int8
	endianess  endianess
}

// newInitRequest returns a new initRequest with zero valued versions.
func newInitRequest() *initRequest {
	return &initRequest{
		product:  new(version),
		protocol: new(version),
	}
}

// String implements fmt.Stringer (used for tracing).
func (r *initRequest) String() string {
	switch r.numOptions {
	default:
		return fmt.Sprintf("init request: product version %s protocol version %s", r.product, r.protocol)
	case 1:
		return fmt.Sprintf("init request: product version %s protocol version %s endianess %s", r.product, r.protocol, r.endianess)
	}
}
// read reads an init request from rd. Only 0 or 1 options are supported; the
// single known option is the endianess (its value count must be 1). Any
// other option count terminates the process via outLogger.Fatalf.
func (r *initRequest) read(rd *bufio.Reader) error {
	var err error

	if err := rd.Skip(initRequestFillerSize); err != nil { //filler
		return err
	}

	if r.product.major, err = rd.ReadInt8(); err != nil {
		return err
	}

	if r.product.minor, err = rd.ReadInt16(); err != nil {
		return err
	}

	if r.protocol.major, err = rd.ReadInt8(); err != nil {
		return err
	}

	if r.protocol.minor, err = rd.ReadInt16(); err != nil {
		return err
	}

	if err := rd.Skip(1); err != nil { //reserved filler
		return err
	}

	if r.numOptions, err = rd.ReadInt8(); err != nil {
		return err
	}

	switch r.numOptions {
	default:
		outLogger.Fatalf("invalid number of options %d", r.numOptions)

	case 0:
		if err := rd.Skip(2); err != nil {
			return err
		}

	case 1:
		if cnt, err := rd.ReadInt8(); err == nil {
			if cnt != 1 {
				return fmt.Errorf("endianess %d - 1 expected", cnt)
			}
		} else {
			return err
		}
		_endianess, err := rd.ReadInt8()
		if err != nil {
			return err
		}
		r.endianess = endianess(_endianess)
	}

	if trace {
		outLogger.Printf("read %s", r)
	}

	return nil
}
// write writes the init request to wr and flushes the buffer. An invalid
// numOptions terminates the process via outLogger.Fatalf.
func (r *initRequest) write(wr *bufio.Writer) error {

	if err := wr.WriteUint32(initRequestFiller); err != nil {
		return err
	}

	if err := wr.WriteInt8(r.product.major); err != nil {
		return err
	}
	if err := wr.WriteInt16(r.product.minor); err != nil {
		return err
	}
	if err := wr.WriteInt8(r.protocol.major); err != nil {
		return err
	}
	if err := wr.WriteInt16(r.protocol.minor); err != nil {
		return err
	}

	switch r.numOptions {
	default:
		outLogger.Fatalf("invalid number of options %d", r.numOptions)

	case 0:
		if err := wr.WriteZeroes(4); err != nil {
			return err
		}

	case 1:
		// reserved
		if err := wr.WriteZeroes(1); err != nil {
			return err
		}
		if err := wr.WriteInt8(r.numOptions); err != nil {
			return err
		}
		if err := wr.WriteInt8(int8(okEndianess)); err != nil {
			return err
		}
		if err := wr.WriteInt8(int8(r.endianess)); err != nil {
			return err
		}
	}

	// flush
	if err := wr.Flush(); err != nil {
		return err
	}

	if trace {
		outLogger.Printf("write %s", r)
	}

	return nil
}
// initReply is the server side handshake message carrying product and
// protocol version.
type initReply struct {
	product  *version
	protocol *version
}

// newInitReply returns a new initReply with zero valued versions.
func newInitReply() *initReply {
	return &initReply{
		product:  new(version),
		protocol: new(version),
	}
}

// String implements fmt.Stringer (used for tracing).
func (r *initReply) String() string {
	return fmt.Sprintf("init reply: product version %s protocol version %s", r.product, r.protocol)
}

// read reads product and protocol version from rd and skips the 2 trailing
// bytes (commitInitReplySize).
func (r *initReply) read(rd *bufio.Reader) error {
	var err error

	if r.product.major, err = rd.ReadInt8(); err != nil {
		return err
	}
	if r.product.minor, err = rd.ReadInt16(); err != nil {
		return err
	}
	if r.protocol.major, err = rd.ReadInt8(); err != nil {
		return err
	}
	if r.protocol.minor, err = rd.ReadInt16(); err != nil {
		return err
	}
	if err := rd.Skip(2); err != nil { //commitInitReplySize
		return err
	}

	if trace {
		outLogger.Printf("read %s", r)
	}

	return nil
}
// write writes the init reply (product and protocol version plus 2 zero
// bytes, commitInitReplySize) to wr and flushes the buffer.
func (r *initReply) write(wr *bufio.Writer) error {
	if err := wr.WriteInt8(r.product.major); err != nil {
		return err
	}
	if err := wr.WriteInt16(r.product.minor); err != nil {
		return err
	}
	// Fixed: the original wrote r.product.major a second time here; the
	// protocol major version must be written (mirrors initReply.read, which
	// reads protocol.major at this position).
	if err := wr.WriteInt8(r.protocol.major); err != nil {
		return err
	}
	if err := wr.WriteInt16(r.protocol.minor); err != nil {
		return err
	}
	if err := wr.WriteZeroes(2); err != nil { // commitInitReplySize
		return err
	}

	// flush
	if err := wr.Flush(); err != nil {
		return err
	}

	if trace {
		outLogger.Printf("write %s", r)
	}

	return nil
}

View File

@ -0,0 +1,23 @@
// +build amd64 386 arm arm64
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//amd64, 386 architectures: little endian
//arm, arm64: go supports little endian only

// archEndian is the byte order of the architectures selected by this file's
// build tags; all of them are little endian.
var archEndian = littleEndian

589
vendor/github.com/SAP/go-hdb/internal/protocol/lob.go generated vendored Normal file
View File

@ -0,0 +1,589 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"io"
"math"
"unicode/utf8"
"golang.org/x/text/transform"
"github.com/SAP/go-hdb/internal/bufio"
"github.com/SAP/go-hdb/internal/unicode"
"github.com/SAP/go-hdb/internal/unicode/cesu8"
)
const (
	locatorIDSize             = 8
	writeLobRequestHeaderSize = 21
	readLobRequestSize        = 24
)

// variable (unit testing)
//var lobChunkSize = 1 << 14 //TODO: check size
var lobChunkSize int32 = 256 //TODO: check size

// lob options: flag bits of the option byte of lob descriptors and chunks.
type lobOptions int8

const (
	loNullindicator lobOptions = 0x01 // lob value is NULL
	loDataincluded  lobOptions = 0x02 // chunk data follows
	loLastdata      lobOptions = 0x04 // this is the final chunk
)

var lobOptionsText = map[lobOptions]string{
	loNullindicator: "null indicator",
	loDataincluded:  "data included",
	loLastdata:      "last data",
}

// String lists the names of all options set in k (used for tracing).
func (k lobOptions) String() string {
	t := make([]string, 0, len(lobOptionsText))

	for option, text := range lobOptionsText {
		if (k & option) != 0 {
			t = append(t, text)
		}
	}
	return fmt.Sprintf("%v", t)
}

// LobReadDescr is the package internal representation of a lob field to be read from database.
type LobReadDescr struct {
	col int
	fn  func() error
	w   lobWriter
}

// SetWriter sets the io.Writer destination for a lob field to be read from database.
// If a completion callback is registered it is invoked afterwards.
func (d *LobReadDescr) SetWriter(w io.Writer) error {
	if err := d.w.setWriter(w); err != nil {
		return err
	}
	if d.fn != nil {
		return d.fn()
	}
	return nil
}

// LobWriteDescr is the package internal representation of a lob field to be written to database.
type LobWriteDescr struct {
	r io.Reader
}

// SetReader sets the io.Reader source for a lob field to be written to database.
func (d *LobWriteDescr) SetReader(r io.Reader) {
	d.r = r
}
// locatorID identifies a lob value on the server.
type locatorID uint64 // byte[locatorIdSize]

// write lob reply: the list of locator ids the server assigned to the lob
// fields of a statement.
type writeLobReply struct {
	ids    []locatorID
	numArg int
}

func newWriteLobReply() *writeLobReply {
	return &writeLobReply{
		ids: make([]locatorID, 0),
	}
}

// String implements fmt.Stringer (used for tracing).
func (r *writeLobReply) String() string {
	return fmt.Sprintf("write lob reply: %v", r.ids)
}

func (r *writeLobReply) kind() partKind {
	return pkWriteLobReply
}

// setNumArg records the number of locator ids the following read will consume.
func (r *writeLobReply) setNumArg(numArg int) {
	r.numArg = numArg
}

// read reads numArg locator ids, reusing the ids slice when its capacity allows.
func (r *writeLobReply) read(rd *bufio.Reader) error {

	//resize ids
	if cap(r.ids) < r.numArg {
		r.ids = make([]locatorID, r.numArg)
	} else {
		r.ids = r.ids[:r.numArg]
	}

	for i := 0; i < r.numArg; i++ {
		if id, err := rd.ReadUint64(); err == nil {
			r.ids[i] = locatorID(id)
		} else {
			return err
		}
	}

	return nil
}
// write lob request: streams the pending chunks of all unfinished lob
// readers to the database.
type writeLobRequest struct {
	readers []lobReader
}

func newWriteLobRequest(readers []lobReader) *writeLobRequest {
	return &writeLobRequest{
		readers: readers,
	}
}

func (r *writeLobRequest) kind() partKind {
	return pkWriteLobRequest
}

// size computes the part size. Note the side effect: each unfinished reader
// is filled with its next chunk here, before write is called.
func (r *writeLobRequest) size() (int, error) {
	// TODO: check size limit
	size := 0
	for _, reader := range r.readers {
		if reader.done() {
			continue
		}
		if err := reader.fill(); err != nil {
			return 0, err
		}
		size += writeLobRequestHeaderSize
		size += reader.size()
	}
	return size, nil
}

// numArg is the number of lob readers that still have data to send.
func (r *writeLobRequest) numArg() int {
	n := 0
	for _, reader := range r.readers {
		if !reader.done() {
			n++
		}
	}
	return n
}

// write writes one header (locator id, options, offset, size) plus chunk
// data for every unfinished reader.
func (r *writeLobRequest) write(wr *bufio.Writer) error {
	for _, reader := range r.readers {
		if !reader.done() {
			if err := wr.WriteUint64(uint64(reader.id())); err != nil {
				return err
			}

			opt := int8(0x02) // data included (loDataincluded)

			if reader.eof() {
				opt |= 0x04 // last data (loLastdata)
			}

			if err := wr.WriteInt8(opt); err != nil {
				return err
			}

			if err := wr.WriteInt64(-1); err != nil { //offset (-1 := append)
				return err
			}

			if err := wr.WriteInt32(int32(reader.size())); err != nil { // size
				return err
			}

			if _, err := wr.Write(reader.bytes()); err != nil {
				return err
			}
		}
	}
	return nil
}
// read lob request: asks the database for the next chunk of every lob
// writer that has not yet received its last chunk.
type readLobRequest struct {
	writers []lobWriter
}

// numWriter is the number of lob writers still expecting data.
func (r *readLobRequest) numWriter() int {
	n := 0
	for _, writer := range r.writers {
		if !writer.eof() {
			n++
		}
	}
	return n
}

func (r *readLobRequest) kind() partKind {
	return pkReadLobRequest
}

func (r *readLobRequest) size() (int, error) {
	return r.numWriter() * readLobRequestSize, nil
}

func (r *readLobRequest) numArg() int {
	return r.numWriter()
}

// write writes (locator id, 1-based read offset, read length, 4 filler
// bytes) for every writer that still expects data.
func (r *readLobRequest) write(wr *bufio.Writer) error {
	for _, writer := range r.writers {
		if writer.eof() {
			continue
		}

		if err := wr.WriteUint64(uint64(writer.id())); err != nil {
			return err
		}

		readOfs, readLen := writer.readOfsLen()

		if err := wr.WriteInt64(readOfs + 1); err != nil { //1-based
			return err
		}

		if err := wr.WriteInt32(readLen); err != nil {
			return err
		}

		if err := wr.WriteZeroes(4); err != nil {
			return err
		}
	}
	return nil
}

// read lob reply
// - seems like readLobreply gives only an result for one lob - even if more then one is requested
type readLobReply struct {
	writers []lobWriter
	numArg  int
}

func (r *readLobReply) kind() partKind {
	return pkReadLobReply
}

// setNumArg records the number of lob chunks the following read will consume.
func (r *readLobReply) setNumArg(numArg int) {
	r.numArg = numArg
}

// read dispatches each received chunk to the writer with the matching
// locator id.
func (r *readLobReply) read(rd *bufio.Reader) error {
	for i := 0; i < r.numArg; i++ {

		id, err := rd.ReadUint64()
		if err != nil {
			return err
		}

		var writer lobWriter
		for _, writer = range r.writers {
			if writer.id() == locatorID(id) {
				break // writer found
			}
		}
		if writer == nil {
			return fmt.Errorf("internal error: no lob writer found for id %d", id)
		}

		opt, err := rd.ReadInt8()
		if err != nil {
			return err
		}

		chunkLen, err := rd.ReadInt32()
		if err != nil {
			return err
		}

		if err := rd.Skip(3); err != nil {
			return err
		}

		eof := (lobOptions(opt) & loLastdata) != 0

		if err := writer.write(rd, int(chunkLen), eof); err != nil {
			return err
		}
	}
	return nil
}
// lobWriter reads lob chunks and writes them into lob field.
type lobWriter interface {
	id() locatorID
	setWriter(w io.Writer) error
	write(rd *bufio.Reader, size int, eof bool) error
	readOfsLen() (int64, int32)
	eof() bool
}

// baseLobWriter is a reuse struct for binary and char lob writers.
type baseLobWriter struct {
	_id     locatorID
	charLen int64
	byteLen int64
	readOfs int64 // next read offset on the server side
	_eof    bool
	ofs     int // bytes carried over in b from the previous flush (e.g. split cesu8 encoding)
	wr      io.Writer
	_flush  func() error // set by the embedding type (binary or char flush)
	b       []byte       // chunk buffer
}

func (l *baseLobWriter) id() locatorID {
	return l._id
}

func (l *baseLobWriter) eof() bool {
	return l._eof
}

// setWriter installs the destination writer and immediately flushes the
// data buffered so far.
func (l *baseLobWriter) setWriter(wr io.Writer) error {
	l.wr = wr
	return l._flush()
}

// write appends the next chunk of size bytes from rd to the buffer
// (preserving l.ofs carried-over bytes) and flushes when a destination
// writer is already installed.
func (l *baseLobWriter) write(rd *bufio.Reader, size int, eof bool) error {
	l._eof = eof // store eof

	if size == 0 {
		return nil
	}

	l.b = resizeBuffer(l.b, size+l.ofs)
	if err := rd.ReadFull(l.b[l.ofs:]); err != nil {
		return err
	}

	if l.wr != nil {
		return l._flush()
	}
	return nil
}

// readOfsLen returns the server read offset and the next request length,
// capped at lobChunkSize.
func (l *baseLobWriter) readOfsLen() (int64, int32) {
	readLen := l.charLen - l.readOfs
	if readLen > int64(math.MaxInt32) || readLen > int64(lobChunkSize) {
		return l.readOfs, lobChunkSize
	}
	return l.readOfs, int32(readLen)
}
// binaryLobWriter streams byte based lob chunks unmodified to the
// destination writer.
type binaryLobWriter struct {
	*baseLobWriter
}

func newBinaryLobWriter(id locatorID, charLen, byteLen int64) *binaryLobWriter {
	w := &binaryLobWriter{
		baseLobWriter: &baseLobWriter{_id: id, charLen: charLen, byteLen: byteLen},
	}
	w._flush = w.flush
	return w
}

// flush copies the buffered chunk to the destination writer and advances
// the read offset by the number of buffered bytes.
func (w *binaryLobWriter) flush() error {
	n := len(w.b)
	if _, err := w.wr.Write(w.b); err != nil {
		return err
	}
	w.readOfs += int64(n)
	return nil
}
// charLobWriter decodes character based lob chunks from cesu8 to utf8 while
// streaming them to the destination writer. An incomplete trailing cesu8
// surrogate half is carried over to the next chunk via l.ofs.
type charLobWriter struct {
	*baseLobWriter
}

func newCharLobWriter(id locatorID, charLen, byteLen int64) *charLobWriter {
	l := &charLobWriter{
		baseLobWriter: &baseLobWriter{_id: id, charLen: charLen, byteLen: byteLen},
	}
	l._flush = l.flush
	return l
}

// flush transforms the buffered cesu8 bytes to utf8 in place, writes the
// decoded part and keeps any incomplete trailing encoding for the next call.
func (l *charLobWriter) flush() error {
	nDst, nSrc, err := unicode.Cesu8ToUtf8Transformer.Transform(l.b, l.b, true) // inline cesu8 to utf8 transformation
	if err != nil && err != transform.ErrShortSrc {
		return err
	}
	if _, err := l.wr.Write(l.b[:nDst]); err != nil {
		return err
	}
	l.ofs = len(l.b) - nSrc
	if l.ofs != 0 && l.ofs != cesu8.CESUMax/2 { // assert remaining bytes
		return unicode.ErrInvalidCesu8
	}
	l.readOfs += int64(l.runeCount(l.b[:nDst]))
	if l.ofs != 0 {
		l.readOfs++                   // add half encoding
		copy(l.b, l.b[nSrc:len(l.b)]) // move half encoding to buffer begin
	}
	return nil
}

// Caution: hdb counts 4 byte utf-8 encodings (cesu-8 6 bytes) as 2 (3 byte) chars
func (l *charLobWriter) runeCount(b []byte) int {
	numChars := 0
	for len(b) > 0 {
		_, size := utf8.DecodeRune(b)
		b = b[size:]
		numChars++
		if size == utf8.UTFMax {
			numChars++
		}
	}
	return numChars
}
// lobReader provides lob field data chunks to be sent to the database.
// (Fixed comment: the original said "lobWriter" by copy-paste.)
type lobReader interface {
	id() locatorID
	fill() error
	size() int
	bytes() []byte
	eof() bool
	done() bool
}

// baseLobReader is a reuse struct for binary and char lob readers.
type baseLobReader struct {
	r     io.Reader
	_id   locatorID
	_size int  // size of the current chunk in b
	_eof  bool // source reader exhausted
	_done bool // last chunk has been handed out via bytes()
	b     []byte
}

func (l *baseLobReader) id() locatorID {
	return l._id
}

func (l *baseLobReader) eof() bool {
	return l._eof
}

func (l *baseLobReader) done() bool {
	return l._done
}

func (l *baseLobReader) size() int {
	return l._size
}

// bytes returns the current chunk; handing out the final chunk marks the
// reader as done.
func (l *baseLobReader) bytes() []byte {
	if l._eof {
		l._done = true
	}
	return l.b[:l._size]
}
// binaryLobReader supplies byte based lob chunks read verbatim from the
// source reader.
type binaryLobReader struct {
	*baseLobReader
}

func newBinaryLobReader(r io.Reader, id locatorID) *binaryLobReader {
	return &binaryLobReader{baseLobReader: &baseLobReader{r: r, _id: id}}
}

// fill reads the next chunk (up to lobChunkSize bytes) from the source
// reader into the internal buffer; io.EOF marks the final chunk.
func (l *binaryLobReader) fill() error {
	if l._eof {
		return fmt.Errorf("locator id %d eof error", l._id)
	}
	l.b = resizeBuffer(l.b, int(lobChunkSize))
	n, err := l.r.Read(l.b)
	l._size = n
	switch {
	case err == io.EOF:
		l._eof = true
	case err != nil:
		return err
	}
	return nil
}
// charLobReader (character based lobs - cesu8). It reads utf8 from the
// source reader and encodes chunks to cesu8; an incomplete trailing utf8
// rune is carried over to the next fill via l.ofs.
type charLobReader struct {
	*baseLobReader
	c   []byte // utf8 staging buffer (may start with carried-over bytes)
	ofs int    // carried-over byte count in c
}

func newCharLobReader(r io.Reader, id locatorID) *charLobReader {
	return &charLobReader{
		baseLobReader: &baseLobReader{r: r, _id: id},
	}
}

// fill reads the next utf8 chunk, converts it to cesu8 into l.b and keeps
// any incomplete trailing rune for the next call. An incomplete rune at
// actual end of input is an error.
func (l *charLobReader) fill() error {
	if l._eof {
		return fmt.Errorf("locator id %d eof error", l._id)
	}

	l.c = resizeBuffer(l.c, int(lobChunkSize)+l.ofs)
	n, err := l.r.Read(l.c[l.ofs:])
	size := n + l.ofs

	if err != nil && err != io.EOF {
		return err
	}
	l._eof = err == io.EOF
	if l._eof && size == 0 {
		l._size = 0
		return nil
	}

	l.b = resizeBuffer(l.b, cesu8.Size(l.c[:size])) // last rune might be incomplete, so size is one greater than needed
	nDst, nSrc, err := unicode.Utf8ToCesu8Transformer.Transform(l.b, l.c[:size], l._eof)
	if err != nil && err != transform.ErrShortSrc {
		return err
	}

	if l._eof && err == transform.ErrShortSrc {
		return unicode.ErrInvalidUtf8
	}

	l._size = nDst
	l.ofs = size - nSrc

	if l.ofs > 0 {
		copy(l.c, l.c[nSrc:size]) // copy rest to buffer beginn
	}
	return nil
}
// helper

// resizeBuffer returns a buffer of exactly size bytes, reusing b1's backing
// array when it is large enough and otherwise allocating a new one while
// preserving b1's contents.
func resizeBuffer(b1 []byte, size int) []byte {
	if b1 != nil && cap(b1) >= size {
		return b1[:size]
	}
	b2 := make([]byte, size)
	copy(b2, b1) // keep already buffered bytes (e.g. carried-over partial encodings)
	return b2
}

View File

@ -0,0 +1,103 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
const (
	messageHeaderSize = 32
)

// message header: the leading 32 bytes of every protocol message
// (8+4+4+4+2 payload bytes plus 10 padding bytes).
type messageHeader struct {
	sessionID     int64
	packetCount   int32
	varPartLength uint32
	varPartSize   uint32
	noOfSegm      int16
}

// String implements fmt.Stringer (used for tracing).
func (h *messageHeader) String() string {
	return fmt.Sprintf("session id %d packetCount %d varPartLength %d, varPartSize %d noOfSegm %d",
		h.sessionID,
		h.packetCount,
		h.varPartLength,
		h.varPartSize,
		h.noOfSegm)
}

// write writes the header fields followed by 10 zero padding bytes.
func (h *messageHeader) write(wr *bufio.Writer) error {
	if err := wr.WriteInt64(h.sessionID); err != nil {
		return err
	}
	if err := wr.WriteInt32(h.packetCount); err != nil {
		return err
	}
	if err := wr.WriteUint32(h.varPartLength); err != nil {
		return err
	}
	if err := wr.WriteUint32(h.varPartSize); err != nil {
		return err
	}
	if err := wr.WriteInt16(h.noOfSegm); err != nil {
		return err
	}

	if err := wr.WriteZeroes(10); err != nil { //messageHeaderSize
		return err
	}

	if trace {
		outLogger.Printf("write message header: %s", h)
	}

	return nil
}

// read reads the header fields and skips the 10 padding bytes.
func (h *messageHeader) read(rd *bufio.Reader) error {
	var err error

	if h.sessionID, err = rd.ReadInt64(); err != nil {
		return err
	}
	if h.packetCount, err = rd.ReadInt32(); err != nil {
		return err
	}
	if h.varPartLength, err = rd.ReadUint32(); err != nil {
		return err
	}
	if h.varPartSize, err = rd.ReadUint32(); err != nil {
		return err
	}
	if h.noOfSegm, err = rd.ReadInt16(); err != nil {
		return err
	}

	if err := rd.Skip(10); err != nil { //messageHeaderSize
		return err
	}

	if trace {
		outLogger.Printf("read message header: %s", h)
	}

	return nil
}

View File

@ -0,0 +1,49 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=messageType

// messageType identifies the request type of a protocol message.
type messageType int8

const (
	mtNil             messageType = 0
	mtExecuteDirect   messageType = 2
	mtPrepare         messageType = 3
	mtAbapStream      messageType = 4
	mtXAStart         messageType = 5
	mtXAJoin          messageType = 6
	mtExecute         messageType = 13
	mtWriteLob        messageType = 16
	mtReadLob         messageType = 17
	mtFindLob         messageType = 18
	mtAuthenticate    messageType = 65
	mtConnect         messageType = 66
	mtCommit          messageType = 67
	mtRollback        messageType = 68
	mtCloseResultset  messageType = 69
	mtDropStatementID messageType = 70
	mtFetchNext       messageType = 71
	mtFetchAbsolute   messageType = 72
	mtFetchRelative   messageType = 73
	mtFetchFirst      messageType = 74
	mtFetchLast       messageType = 75
	mtDisconnect      messageType = 77
	mtExecuteITab     messageType = 78
	mtFetchNextITab   messageType = 79
	mtInsertNextITab  messageType = 80
)

View File

@ -0,0 +1,46 @@
// generated by stringer -type=messageType; DO NOT EDIT
package protocol
import "fmt"
const (
	_messageType_name_0 = "mtNil"
	_messageType_name_1 = "mtExecuteDirectmtPreparemtAbapStreammtXAStartmtXAJoin"
	_messageType_name_2 = "mtExecute"
	_messageType_name_3 = "mtWriteLobmtReadLobmtFindLob"
	_messageType_name_4 = "mtAuthenticatemtConnectmtCommitmtRollbackmtCloseResultsetmtDropStatementIDmtFetchNextmtFetchAbsolutemtFetchRelativemtFetchFirstmtFetchLast"
	_messageType_name_5 = "mtDisconnectmtExecuteITabmtFetchNextITabmtInsertNextITab"
)

var (
	_messageType_index_0 = [...]uint8{0, 5}
	_messageType_index_1 = [...]uint8{0, 15, 24, 36, 45, 53}
	_messageType_index_2 = [...]uint8{0, 9}
	_messageType_index_3 = [...]uint8{0, 10, 19, 28}
	_messageType_index_4 = [...]uint8{0, 14, 23, 31, 41, 57, 74, 85, 100, 115, 127, 138}
	_messageType_index_5 = [...]uint8{0, 12, 25, 40, 56}
)

// String returns the constant name of i via the generated name/index tables,
// one table per contiguous value range; unknown values render as
// "messageType(%d)".
func (i messageType) String() string {
	switch {
	case i == 0:
		return _messageType_name_0
	case 2 <= i && i <= 6:
		i -= 2
		return _messageType_name_1[_messageType_index_1[i]:_messageType_index_1[i+1]]
	case i == 13:
		return _messageType_name_2
	case 16 <= i && i <= 18:
		i -= 16
		return _messageType_name_3[_messageType_index_3[i]:_messageType_index_3[i+1]]
	case 65 <= i && i <= 75:
		i -= 65
		return _messageType_name_4[_messageType_index_4[i]:_messageType_index_4[i+1]]
	case 77 <= i && i <= 80:
		i -= 77
		return _messageType_name_5[_messageType_index_5[i]:_messageType_index_5[i+1]]
	default:
		return fmt.Sprintf("messageType(%d)", i)
	}
}

View File

@ -0,0 +1,270 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
// Typed wrappers for decoded plain-option values; each (except
// stringType) implements fmt.Stringer for trace output.

type booleanType bool

// String renders the value as "true"/"false".
func (t booleanType) String() string {
	return fmt.Sprint(bool(t))
}

type intType int32

// String renders the value in base 10.
func (t intType) String() string {
	return fmt.Sprint(int32(t))
}

type bigintType int64

// String renders the value in base 10.
func (t bigintType) String() string {
	return fmt.Sprint(int64(t))
}

type doubleType float64

// String renders the value in Go's default (%g) floating-point format.
func (t doubleType) String() string {
	return fmt.Sprint(float64(t))
}

type stringType []byte

type binaryStringType []byte

// String renders the raw bytes as a decimal byte-slice listing.
func (t binaryStringType) String() string {
	return fmt.Sprint([]byte(t))
}
//multi line options (number of lines in part header argumentCount)

// multiLineOptions is a sequence of plainOptions sets, one per line.
type multiLineOptions []plainOptions

// size returns the total encoded byte size of all lines.
func (o multiLineOptions) size() int {
	size := 0
	for _, m := range o {
		size += m.size()
	}
	return size
}

//pointer receiver: read appends the decoded lines to multiLineOptions itself.

// read decodes lineCnt lines; each line is an int16 option count
// followed by that many options.
func (o *multiLineOptions) read(rd *bufio.Reader, lineCnt int) error {
	for i := 0; i < lineCnt; i++ {
		m := plainOptions{}
		cnt, err := rd.ReadInt16()
		if err != nil {
			return err
		}
		if err := m.read(rd, int(cnt)); err != nil {
			return err
		}
		*o = append(*o, m)
	}
	return nil
}

// write encodes each line as an int16 option count followed by the options.
func (o multiLineOptions) write(wr *bufio.Writer) error {
	for _, m := range o {
		if err := wr.WriteInt16(int16(len(m))); err != nil {
			return err
		}
		if err := m.write(wr); err != nil {
			return err
		}
	}
	return nil
}
type plainOptions map[int8]interface{}
func (o plainOptions) size() int {
size := 2 * len(o) //option + type
for _, v := range o {
switch v := v.(type) {
default:
outLogger.Fatalf("type %T not implemented", v)
case booleanType:
size++
case intType:
size += 4
case bigintType:
size += 8
case doubleType:
size += 8
case stringType:
size += (2 + len(v)) //length int16 + string length
case binaryStringType:
size += (2 + len(v)) //length int16 + string length
}
}
return size
}
// read decodes cnt options. Each option is a one-byte key, a one-byte
// type code and a type-dependent value. An unknown type code is fatal:
// without its size the stream cannot be resynchronized.
func (o plainOptions) read(rd *bufio.Reader, cnt int) error {
	for i := 0; i < cnt; i++ {
		k, err := rd.ReadInt8()
		if err != nil {
			return err
		}
		tc, err := rd.ReadByte()
		if err != nil {
			return err
		}
		switch typeCode(tc) {
		default:
			outLogger.Fatalf("type code %s not implemented", typeCode(tc))
		case tcBoolean:
			if v, err := rd.ReadBool(); err == nil {
				o[k] = booleanType(v)
			} else {
				return err
			}
		case tcInt:
			if v, err := rd.ReadInt32(); err == nil {
				o[k] = intType(v)
			} else {
				return err
			}
		case tcBigint:
			if v, err := rd.ReadInt64(); err == nil {
				o[k] = bigintType(v)
			} else {
				return err
			}
		case tcDouble:
			if v, err := rd.ReadFloat64(); err == nil {
				o[k] = doubleType(v)
			} else {
				return err
			}
		case tcString:
			// int16 length prefix followed by the raw bytes
			size, err := rd.ReadInt16()
			if err != nil {
				return err
			}
			v := make([]byte, size)
			if err := rd.ReadFull(v); err == nil {
				o[k] = stringType(v)
			} else {
				return err
			}
		case tcBstring:
			// int16 length prefix followed by the raw bytes
			size, err := rd.ReadInt16()
			if err != nil {
				return err
			}
			v := make([]byte, size)
			if err := rd.ReadFull(v); err == nil {
				o[k] = binaryStringType(v)
			} else {
				return err
			}
		}
	}
	return nil
}
// write encodes each option as key byte, type-code byte and value.
// NOTE(review): map iteration makes the on-wire option order random;
// presumably the server accepts options in any order - confirm.
func (o plainOptions) write(wr *bufio.Writer) error {
	for k, v := range o {
		if err := wr.WriteInt8(k); err != nil {
			return err
		}
		switch v := v.(type) {
		default:
			// unknown value type is a programming error
			outLogger.Fatalf("type %T not implemented", v)
		case booleanType:
			if err := wr.WriteInt8(int8(tcBoolean)); err != nil {
				return err
			}
			if err := wr.WriteBool(bool(v)); err != nil {
				return err
			}
		case intType:
			if err := wr.WriteInt8(int8(tcInt)); err != nil {
				return err
			}
			if err := wr.WriteInt32(int32(v)); err != nil {
				return err
			}
		case bigintType:
			if err := wr.WriteInt8(int8(tcBigint)); err != nil {
				return err
			}
			if err := wr.WriteInt64(int64(v)); err != nil {
				return err
			}
		case doubleType:
			if err := wr.WriteInt8(int8(tcDouble)); err != nil {
				return err
			}
			if err := wr.WriteFloat64(float64(v)); err != nil {
				return err
			}
		case stringType:
			// int16 length prefix followed by the raw bytes
			if err := wr.WriteInt8(int8(tcString)); err != nil {
				return err
			}
			if err := wr.WriteInt16(int16(len(v))); err != nil {
				return err
			}
			if _, err := wr.Write(v); err != nil {
				return err
			}
		case binaryStringType:
			// int16 length prefix followed by the raw bytes
			if err := wr.WriteInt8(int8(tcBstring)); err != nil {
				return err
			}
			if err := wr.WriteInt16(int16(len(v))); err != nil {
				return err
			}
			if _, err := wr.Write(v); err != nil {
				return err
			}
		}
	}
	return nil
}

View File

@ -0,0 +1,316 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"database/sql/driver"
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
// parameterOptions is the option flag byte of a parameter field.
type parameterOptions int8

const (
	poMandatory parameterOptions = 0x01
	poOptional  parameterOptions = 0x02
	poDefault   parameterOptions = 0x04
)

var parameterOptionsText = map[parameterOptions]string{
	poMandatory: "mandatory",
	poOptional:  "optional",
	poDefault:   "default",
}

// String returns the names of the set flags in declaration order.
// Iterating a fixed slice (instead of ranging over the map, whose
// iteration order is random) makes the output deterministic.
func (k parameterOptions) String() string {
	t := make([]string, 0, len(parameterOptionsText))
	for _, option := range []parameterOptions{poMandatory, poOptional, poDefault} {
		if (k & option) != 0 {
			t = append(t, parameterOptionsText[option])
		}
	}
	return fmt.Sprintf("%v", t)
}
// parameterMode is the in/out mode byte of a parameter field.
type parameterMode int8

const (
	pmIn    parameterMode = 0x01
	pmInout parameterMode = 0x02
	pmOut   parameterMode = 0x04
)

var parameterModeText = map[parameterMode]string{
	pmIn:    "in",
	pmInout: "inout",
	pmOut:   "out",
}

// String returns the names of the set mode flags in declaration order.
// Iterating a fixed slice (instead of ranging over the map, whose
// iteration order is random) makes the output deterministic.
func (k parameterMode) String() string {
	t := make([]string, 0, len(parameterModeText))
	for _, mode := range []parameterMode{pmIn, pmInout, pmOut} {
		if (k & mode) != 0 {
			t = append(t, parameterModeText[mode])
		}
	}
	return fmt.Sprintf("%v", t)
}
// parameterField describes one prepared-statement parameter as carried
// in the parameter metadata part.
type parameterField struct {
	parameterOptions parameterOptions
	tc               typeCode
	mode             parameterMode
	fraction         int16
	length           int16
	nameOffset       uint32
}

// newParameterField returns a zero-valued field ready to be filled by read.
func newParameterField() *parameterField {
	f := parameterField{}
	return &f
}

// String implements fmt.Stringer for trace output.
func (f *parameterField) String() string {
	return fmt.Sprintf("parameterOptions %s typeCode %s mode %s fraction %d length %d nameOffset %d",
		f.parameterOptions, f.tc, f.mode, f.fraction, f.length, f.nameOffset)
}

// field interface

// typeCode returns the parameter's wire type code.
func (f *parameterField) typeCode() typeCode {
	return f.tc
}

// in reports whether the parameter carries input data.
func (f *parameterField) in() bool {
	switch f.mode {
	case pmIn, pmInout:
		return true
	}
	return false
}

// out reports whether the parameter carries output data.
func (f *parameterField) out() bool {
	switch f.mode {
	case pmOut, pmInout:
		return true
	}
	return false
}

// name resolves the parameter name via the shared offset-to-name table.
func (f *parameterField) name(names map[uint32]string) string {
	return names[f.nameOffset]
}

// nameOffsets lists the name-table offsets referenced by this field.
func (f *parameterField) nameOffsets() []uint32 {
	return []uint32{f.nameOffset}
}
// read decodes one parameter field in wire order: options, type code,
// mode, 1 filler byte, name offset, length, fraction, 4 filler bytes.
func (f *parameterField) read(rd *bufio.Reader) error {
	var err error
	if po, err := rd.ReadInt8(); err == nil {
		f.parameterOptions = parameterOptions(po)
	} else {
		return err
	}
	if tc, err := rd.ReadInt8(); err == nil {
		f.tc = typeCode(tc)
	} else {
		return err
	}
	if mode, err := rd.ReadInt8(); err == nil {
		f.mode = parameterMode(mode)
	} else {
		return err
	}
	if err := rd.Skip(1); err != nil { //filler
		return err
	}
	if f.nameOffset, err = rd.ReadUint32(); err != nil {
		return err
	}
	if f.length, err = rd.ReadInt16(); err != nil {
		return err
	}
	if f.fraction, err = rd.ReadInt16(); err != nil {
		return err
	}
	if err := rd.Skip(4); err != nil { //filler
		return err
	}
	return nil
}
// parameter metadata

// parameterMetadata is the reply part describing the parameters of a
// prepared statement; decoded fields and names are stored into fieldSet.
type parameterMetadata struct {
	fieldSet *FieldSet
	numArg   int
}

// String implements fmt.Stringer for trace output.
func (m *parameterMetadata) String() string {
	return fmt.Sprintf("parameter metadata: %s", m.fieldSet.fields)
}

// kind returns the part kind of this part.
func (m *parameterMetadata) kind() partKind {
	return pkParameterMetadata
}

// setNumArg records the field count announced in the part header.
func (m *parameterMetadata) setNumArg(numArg int) {
	m.numArg = numArg
}

// read decodes numArg parameter fields followed by the name table.
// Names are stored length-prefixed at ascending offsets; the gap up to
// the next referenced offset is skipped.
func (m *parameterMetadata) read(rd *bufio.Reader) error {
	for i := 0; i < m.numArg; i++ {
		field := newParameterField()
		if err := field.read(rd); err != nil {
			return err
		}
		m.fieldSet.fields[i] = field
	}
	pos := uint32(0)
	for _, offset := range m.fieldSet.nameOffsets() {
		if diff := int(offset - pos); diff > 0 {
			// fix: the Skip error was previously dropped, which could
			// leave the stream desynchronized on a short read
			if err := rd.Skip(diff); err != nil {
				return err
			}
		}
		b, size, err := readShortUtf8(rd)
		if err != nil {
			return err
		}
		m.fieldSet.names[offset] = string(b)
		// NOTE(review): pos advances by the name bytes only, not by any
		// skipped gap - presumably offsets are contiguous; confirm.
		pos += uint32(1 + size)
	}
	if trace {
		outLogger.Printf("read %s", m)
	}
	return nil
}
// parameters

// parameters is the request part carrying statement arguments; fields
// holds only the input fields of the statement's field set.
type parameters struct {
	fields []field //input fields
	args   []driver.Value
}

// newParameters selects the input fields from fieldSet and pairs them
// with the given arguments.
func newParameters(fieldSet *FieldSet, args []driver.Value) *parameters {
	p := &parameters{args: args}
	p.fields = make([]field, 0, len(fieldSet.fields))
	for _, f := range fieldSet.fields {
		if f.in() {
			p.fields = append(p.fields, f)
		}
	}
	return p
}
// kind returns the part kind of this request part.
func (m *parameters) kind() partKind {
	return pkParameters
}

// size returns the encoded size of all arguments: one indicator byte
// per argument plus the per-field data size.
func (m *parameters) size() (int, error) {
	size := len(m.args)
	cnt := len(m.fields)
	if cnt == 0 {
		// no input fields (e.g. statement without parameters): avoid
		// the i%0 panic below; mirrors the guard in numArg
		return size, nil
	}
	for i, arg := range m.args {
		if arg == nil { // null value: indicator byte only
			continue
		}
		// mass insert: arguments cycle through the input fields
		field := m.fields[i%cnt]
		// renamed local (was fieldSize) to stop shadowing the package
		// function fieldSize
		argSize, err := fieldSize(field.typeCode(), arg)
		if err != nil {
			return 0, err
		}
		size += argSize
	}
	return size, nil
}

// numArg returns the number of argument rows (mass insert: total
// arguments divided by the number of input fields).
func (m *parameters) numArg() int {
	cnt := len(m.fields)
	if cnt == 0 { // avoid divide-by-zero (e.g. prepare without parameters)
		return 0
	}
	return len(m.args) / cnt
}

// write encodes all arguments field by field.
func (m parameters) write(wr *bufio.Writer) error {
	if cnt := len(m.fields); cnt > 0 { // guard i%0 for parameterless statements
		for i, arg := range m.args {
			//mass insert: arguments cycle through the input fields
			if err := writeField(wr, m.fields[i%cnt].typeCode(), arg); err != nil {
				return err
			}
		}
	}
	if trace {
		outLogger.Printf("parameters: %s", m)
	}
	return nil
}
// output parameter

// outputParameters is the reply part carrying procedure output values;
// decoded values are appended to fieldValues.
type outputParameters struct {
	numArg      int
	fieldSet    *FieldSet
	fieldValues *FieldValues
}

// String implements fmt.Stringer for trace output.
func (r *outputParameters) String() string {
	return fmt.Sprintf("output parameters: %v", r.fieldValues)
}

// kind returns the part kind of this part.
func (r *outputParameters) kind() partKind {
	return pkOutputParameters
}

// setNumArg records the row count announced in the part header.
func (r *outputParameters) setNumArg(numArg int) {
	r.numArg = numArg // should always be 1
}

// read decodes numArg rows of output values into fieldValues.
func (r *outputParameters) read(rd *bufio.Reader) error {
	if err := r.fieldValues.read(r.numArg, r.fieldSet, rd); err != nil {
		return err
	}
	if trace {
		outLogger.Printf("read %s", r)
	}
	return nil
}

174
vendor/github.com/SAP/go-hdb/internal/protocol/part.go generated vendored Normal file
View File

@ -0,0 +1,174 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
const (
	partHeaderSize = 16 // fixed byte size of a part header on the wire
)

// requestPart is implemented by every part that can be written in a request.
type requestPart interface {
	kind() partKind
	size() (int, error)
	numArg() int
	write(*bufio.Writer) error
}

// replyPart is implemented by every part that can be read from a reply.
type replyPart interface {
	//kind() partKind
	setNumArg(int)
	read(*bufio.Reader) error
}

// PartAttributes is an interface defining methods for reading query resultset parts.
type PartAttributes interface {
	ResultsetClosed() bool
	LastPacket() bool
	NoRows() bool
}
// partAttributes is the attribute flag byte of a part header; it
// implements the PartAttributes interface.
type partAttributes int8

const (
	paLastPacket      partAttributes = 0x01
	paNextPacket      partAttributes = 0x02
	paFirstPacket     partAttributes = 0x04
	paRowNotFound     partAttributes = 0x08
	paResultsetClosed partAttributes = 0x10
)

var partAttributesText = map[partAttributes]string{
	paLastPacket:      "lastPacket",
	paNextPacket:      "nextPacket",
	paFirstPacket:     "firstPacket",
	paRowNotFound:     "rowNotFound",
	paResultsetClosed: "resultsetClosed",
}

// String returns the names of the set flags in declaration order.
// Iterating a fixed slice (instead of ranging over the map, whose
// iteration order is random) makes the output deterministic.
func (k partAttributes) String() string {
	t := make([]string, 0, len(partAttributesText))
	for _, attr := range []partAttributes{paLastPacket, paNextPacket, paFirstPacket, paRowNotFound, paResultsetClosed} {
		if (k & attr) != 0 {
			t = append(t, partAttributesText[attr])
		}
	}
	return fmt.Sprintf("%v", t)
}

// ResultsetClosed reports whether the server closed the resultset.
func (k partAttributes) ResultsetClosed() bool {
	return (k & paResultsetClosed) == paResultsetClosed
}

// LastPacket reports whether this is the final fetch packet.
func (k partAttributes) LastPacket() bool {
	return (k & paLastPacket) == paLastPacket
}

// NoRows reports whether the query completed without returning rows
// (last packet and row not found both set).
func (k partAttributes) NoRows() bool {
	attrs := paLastPacket | paRowNotFound
	return (k & attrs) == attrs
}
// part header

// partHeader is the fixed 16-byte header preceding every part.
type partHeader struct {
	partKind         partKind
	partAttributes   partAttributes
	argumentCount    int16
	bigArgumentCount int32
	bufferLength     int32
	bufferSize       int32
}

// String implements fmt.Stringer for trace output.
func (h *partHeader) String() string {
	return fmt.Sprintf("part kind %s partAttributes %s argumentCount %d bigArgumentCount %d bufferLength %d bufferSize %d",
		h.partKind,
		h.partAttributes,
		h.argumentCount,
		h.bigArgumentCount,
		h.bufferLength,
		h.bufferSize,
	)
}
// write encodes the header in wire order: kind, attributes, argument
// count, big argument count, buffer length, buffer size (no filler).
func (h *partHeader) write(wr *bufio.Writer) error {
	if err := wr.WriteInt8(int8(h.partKind)); err != nil {
		return err
	}
	if err := wr.WriteInt8(int8(h.partAttributes)); err != nil {
		return err
	}
	if err := wr.WriteInt16(h.argumentCount); err != nil {
		return err
	}
	if err := wr.WriteInt32(h.bigArgumentCount); err != nil {
		return err
	}
	if err := wr.WriteInt32(h.bufferLength); err != nil {
		return err
	}
	if err := wr.WriteInt32(h.bufferSize); err != nil {
		return err
	}
	//no filler
	if trace {
		outLogger.Printf("write part header: %s", h)
	}
	return nil
}
// read decodes the header in the same wire order written by write.
func (h *partHeader) read(rd *bufio.Reader) error {
	var err error
	if pk, err := rd.ReadInt8(); err == nil {
		h.partKind = partKind(pk)
	} else {
		return err
	}
	if pa, err := rd.ReadInt8(); err == nil {
		h.partAttributes = partAttributes(pa)
	} else {
		return err
	}
	if h.argumentCount, err = rd.ReadInt16(); err != nil {
		return err
	}
	if h.bigArgumentCount, err = rd.ReadInt32(); err != nil {
		return err
	}
	if h.bufferLength, err = rd.ReadInt32(); err != nil {
		return err
	}
	if h.bufferSize, err = rd.ReadInt32(); err != nil {
		return err
	}
	// no filler
	if trace {
		outLogger.Printf("read part header: %s", h)
	}
	return nil
}

View File

@ -0,0 +1,65 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=partKind

// partKind identifies the content type of a protocol part (values are
// defined by the HANA wire protocol).
type partKind int8

// Wire-protocol part kind codes. Gaps are codes this driver does not use.
const (
	pkNil partKind = 0
	pkCommand partKind = 3
	pkResultset partKind = 5
	pkError partKind = 6
	pkStatementID partKind = 10
	pkTransactionID partKind = 11
	pkRowsAffected partKind = 12
	pkResultsetID partKind = 13
	pkTopologyInformation partKind = 15
	pkTableLocation partKind = 16
	pkReadLobRequest partKind = 17
	pkReadLobReply partKind = 18
	pkAbapIStream partKind = 25
	pkAbapOStream partKind = 26
	pkCommandInfo partKind = 27
	pkWriteLobRequest partKind = 28
	pkWriteLobReply partKind = 30
	pkParameters partKind = 32
	pkAuthentication partKind = 33
	pkSessionContext partKind = 34
	pkStatementContext partKind = 39
	pkPartitionInformation partKind = 40
	pkOutputParameters partKind = 41
	pkConnectOptions partKind = 42
	pkCommitOptions partKind = 43
	pkFetchOptions partKind = 44
	pkFetchSize partKind = 45
	pkParameterMetadata partKind = 47
	pkResultMetadata partKind = 48
	pkFindLobRequest partKind = 49
	pkFindLobReply partKind = 50
	pkItabSHM partKind = 51
	pkItabChunkMetadata partKind = 53
	pkItabMetadata partKind = 55
	pkItabResultChunk partKind = 56
	pkClientInfo partKind = 57
	pkStreamData partKind = 58
	pkOStreamResult partKind = 59
	pkFDARequestMetadata partKind = 60
	pkFDAReplyMetadata partKind = 61
	pkTransactionFlags partKind = 64
)

View File

@ -0,0 +1,58 @@
// generated by stringer -type=partKind; DO NOT EDIT
package protocol
import "fmt"
// Concatenated partKind names and the value-to-slice lookup table.
// Generated by stringer; do not hand-edit - regenerate via `go generate`.
const _partKind_name = "pkNilpkCommandpkResultsetpkErrorpkStatementIDpkTransactionIDpkRowsAffectedpkResultsetIDpkTopologyInformationpkTableLocationpkReadLobRequestpkReadLobReplypkAbapIStreampkAbapOStreampkCommandInfopkWriteLobRequestpkWriteLobReplypkParameterspkAuthenticationpkSessionContextpkStatementContextpkPartitionInformationpkOutputParameterspkConnectOptionspkCommitOptionspkFetchOptionspkFetchSizepkParameterMetadatapkResultMetadatapkFindLobRequestpkFindLobReplypkItabSHMpkItabChunkMetadatapkItabMetadatapkItabResultChunkpkClientInfopkStreamDatapkOStreamResultpkFDARequestMetadatapkFDAReplyMetadatapkTransactionFlags"

var _partKind_map = map[partKind]string{
	0: _partKind_name[0:5],
	3: _partKind_name[5:14],
	5: _partKind_name[14:25],
	6: _partKind_name[25:32],
	10: _partKind_name[32:45],
	11: _partKind_name[45:60],
	12: _partKind_name[60:74],
	13: _partKind_name[74:87],
	15: _partKind_name[87:108],
	16: _partKind_name[108:123],
	17: _partKind_name[123:139],
	18: _partKind_name[139:153],
	25: _partKind_name[153:166],
	26: _partKind_name[166:179],
	27: _partKind_name[179:192],
	28: _partKind_name[192:209],
	30: _partKind_name[209:224],
	32: _partKind_name[224:236],
	33: _partKind_name[236:252],
	34: _partKind_name[252:268],
	39: _partKind_name[268:286],
	40: _partKind_name[286:308],
	41: _partKind_name[308:326],
	42: _partKind_name[326:342],
	43: _partKind_name[342:357],
	44: _partKind_name[357:371],
	45: _partKind_name[371:382],
	47: _partKind_name[382:401],
	48: _partKind_name[401:417],
	49: _partKind_name[417:433],
	50: _partKind_name[433:447],
	51: _partKind_name[447:456],
	53: _partKind_name[456:475],
	55: _partKind_name[475:489],
	56: _partKind_name[489:506],
	57: _partKind_name[506:518],
	58: _partKind_name[518:530],
	59: _partKind_name[530:545],
	60: _partKind_name[545:565],
	61: _partKind_name[565:583],
	64: _partKind_name[583:601],
}

// String implements fmt.Stringer via the generated lookup table.
func (i partKind) String() string {
	if str, ok := _partKind_map[i]; ok {
		return str
	}
	return fmt.Sprintf("partKind(%d)", i)
}

View File

@ -0,0 +1,29 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=QueryType

// QueryType is the type definition for query types supported by this package.
type QueryType byte

// Query type constants.
const (
	QtNone          QueryType = iota // no result (e.g. DDL, DML)
	QtSelect                         // query returning a resultset
	QtProcedureCall                  // stored procedure call
)

View File

@ -0,0 +1,16 @@
// generated by stringer -type=QueryType; DO NOT EDIT
package protocol
import "fmt"
// Concatenated QueryType names and index table. Generated by stringer;
// do not hand-edit - regenerate via `go generate`.
const _QueryType_name = "QtNoneQtSelectQtProcedureCall"

var _QueryType_index = [...]uint8{0, 6, 14, 29}

// String implements fmt.Stringer via the generated index table.
func (i QueryType) String() string {
	if i >= QueryType(len(_QueryType_index)-1) {
		return fmt.Sprintf("QueryType(%d)", i)
	}
	return _QueryType_name[_QueryType_index[i]:_QueryType_index[i+1]]
}

View File

@ -0,0 +1,277 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
const (
	resultsetIDSize = 8 // a resultset id is transferred as one uint64
)

// columnOptions is the option flag byte of a result field.
type columnOptions int8

const (
	coMandatory columnOptions = 0x01
	coOptional  columnOptions = 0x02
)

var columnOptionsText = map[columnOptions]string{
	coMandatory: "mandatory",
	coOptional:  "optional",
}

// String returns the names of the set flags in declaration order.
// Iterating a fixed slice (instead of ranging over the map, whose
// iteration order is random) makes the output deterministic.
func (k columnOptions) String() string {
	t := make([]string, 0, len(columnOptionsText))
	for _, option := range []columnOptions{coMandatory, coOptional} {
		if (k & option) != 0 {
			t = append(t, columnOptionsText[option])
		}
	}
	return fmt.Sprintf("%v", t)
}
//resultset id

// resultsetID wraps a pointer to the resultset id so that reading the
// part updates the caller-owned id in place.
type resultsetID struct {
	id *uint64
}

// kind returns the part kind of this part.
func (id *resultsetID) kind() partKind {
	return pkResultsetID
}

// size returns the fixed wire size (one uint64).
func (id *resultsetID) size() (int, error) {
	return resultsetIDSize, nil
}

// numArg returns 1: a resultset id part always carries exactly one id.
func (id *resultsetID) numArg() int {
	return 1
}

// setNumArg is a no-op; the argument count is always 1.
func (id *resultsetID) setNumArg(int) {
	//ignore - always 1
}

// read decodes the id and stores it through the wrapped pointer.
func (id *resultsetID) read(rd *bufio.Reader) error {
	_id, err := rd.ReadUint64()
	if err != nil {
		return err
	}
	*id.id = _id
	if trace {
		outLogger.Printf("resultset id: %d", *id.id)
	}
	return nil
}

// write encodes the id referenced by the wrapped pointer.
func (id *resultsetID) write(wr *bufio.Writer) error {
	if err := wr.WriteUint64(*id.id); err != nil {
		return err
	}
	if trace {
		outLogger.Printf("resultset id: %d", *id.id)
	}
	return nil
}
// Indexes into the per-column name quadruple (table, schema, column,
// display name) of a result field.
const (
	resultTableName = iota // used as index: start with 0
	resultSchemaName
	resultColumnName
	resultColumnDisplayName
	maxResultNames
)
// resultField describes one result column as carried in the result
// metadata part.
type resultField struct {
	columnOptions           columnOptions
	tc                      typeCode
	fraction                int16
	length                  int16
	tablenameOffset         uint32
	schemanameOffset        uint32
	columnnameOffset        uint32
	columnDisplaynameOffset uint32
}

// newResultField returns a zero-valued field ready to be filled by read.
func newResultField() *resultField {
	f := resultField{}
	return &f
}

// String implements fmt.Stringer for trace output.
func (f *resultField) String() string {
	const layout = "columnsOptions %s typeCode %s fraction %d length %d tablenameOffset %d schemanameOffset %d columnnameOffset %d columnDisplaynameOffset %d"
	return fmt.Sprintf(layout,
		f.columnOptions, f.tc, f.fraction, f.length,
		f.tablenameOffset, f.schemanameOffset, f.columnnameOffset, f.columnDisplaynameOffset)
}

// Field interface

// typeCode returns the column's wire type code.
func (f *resultField) typeCode() typeCode {
	return f.tc
}

// in reports whether the field carries input data - never for result columns.
func (f *resultField) in() bool {
	return false
}

// out reports whether the field carries output data - always for result columns.
func (f *resultField) out() bool {
	return true
}

// name resolves the column name via the shared offset-to-name table.
func (f *resultField) name(names map[uint32]string) string {
	return names[f.columnnameOffset]
}

// nameOffsets lists all name-table offsets referenced by this field.
func (f *resultField) nameOffsets() []uint32 {
	return []uint32{f.tablenameOffset, f.schemanameOffset, f.columnnameOffset, f.columnDisplaynameOffset}
}
// read decodes one result field in wire order: options, type code,
// fraction, length, 2 filler bytes, then the four name offsets.
func (f *resultField) read(rd *bufio.Reader) error {
	var err error
	if co, err := rd.ReadInt8(); err == nil {
		f.columnOptions = columnOptions(co)
	} else {
		return err
	}
	if tc, err := rd.ReadInt8(); err == nil {
		f.tc = typeCode(tc)
	} else {
		return err
	}
	if f.fraction, err = rd.ReadInt16(); err != nil {
		return err
	}
	if f.length, err = rd.ReadInt16(); err != nil {
		return err
	}
	if err := rd.Skip(2); err != nil { //filler
		return err
	}
	if f.tablenameOffset, err = rd.ReadUint32(); err != nil {
		return err
	}
	if f.schemanameOffset, err = rd.ReadUint32(); err != nil {
		return err
	}
	if f.columnnameOffset, err = rd.ReadUint32(); err != nil {
		return err
	}
	if f.columnDisplaynameOffset, err = rd.ReadUint32(); err != nil {
		return err
	}
	return nil
}
//resultset metadata

// resultMetadata is the reply part describing the columns of a
// resultset; decoded fields and names are stored into fieldSet.
type resultMetadata struct {
	fieldSet *FieldSet
	numArg   int
}

// String implements fmt.Stringer for trace output.
func (r *resultMetadata) String() string {
	return fmt.Sprintf("result metadata: %s", r.fieldSet.fields)
}

// kind returns the part kind of this part.
func (r *resultMetadata) kind() partKind {
	return pkResultMetadata
}

// setNumArg records the column count announced in the part header.
func (r *resultMetadata) setNumArg(numArg int) {
	r.numArg = numArg
}

// read decodes numArg result fields followed by the name table. Names
// are stored length-prefixed at ascending offsets; the gap up to the
// next referenced offset is skipped.
func (r *resultMetadata) read(rd *bufio.Reader) error {
	for i := 0; i < r.numArg; i++ {
		field := newResultField()
		if err := field.read(rd); err != nil {
			return err
		}
		r.fieldSet.fields[i] = field
	}
	pos := uint32(0)
	for _, offset := range r.fieldSet.nameOffsets() {
		if diff := int(offset - pos); diff > 0 {
			// fix: the Skip error was previously dropped, which could
			// leave the stream desynchronized on a short read
			if err := rd.Skip(diff); err != nil {
				return err
			}
		}
		b, size, err := readShortUtf8(rd)
		if err != nil {
			return err
		}
		r.fieldSet.names[offset] = string(b)
		// NOTE(review): pos advances by the name bytes only, not by any
		// skipped gap - presumably offsets are contiguous; confirm.
		pos += uint32(1 + size)
	}
	if trace {
		outLogger.Printf("read %s", r)
	}
	return nil
}
//resultset

// resultset is the reply part carrying fetched rows; decoded values
// are appended to fieldValues.
type resultset struct {
	numArg      int
	fieldSet    *FieldSet
	fieldValues *FieldValues
}

// String implements fmt.Stringer for trace output.
func (r *resultset) String() string {
	return fmt.Sprintf("resultset: %s", r.fieldValues)
}

// kind returns the part kind of this part.
func (r *resultset) kind() partKind {
	return pkResultset
}

// setNumArg records the row count announced in the part header.
func (r *resultset) setNumArg(numArg int) {
	r.numArg = numArg
}

// read decodes numArg rows into fieldValues.
func (r *resultset) read(rd *bufio.Reader) error {
	if err := r.fieldValues.read(r.numArg, r.fieldSet, rd); err != nil {
		return err
	}
	if trace {
		outLogger.Printf("read %s", r)
	}
	return nil
}

View File

@ -0,0 +1,76 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"github.com/SAP/go-hdb/internal/bufio"
)
//rows affected

// Special per-statement row counts (see the rows affected part of the
// wire protocol).
const (
	raSuccessNoInfo   = -2 // statement succeeded, row count unknown
	raExecutionFailed = -3 // statement failed
)

//rows affected

// rowsAffected is the reply part carrying one row count per executed
// statement (mass operations yield several counts).
type rowsAffected struct {
	sums []int32
	_numArg int
}
// kind returns the part kind of this part.
func (r *rowsAffected) kind() partKind {
	return pkRowsAffected
}

// setNumArg records the count of row-count entries announced in the
// part header.
func (r *rowsAffected) setNumArg(numArg int) {
	r._numArg = numArg
}

// read decodes _numArg int32 row counts, reusing the sums slice when
// its capacity suffices.
func (r *rowsAffected) read(rd *bufio.Reader) error {
	if r.sums == nil || r._numArg > cap(r.sums) {
		r.sums = make([]int32, r._numArg)
	} else {
		r.sums = r.sums[:r._numArg]
	}
	var err error
	for i := 0; i < r._numArg; i++ {
		r.sums[i], err = rd.ReadInt32()
		if err != nil {
			return err
		}
	}
	if trace {
		outLogger.Printf("rows affected %v", r.sums)
	}
	return nil
}
// total returns the sum of all per-statement row counts, or 0 when no
// counts have been read. NOTE(review): negative markers such as
// raSuccessNoInfo are summed as-is - confirm callers handle that.
func (r *rowsAffected) total() int64 {
	if r.sums == nil {
		return 0
	}
	var total int64
	for _, s := range r.sums {
		total += int64(s)
	}
	return total
}

View File

@ -0,0 +1,332 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//Salted Challenge Response Authentication Mechanism (SCRAM)
import (
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
// Fixed sizes of the SCRAM-SHA-256 handshake fields (bytes).
const (
	clientChallengeSize     = 64
	serverChallengeDataSize = 68
	clientProofDataSize     = 35 // 3 header bytes + clientProofSize
	clientProofSize         = 32 // sha256 digest size
)
// scramsha256InitialRequest is the first authentication part sent by
// the client: username, method name and client challenge.
type scramsha256InitialRequest struct {
	username        []byte
	clientChallenge []byte
}

func newScramsha256InitialRequest() *scramsha256InitialRequest {
	return &scramsha256InitialRequest{}
}

// kind returns the part kind of this request part.
func (r *scramsha256InitialRequest) kind() partKind {
	return pkAuthentication
}

// size returns the encoded size: int16 field count plus the three
// length-prefixed auth fields.
func (r *scramsha256InitialRequest) size() (int, error) {
	return 2 + authFieldSize(r.username) + authFieldSize([]byte(mnSCRAMSHA256)) + authFieldSize(r.clientChallenge), nil
}

// numArg returns 1: the part carries one authentication record.
func (r *scramsha256InitialRequest) numArg() int {
	return 1
}

// write encodes the field count followed by username, method name and
// client challenge.
func (r *scramsha256InitialRequest) write(wr *bufio.Writer) error {
	if err := wr.WriteInt16(3); err != nil { //field count
		return err
	}
	if err := writeAuthField(wr, r.username); err != nil {
		return err
	}
	if err := writeAuthField(wr, []byte(mnSCRAMSHA256)); err != nil {
		return err
	}
	if err := writeAuthField(wr, r.clientChallenge); err != nil {
		return err
	}
	return nil
}
// scramsha256InitialReply is the server's answer to the initial
// request: the password salt and the server challenge.
type scramsha256InitialReply struct {
	salt            []byte
	serverChallenge []byte
}

func newScramsha256InitialReply() *scramsha256InitialReply {
	return &scramsha256InitialReply{}
}

// kind returns the part kind of this reply part.
func (r *scramsha256InitialReply) kind() partKind {
	return pkAuthentication
}

// setNumArg is a no-op; the record count is not used when decoding.
func (r *scramsha256InitialReply) setNumArg(int) {
	//not needed
}

// read decodes method name and the server challenge data (salt plus
// server challenge).
func (r *scramsha256InitialReply) read(rd *bufio.Reader) error {
	// NOTE(review): this outer field count is read but never validated
	// (compare scramsha256FinalReply.read) - confirm intended.
	cnt, err := rd.ReadInt16()
	if err != nil {
		return err
	}
	if err := readMethodName(rd); err != nil {
		return err
	}
	size, err := rd.ReadByte()
	if err != nil {
		return err
	}
	if size != serverChallengeDataSize {
		return fmt.Errorf("invalid server challenge data size %d - %d expected", size, serverChallengeDataSize)
	}
	//server challenge data
	cnt, err = rd.ReadInt16()
	if err != nil {
		return err
	}
	if cnt != 2 {
		return fmt.Errorf("invalid server challenge data field count %d - %d expected", cnt, 2)
	}
	size, err = rd.ReadByte()
	if err != nil {
		return err
	}
	if trace {
		outLogger.Printf("salt size %d", size)
	}
	r.salt = make([]byte, size)
	if err := rd.ReadFull(r.salt); err != nil {
		return err
	}
	if trace {
		outLogger.Printf("salt %v", r.salt)
	}
	size, err = rd.ReadByte()
	if err != nil {
		return err
	}
	r.serverChallenge = make([]byte, size)
	if err := rd.ReadFull(r.serverChallenge); err != nil {
		return err
	}
	if trace {
		outLogger.Printf("server challenge %v", r.serverChallenge)
	}
	return nil
}
// scramsha256FinalRequest is the second authentication part sent by
// the client: username, method name and the computed client proof.
type scramsha256FinalRequest struct {
	username    []byte
	clientProof []byte
}

func newScramsha256FinalRequest() *scramsha256FinalRequest {
	return &scramsha256FinalRequest{}
}

// kind returns the part kind of this request part.
func (r *scramsha256FinalRequest) kind() partKind {
	return pkAuthentication
}

// size returns the encoded size: int16 field count plus the three
// length-prefixed auth fields.
func (r *scramsha256FinalRequest) size() (int, error) {
	return 2 + authFieldSize(r.username) + authFieldSize([]byte(mnSCRAMSHA256)) + authFieldSize(r.clientProof), nil
}

// numArg returns 1: the part carries one authentication record.
func (r *scramsha256FinalRequest) numArg() int {
	return 1
}

// write encodes the field count followed by username, method name and
// client proof.
func (r *scramsha256FinalRequest) write(wr *bufio.Writer) error {
	if err := wr.WriteInt16(3); err != nil { //field count
		return err
	}
	if err := writeAuthField(wr, r.username); err != nil {
		return err
	}
	if err := writeAuthField(wr, []byte(mnSCRAMSHA256)); err != nil {
		return err
	}
	if err := writeAuthField(wr, r.clientProof); err != nil {
		return err
	}
	return nil
}
// scramsha256FinalReply is the server's answer to the final request,
// carrying the server proof.
type scramsha256FinalReply struct {
	serverProof []byte
}

func newScramsha256FinalReply() *scramsha256FinalReply {
	return &scramsha256FinalReply{}
}

// kind returns the part kind of this reply part.
func (r *scramsha256FinalReply) kind() partKind {
	return pkAuthentication
}

// setNumArg is a no-op; the record count is not used when decoding.
func (r *scramsha256FinalReply) setNumArg(int) {
	//not needed
}

// read decodes the field count, method name and server proof.
func (r *scramsha256FinalReply) read(rd *bufio.Reader) error {
	cnt, err := rd.ReadInt16()
	if err != nil {
		return err
	}
	if cnt != 2 {
		return fmt.Errorf("invalid final reply field count %d - %d expected", cnt, 2)
	}
	if err := readMethodName(rd); err != nil {
		return err
	}
	//serverProof
	size, err := rd.ReadByte()
	if err != nil {
		return err
	}
	serverProof := make([]byte, size)
	if err := rd.ReadFull(serverProof); err != nil {
		return err
	}
	// fix: the decoded proof was previously read into a local and
	// discarded, leaving r.serverProof always nil
	r.serverProof = serverProof
	return nil
}
//helper

// authFieldSize returns the wire size of authentication field f: one
// length-indicator byte plus the field content. Fields of 250 bytes or
// more would need a multi-byte indicator, which is not implemented.
func authFieldSize(f []byte) int {
	if len(f) >= 250 {
		// - different indicators compared to db field handling
		// - 1-5 bytes? but only 1 resp 3 bytes explained
		panic("not implemented error")
	}
	return len(f) + 1 //length indicator size := 1
}
// writeAuthField encodes f as a one-byte length indicator followed by
// the raw bytes. Fields of 250 bytes or more would need a multi-byte
// indicator, which is not implemented.
func writeAuthField(wr *bufio.Writer, f []byte) error {
	size := len(f)
	if size >= 250 {
		// - different indicators compared to db field handling
		// - 1-5 bytes? but only 1 resp 3 bytes explained
		panic("not implemented error")
	}
	if err := wr.WriteByte(byte(size)); err != nil {
		return err
	}
	if _, err := wr.Write(f); err != nil {
		return err
	}
	return nil
}
// readMethodName reads a length-prefixed method-name field from rd and
// verifies that it names the SCRAMSHA256 method; any other method name
// is reported as an error.
func readMethodName(rd *bufio.Reader) error {
	size, err := rd.ReadByte()
	if err != nil {
		return err
	}
	name := make([]byte, size)
	if err := rd.ReadFull(name); err != nil {
		return err
	}
	if string(name) != mnSCRAMSHA256 {
		return fmt.Errorf("invalid authentication method %s - %s expected", name, mnSCRAMSHA256)
	}
	return nil
}
// clientChallenge returns clientChallengeSize bytes of random data for
// the SCRAM exchange. A failure of the random source is treated as
// fatal (outLogger.Fatal exits the process).
// NOTE(review): rand is presumably crypto/rand given the authentication
// context — confirm at this file's import block (not visible here).
func clientChallenge() []byte {
	r := make([]byte, clientChallengeSize)
	if _, err := rand.Read(r); err != nil {
		outLogger.Fatal("client challenge fatal error")
	}
	return r
}
// clientProof computes the SCRAM-SHA256 client proof part sent in the
// final authentication request:
//
//	key   = SHA256(HMAC(password, salt))
//	sig   = HMAC(SHA256(key), salt || serverChallenge || clientChallenge)
//	proof = sig XOR key
//
// The returned buffer is a 3-byte header (0, count 1, proof length)
// followed by the proof itself.
func clientProof(salt, serverChallenge, clientChallenge, password []byte) []byte {
	clientProof := make([]byte, clientProofDataSize)
	// concatenate salt || serverChallenge || clientChallenge as HMAC input
	buf := make([]byte, 0, len(salt)+len(serverChallenge)+len(clientChallenge))
	buf = append(buf, salt...)
	buf = append(buf, serverChallenge...)
	buf = append(buf, clientChallenge...)
	key := _sha256(_hmac(password, salt))
	sig := _hmac(_sha256(key), buf)
	proof := xor(sig, key)
	//actual implementation: only one salt value?
	clientProof[0] = 0
	clientProof[1] = 1               // proof count
	clientProof[2] = clientProofSize // proof length
	copy(clientProof[3:], proof)
	return clientProof
}
// _sha256 returns the SHA-256 digest of p, logging the result when
// protocol tracing is enabled.
func _sha256(p []byte) []byte {
	digest := sha256.New()
	digest.Write(p)
	sum := digest.Sum(nil)
	if trace {
		outLogger.Printf("sha length %d value %v", len(sum), sum)
	}
	return sum
}

// _hmac returns the HMAC-SHA256 of p under key, logging the result when
// protocol tracing is enabled.
func _hmac(key, p []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(p)
	sum := mac.Sum(nil)
	if trace {
		outLogger.Printf("hmac length %d value %v", len(sum), sum)
	}
	return sum
}
// xor returns the element-wise XOR of sig and key. The result has the
// length of sig; key must be at least as long as sig.
func xor(sig, key []byte) []byte {
	out := make([]byte, len(sig))
	for i := range sig {
		out[i] = sig[i] ^ key[i]
	}
	return out
}

View File

@ -0,0 +1,238 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
const (
	// segmentHeaderSize is the fixed wire size of a segment header in bytes.
	segmentHeaderSize = 24
)

// commandOptions is a bit set of execution options carried in request
// segment headers.
type commandOptions int8

const (
	coNil                    commandOptions = 0x00
	coSelfetchOff            commandOptions = 0x01
	coScrollableCursorOn     commandOptions = 0x02
	coNoResultsetCloseNeeded commandOptions = 0x04
	coHoldCursorOverCommtit  commandOptions = 0x08 // NOTE(review): identifier misspells "Commit"
	coExecuteLocally         commandOptions = 0x10
)

// commandOptionsText maps each option bit to its display text.
// NOTE(review): several display strings contain typos ("scrollabeCursorOn",
// "noResltsetCloseNeeded", "executLocally"); they are runtime output and
// therefore left untouched here.
var commandOptionsText = map[commandOptions]string{
	coSelfetchOff:            "selfetchOff",
	coScrollableCursorOn:     "scrollabeCursorOn",
	coNoResultsetCloseNeeded: "noResltsetCloseNeeded",
	coHoldCursorOverCommtit:  "holdCursorOverCommit",
	coExecuteLocally:         "executLocally",
}

// String renders the set option bits as a list of their display texts.
// Map iteration order is random, so the option order in the output is
// not deterministic.
func (k commandOptions) String() string {
	t := make([]string, 0, len(commandOptionsText))
	for option, text := range commandOptionsText {
		if (k & option) != 0 {
			t = append(t, text)
		}
	}
	return fmt.Sprintf("%v", t)
}
//segment header
// segmentHeader is the 24-byte header preceding every protocol segment.
// The first 13 bytes are common; the remaining 11 are interpreted by
// segmentKind (request: messageType/commit/commandOptions; reply:
// functionCode).
type segmentHeader struct {
	segmentLength  int32 // total segment length in bytes, header included
	segmentOfs     int32 // segment offset within the message
	noOfParts      int16 // number of parts in this segment
	segmentNo      int16 // segment sequence number within the message
	segmentKind    segmentKind
	messageType    messageType    // request only
	commit         bool           // request only: auto-commit flag
	commandOptions commandOptions // request only
	functionCode   functionCode   // reply only
}
// String renders the header for tracing. The rendered fields depend on
// the segment kind: requests show message type, commit flag and command
// options; replies show the function code; anything else shows only the
// common fields.
func (h *segmentHeader) String() string {
	switch h.segmentKind {
	default: //error
		return fmt.Sprintf(
			"segment length %d segment ofs %d noOfParts %d, segmentNo %d segmentKind %s",
			h.segmentLength,
			h.segmentOfs,
			h.noOfParts,
			h.segmentNo,
			h.segmentKind,
		)
	case skRequest:
		return fmt.Sprintf(
			"segment length %d segment ofs %d noOfParts %d, segmentNo %d segmentKind %s messageType %s commit %t commandOptions %s",
			h.segmentLength,
			h.segmentOfs,
			h.noOfParts,
			h.segmentNo,
			h.segmentKind,
			h.messageType,
			h.commit,
			h.commandOptions,
		)
	case skReply:
		return fmt.Sprintf(
			"segment length %d segment ofs %d noOfParts %d, segmentNo %d segmentKind %s functionCode %s",
			h.segmentLength,
			h.segmentOfs,
			h.noOfParts,
			h.segmentNo,
			h.segmentKind,
			h.functionCode,
		)
	}
}
// request
// write encodes the segment header to wr. The common prefix is 13 bytes
// (two int32, two int16, one int8); the kind-specific tail is always 11
// bytes so the total matches segmentHeaderSize (24): requests write
// messageType + commit + commandOptions + 8 reserved bytes, replies
// write 1 reserved byte + functionCode + 8 reserved bytes, and any
// other kind writes 11 zero bytes.
func (h *segmentHeader) write(wr *bufio.Writer) error {
	if err := wr.WriteInt32(h.segmentLength); err != nil {
		return err
	}
	if err := wr.WriteInt32(h.segmentOfs); err != nil {
		return err
	}
	if err := wr.WriteInt16(h.noOfParts); err != nil {
		return err
	}
	if err := wr.WriteInt16(h.segmentNo); err != nil {
		return err
	}
	if err := wr.WriteInt8(int8(h.segmentKind)); err != nil {
		return err
	}
	switch h.segmentKind {
	default: //error
		if err := wr.WriteZeroes(11); err != nil { //segmentHeaderLength
			return err
		}
	case skRequest:
		if err := wr.WriteInt8(int8(h.messageType)); err != nil {
			return err
		}
		if err := wr.WriteBool(h.commit); err != nil {
			return err
		}
		if err := wr.WriteInt8(int8(h.commandOptions)); err != nil {
			return err
		}
		if err := wr.WriteZeroes(8); err != nil { //segmentHeaderSize
			return err
		}
	case skReply:
		if err := wr.WriteZeroes(1); err != nil { //reserved
			return err
		}
		if err := wr.WriteInt16(int16(h.functionCode)); err != nil {
			return err
		}
		if err := wr.WriteZeroes(8); err != nil { //segmentHeaderSize
			return err
		}
	}
	if trace {
		outLogger.Printf("write segment header: %s", h)
	}
	return nil
}
// reply || error
// read decodes a segment header from rd, mirroring write: 13 common
// bytes followed by an 11-byte kind-specific tail (skipped entirely for
// unknown kinds). The `if v, err := ...; err == nil` pattern below
// deliberately shadows err to convert the raw integers into their typed
// counterparts.
func (h *segmentHeader) read(rd *bufio.Reader) error {
	var err error
	if h.segmentLength, err = rd.ReadInt32(); err != nil {
		return err
	}
	if h.segmentOfs, err = rd.ReadInt32(); err != nil {
		return err
	}
	if h.noOfParts, err = rd.ReadInt16(); err != nil {
		return err
	}
	if h.segmentNo, err = rd.ReadInt16(); err != nil {
		return err
	}
	if sk, err := rd.ReadInt8(); err == nil {
		h.segmentKind = segmentKind(sk)
	} else {
		return err
	}
	switch h.segmentKind {
	default: //error
		if err := rd.Skip(11); err != nil { //segmentHeaderLength
			return err
		}
	case skRequest:
		if mt, err := rd.ReadInt8(); err == nil {
			h.messageType = messageType(mt)
		} else {
			return err
		}
		if h.commit, err = rd.ReadBool(); err != nil {
			return err
		}
		if co, err := rd.ReadInt8(); err == nil {
			h.commandOptions = commandOptions(co)
		} else {
			return err
		}
		if err := rd.Skip(8); err != nil { //segmentHeaderLength
			return err
		}
	case skReply:
		if err := rd.Skip(1); err != nil { //reserved
			return err
		}
		if fc, err := rd.ReadInt16(); err == nil {
			h.functionCode = functionCode(fc)
		} else {
			return err
		}
		if err := rd.Skip(8); err != nil { //segmentHeaderLength
			return err
		}
	}
	if trace {
		outLogger.Printf("read segment header: %s", h)
	}
	return nil
}

View File

@ -0,0 +1,28 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=segmentKind

// segmentKind discriminates the kind-specific tail of a segment header.
type segmentKind int8

const (
	skInvalid segmentKind = 0
	skRequest segmentKind = 1
	skReply   segmentKind = 2
	skError   segmentKind = 5 // note the gap: 3 and 4 are unused
)

View File

@ -0,0 +1,26 @@
// generated by stringer -type=segmentKind; DO NOT EDIT
package protocol
import "fmt"
const (
_segmentKind_name_0 = "skInvalidskRequestskReply"
_segmentKind_name_1 = "skError"
)
var (
_segmentKind_index_0 = [...]uint8{0, 9, 18, 25}
_segmentKind_index_1 = [...]uint8{0, 7}
)
func (i segmentKind) String() string {
switch {
case 0 <= i && i <= 2:
return _segmentKind_name_0[_segmentKind_index_0[i]:_segmentKind_index_0[i+1]]
case i == 5:
return _segmentKind_name_1
default:
return fmt.Sprintf("segmentKind(%d)", i)
}
}

View File

@ -0,0 +1,986 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"database/sql/driver"
"flag"
"fmt"
"log"
"math"
"net"
"os"
"time"
"github.com/SAP/go-hdb/internal/bufio"
"github.com/SAP/go-hdb/internal/unicode"
"github.com/SAP/go-hdb/internal/unicode/cesu8"
"github.com/SAP/go-hdb/driver/sqltrace"
)
const (
	// authentication method names understood by the protocol; only
	// SCRAMSHA256 is implemented (GSS and SAML panic in Session.init).
	mnSCRAMSHA256 = "SCRAMSHA256"
	mnGSS         = "GSS"
	mnSAML        = "SAML"
)

// trace enables verbose hdb protocol logging; controlled by the
// -hdb.protocol.trace command line flag.
var trace bool

func init() {
	flag.BoolVar(&trace, "hdb.protocol.trace", false, "enabling hdb protocol trace")
}

// package loggers: regular trace output to stdout, connection errors to
// stderr, both tagged with date/time/file-line.
var (
	outLogger = log.New(os.Stdout, "hdb.protocol ", log.Ldate|log.Ltime|log.Lshortfile)
	errLogger = log.New(os.Stderr, "hdb.protocol ", log.Ldate|log.Ltime|log.Lshortfile)
)
//padding
const (
	padding = 8
)

// padBytes returns the number of filler bytes needed to round size up
// to the next multiple of the protocol padding unit (8 bytes); zero if
// size is already aligned.
func padBytes(size int) int {
	rem := size % padding
	if rem == 0 {
		return 0
	}
	return padding - rem
}
// SessionConn wraps the database tcp connection. It sets timeouts and handles driver ErrBadConn behavior.
type sessionConn struct {
	addr            string
	timeoutDuration time.Duration // applied as read/write deadline per operation
	conn            net.Conn
	isBad           bool  // bad connection
	badError        error // error cause for session bad state
	inTx            bool  // in transaction
}

// newSessionConn dials addr over TCP, using timeout (seconds) both for
// the dial itself and as the per-operation read/write deadline.
func newSessionConn(addr string, timeout int) (*sessionConn, error) {
	timeoutDuration := time.Duration(timeout) * time.Second
	conn, err := net.DialTimeout("tcp", addr, timeoutDuration)
	if err != nil {
		return nil, err
	}
	return &sessionConn{
		addr:            addr,
		timeoutDuration: timeoutDuration,
		conn:            conn,
	}, nil
}

// close closes the underlying network connection.
func (c *sessionConn) close() error {
	return c.conn.Close()
}
// Read implements the io.Reader interface. Each read first arms the
// configured read deadline; on error the connection is flagged bad, the
// cause is recorded, and driver.ErrBadConn is returned so database/sql
// discards the connection.
func (c *sessionConn) Read(b []byte) (int, error) {
	//set timeout
	if err := c.conn.SetReadDeadline(time.Now().Add(c.timeoutDuration)); err != nil {
		return 0, err
	}
	n, err := c.conn.Read(b)
	if err != nil {
		errLogger.Printf("Connection read error local address %s remote address %s: %s", c.conn.LocalAddr(), c.conn.RemoteAddr(), err)
		c.isBad = true
		c.badError = err
		return n, driver.ErrBadConn
	}
	return n, nil
}
// Write implements the io.Writer interface. Each write first arms the
// configured write deadline; on error the connection is flagged bad, the
// cause is recorded, and driver.ErrBadConn is returned so database/sql
// discards the connection.
func (c *sessionConn) Write(b []byte) (int, error) {
	// arm the write deadline for this operation
	if err := c.conn.SetWriteDeadline(time.Now().Add(c.timeoutDuration)); err != nil {
		return 0, err
	}
	n, err := c.conn.Write(b)
	if err == nil {
		return n, nil
	}
	errLogger.Printf("Connection write error local address %s remote address %s: %s", c.conn.LocalAddr(), c.conn.RemoteAddr(), err)
	c.isBad = true
	c.badError = err
	return n, driver.ErrBadConn
}
// providePart lets a caller of readReply supply the reply-part object
// for a given part kind (nil falls back to the session's reusable parts).
type providePart func(pk partKind) replyPart

// beforeRead is invoked on each reply part after setNumArg but before
// its read, so the caller can wire in output destinations.
type beforeRead func(p replyPart)

// Session represents a HDB session.
// All header and part objects are allocated once and reused across
// requests to avoid per-call allocations; a Session is therefore not
// safe for concurrent use.
type Session struct {
	prm *SessionPrm

	conn *sessionConn
	rd   *bufio.Reader
	wr   *bufio.Writer

	// reuse header
	mh *messageHeader
	sh *segmentHeader
	ph *partHeader

	//reuse request / reply parts
	rowsAffected      *rowsAffected
	statementID       *statementID
	resultMetadata    *resultMetadata
	resultsetID       *resultsetID
	resultset         *resultset
	parameterMetadata *parameterMetadata
	outputParameters  *outputParameters
	readLobRequest    *readLobRequest
	readLobReply      *readLobReply

	//standard replies
	stmtCtx   *statementContext
	txFlags   *transactionFlags
	lastError *hdbError
}
// NewSession creates a new database session: it dials the server, wraps
// the connection in buffered reader/writer (sized by prm.BufferSize when
// positive), pre-allocates the reusable header and part objects, and
// performs the protocol handshake and authentication via init.
func NewSession(prm *SessionPrm) (*Session, error) {
	if trace {
		outLogger.Printf("%s", prm)
	}

	conn, err := newSessionConn(prm.Host, prm.Timeout)
	if err != nil {
		return nil, err
	}

	var rd *bufio.Reader
	var wr *bufio.Writer
	if prm.BufferSize > 0 {
		rd = bufio.NewReaderSize(conn, prm.BufferSize)
		wr = bufio.NewWriterSize(conn, prm.BufferSize)
	} else {
		rd = bufio.NewReader(conn)
		wr = bufio.NewWriter(conn)
	}

	s := &Session{
		prm:               prm,
		conn:              conn,
		rd:                rd,
		wr:                wr,
		mh:                new(messageHeader),
		sh:                new(segmentHeader),
		ph:                new(partHeader),
		rowsAffected:      new(rowsAffected),
		statementID:       new(statementID),
		resultMetadata:    new(resultMetadata),
		resultsetID:       new(resultsetID),
		resultset:         new(resultset),
		parameterMetadata: new(parameterMetadata),
		outputParameters:  new(outputParameters),
		readLobRequest:    new(readLobRequest),
		readLobReply:      new(readLobReply),
		stmtCtx:           newStatementContext(),
		txFlags:           newTransactionFlags(),
		lastError:         newHdbError(),
	}

	// handshake + authentication; the connection is not usable on failure
	if err = s.init(); err != nil {
		return nil, err
	}

	return s, nil
}
// Close closes the session.
func (s *Session) Close() error {
	return s.conn.close()
}

// sessionID returns the server-assigned session id from the last
// message header.
func (s *Session) sessionID() int64 {
	return s.mh.sessionID
}

// InTx indicates, if the session is in transaction mode.
func (s *Session) InTx() bool {
	return s.conn.inTx
}

// SetInTx sets session in transaction mode.
func (s *Session) SetInTx(v bool) {
	s.conn.inTx = v
}

// IsBad indicates, that the session is in bad state.
func (s *Session) IsBad() bool {
	return s.conn.isBad
}

// BadErr returns the error, that caused the bad session state.
func (s *Session) BadErr() error {
	return s.conn.badError
}
// init performs the protocol handshake and then authenticates the
// session. Only SCRAM-SHA256 is implemented; GSS and SAML panic.
// After authentication the server-assigned session id must be positive.
func (s *Session) init() error {

	if err := s.initRequest(); err != nil {
		return err
	}

	// TODO: detect authentication method
	// - actually only basic authentication supported
	authentication := mnSCRAMSHA256

	switch authentication {
	default:
		return fmt.Errorf("invalid authentication %s", authentication)
	case mnSCRAMSHA256:
		if err := s.authenticateScramsha256(); err != nil {
			return err
		}
	case mnGSS:
		panic("not implemented error")
	case mnSAML:
		panic("not implemented error")
	}

	id := s.sessionID()
	if id <= 0 {
		return fmt.Errorf("invalid session id %d", id)
	}

	if trace {
		outLogger.Printf("sessionId %d", id)
	}

	return nil
}
// authenticateScramsha256 runs the two-round SCRAM-SHA256 exchange:
// (1) send username + client challenge, receive salt + server challenge;
// (2) send username + client proof together with client id and connect
// options, receive server proof, topology and negotiated options.
// Username and password are transcoded from UTF-8 to CESU-8 first.
func (s *Session) authenticateScramsha256() error {
	tr := unicode.Utf8ToCesu8Transformer
	tr.Reset()

	// NOTE(review): tr is reset once and reused for both transforms —
	// confirm the transformer carries no state across complete transforms.
	username := make([]byte, cesu8.StringSize(s.prm.Username))
	if _, _, err := tr.Transform(username, []byte(s.prm.Username), true); err != nil {
		return err // should never happen
	}

	password := make([]byte, cesu8.StringSize(s.prm.Password))
	if _, _, err := tr.Transform(password, []byte(s.prm.Password), true); err != nil {
		return err //should never happen
	}

	clientChallenge := clientChallenge()

	//initial request
	ireq := newScramsha256InitialRequest()
	ireq.username = username
	ireq.clientChallenge = clientChallenge

	if err := s.writeRequest(mtAuthenticate, false, ireq); err != nil {
		return err
	}

	irep := newScramsha256InitialReply()

	f := func(pk partKind) replyPart {
		switch pk {
		case pkAuthentication:
			return irep
		default:
			return nil
		}
	}

	if err := s.readReply(f, nil); err != nil {
		return err
	}

	//final request: proof derived from server salt/challenge + password
	freq := newScramsha256FinalRequest()
	freq.username = username
	freq.clientProof = clientProof(irep.salt, irep.serverChallenge, clientChallenge, password)

	id := newClientID()

	co := newConnectOptions()
	co.set(coDistributionProtocolVersion, booleanType(false))
	co.set(coSelectForUpdateSupported, booleanType(false))
	co.set(coSplitBatchCommands, booleanType(true))
	co.set(coDataFormatVersion, dfvBaseline)
	co.set(coDataFormatVersion2, dfvBaseline)
	co.set(coCompleteArrayExecution, booleanType(true))
	co.set(coClientLocale, stringType(s.prm.Locale))
	co.set(coClientDistributionMode, cdmOff)

	if err := s.writeRequest(mtConnect, false, freq, id, co); err != nil {
		return err
	}

	frep := newScramsha256FinalReply()
	topo := newTopologyOptions()

	f = func(pk partKind) replyPart {
		switch pk {
		case pkAuthentication:
			return frep
		case pkTopologyInformation:
			return topo
		case pkConnectOptions:
			// server writes the negotiated options back into co
			return co
		default:
			return nil
		}
	}

	if err := s.readReply(f, nil); err != nil {
		return err
	}

	return nil
}
// QueryDirect executes a query without query parameters. It returns the
// server resultset id, the result field metadata, the first chunk of
// row values and the part attributes (which indicate e.g. whether the
// resultset is already closed/complete).
func (s *Session) QueryDirect(query string) (uint64, *FieldSet, *FieldValues, PartAttributes, error) {

	if err := s.writeRequest(mtExecuteDirect, false, command(query)); err != nil {
		return 0, nil, nil, nil, err
	}

	var id uint64
	var fieldSet *FieldSet
	fieldValues := newFieldValues(s)

	// wire the reply parts to local destinations before they are read
	f := func(p replyPart) {

		switch p := p.(type) {

		case *resultsetID:
			p.id = &id
		case *resultMetadata:
			fieldSet = newFieldSet(p.numArg)
			p.fieldSet = fieldSet
		case *resultset:
			p.fieldSet = fieldSet
			p.fieldValues = fieldValues
		}
	}

	if err := s.readReply(nil, f); err != nil {
		return 0, nil, nil, nil, err
	}

	attrs := s.ph.partAttributes

	return id, fieldSet, fieldValues, attrs, nil
}
// ExecDirect executes a sql statement without statement parameters.
// Outside an explicit transaction the request asks the server to
// auto-commit. DDL statements report driver.ResultNoRows; everything
// else reports the accumulated rows-affected count.
func (s *Session) ExecDirect(query string) (driver.Result, error) {

	if err := s.writeRequest(mtExecuteDirect, !s.conn.inTx, command(query)); err != nil {
		return nil, err
	}

	if err := s.readReply(nil, nil); err != nil {
		return nil, err
	}

	if s.sh.functionCode == fcDDL {
		return driver.ResultNoRows, nil
	}

	return driver.RowsAffected(s.rowsAffected.total()), nil
}
// Prepare prepares a sql statement. It returns the query type (derived
// from the reply's function code), the server statement id, the
// parameter field metadata and — for queries — the result field
// metadata.
func (s *Session) Prepare(query string) (QueryType, uint64, *FieldSet, *FieldSet, error) {

	if err := s.writeRequest(mtPrepare, false, command(query)); err != nil {
		return QtNone, 0, nil, nil, err
	}

	var id uint64
	var prmFieldSet *FieldSet
	var resultFieldSet *FieldSet

	// wire the reply parts to local destinations before they are read
	f := func(p replyPart) {

		switch p := p.(type) {

		case *statementID:
			p.id = &id
		case *parameterMetadata:
			prmFieldSet = newFieldSet(p.numArg)
			p.fieldSet = prmFieldSet
		case *resultMetadata:
			resultFieldSet = newFieldSet(p.numArg)
			p.fieldSet = resultFieldSet
		}
	}

	if err := s.readReply(nil, f); err != nil {
		return QtNone, 0, nil, nil, err
	}

	return s.sh.functionCode.queryType(), id, prmFieldSet, resultFieldSet, nil
}
// Exec executes a prepared sql statement identified by id with the
// given arguments. Outside an explicit transaction the request asks the
// server to auto-commit. If the statement carries LOB parameters the
// remaining LOB data is streamed to the server afterwards.
func (s *Session) Exec(id uint64, parameterFieldSet *FieldSet, args []driver.Value) (driver.Result, error) {

	s.statementID.id = &id
	if err := s.writeRequest(mtExecute, !s.conn.inTx, s.statementID, newParameters(parameterFieldSet, args)); err != nil {
		return nil, err
	}

	wlr := newWriteLobReply() //lob streaming

	f := func(pk partKind) replyPart {
		switch pk {
		case pkWriteLobReply:
			return wlr
		default:
			return nil
		}
	}

	if err := s.readReply(f, nil); err != nil {
		return nil, err
	}

	var result driver.Result
	if s.sh.functionCode == fcDDL {
		result = driver.ResultNoRows
	} else {
		result = driver.RowsAffected(s.rowsAffected.total())
	}

	// numArg > 0: the server expects more LOB data for these parameters
	if wlr.numArg > 0 {
		if err := s.writeLobStream(parameterFieldSet, nil, args, wlr); err != nil {
			return nil, err
		}
	}

	return result, nil
}
// DropStatementID releases the hdb statement handle.
func (s *Session) DropStatementID(id uint64) error {
	s.statementID.id = &id
	if err := s.writeRequest(mtDropStatementID, false, s.statementID); err != nil {
		return err
	}
	return s.readReply(nil, nil)
}
// Call executes a stored procedure identified by statement id with the
// given arguments. It returns the output parameter values and one
// TableResult per table output parameter; LOB input parameters are
// streamed to the server afterwards if needed.
func (s *Session) Call(id uint64, prmFieldSet *FieldSet, args []driver.Value) (*FieldValues, []*TableResult, error) {

	s.statementID.id = &id
	if err := s.writeRequest(mtExecute, false, s.statementID, newParameters(prmFieldSet, args)); err != nil {
		return nil, nil, err
	}

	wlr := newWriteLobReply() //lob streaming

	f := func(pk partKind) replyPart {
		switch pk {
		case pkWriteLobReply:
			return wlr
		default:
			return nil
		}
	}

	prmFieldValues := newFieldValues(s)
	var tableResults []*TableResult
	var tableResult *TableResult

	g := func(p replyPart) {

		switch p := p.(type) {

		case *outputParameters:
			p.fieldSet = prmFieldSet
			p.fieldValues = prmFieldValues

		// table output parameters: meta, id, result (only first param?)
		// NOTE(review): the resultsetID/resultset cases dereference
		// tableResult, so a resultMetadata part is assumed to always
		// precede them — confirm against the protocol.
		case *resultMetadata:
			tableResult = newTableResult(s, p.numArg)
			tableResults = append(tableResults, tableResult)
			p.fieldSet = tableResult.fieldSet
		case *resultsetID:
			p.id = &(tableResult.id)
		case *resultset:
			tableResult.attrs = s.ph.partAttributes
			p.fieldSet = tableResult.fieldSet
			p.fieldValues = tableResult.fieldValues
		}
	}

	if err := s.readReply(f, g); err != nil {
		return nil, nil, err
	}

	// numArg > 0: the server expects more LOB data for these parameters
	if wlr.numArg > 0 {
		if err := s.writeLobStream(prmFieldSet, prmFieldValues, args, wlr); err != nil {
			return nil, nil, err
		}
	}

	return prmFieldValues, tableResults, nil
}
// Query executes a prepared query identified by stmtID with the given
// arguments. It returns the server resultset id, the first chunk of row
// values and the part attributes.
func (s *Session) Query(stmtID uint64, parameterFieldSet *FieldSet, resultFieldSet *FieldSet, args []driver.Value) (uint64, *FieldValues, PartAttributes, error) {

	s.statementID.id = &stmtID
	if err := s.writeRequest(mtExecute, false, s.statementID, newParameters(parameterFieldSet, args)); err != nil {
		return 0, nil, nil, err
	}

	var rsetID uint64
	fieldValues := newFieldValues(s)

	// wire the reply parts to local destinations before they are read
	f := func(p replyPart) {

		switch p := p.(type) {

		case *resultsetID:
			p.id = &rsetID
		case *resultset:
			p.fieldSet = resultFieldSet
			p.fieldValues = fieldValues
		}
	}

	if err := s.readReply(nil, f); err != nil {
		return 0, nil, nil, err
	}

	attrs := s.ph.partAttributes

	return rsetID, fieldValues, attrs, nil
}
// FetchNext fetches next chunk in query result set. The chunk size is
// bounded by the session's configured fetch size; the returned part
// attributes indicate whether the resultset is exhausted.
func (s *Session) FetchNext(id uint64, resultFieldSet *FieldSet) (*FieldValues, PartAttributes, error) {

	s.resultsetID.id = &id
	if err := s.writeRequest(mtFetchNext, false, s.resultsetID, fetchsize(s.prm.FetchSize)); err != nil {
		return nil, nil, err
	}

	fieldValues := newFieldValues(s)

	// wire the resultset part to local destinations before it is read
	f := func(p replyPart) {

		switch p := p.(type) {

		case *resultset:
			p.fieldSet = resultFieldSet
			p.fieldValues = fieldValues
		}
	}

	if err := s.readReply(nil, f); err != nil {
		return nil, nil, err
	}

	attrs := s.ph.partAttributes

	return fieldValues, attrs, nil
}
// CloseResultsetID releases the hdb resultset handle.
func (s *Session) CloseResultsetID(id uint64) error {
	s.resultsetID.id = &id
	if err := s.writeRequest(mtCloseResultset, false, s.resultsetID); err != nil {
		return err
	}
	return s.readReply(nil, nil)
}
// Commit executes a database commit. On success the session leaves
// transaction mode.
func (s *Session) Commit() error {

	if err := s.writeRequest(mtCommit, false); err != nil {
		return err
	}

	if err := s.readReply(nil, nil); err != nil {
		return err
	}

	if trace {
		outLogger.Printf("transaction flags: %s", s.txFlags)
	}

	s.conn.inTx = false
	return nil
}

// Rollback executes a database rollback. On success the session leaves
// transaction mode.
func (s *Session) Rollback() error {

	if err := s.writeRequest(mtRollback, false); err != nil {
		return err
	}

	if err := s.readReply(nil, nil); err != nil {
		return err
	}

	if trace {
		outLogger.Printf("transaction flags: %s", s.txFlags)
	}

	s.conn.inTx = false
	return nil
}
// helper
// readLobStreamDone reports whether every lob writer has received all of
// its data (eof on each); it short-circuits on the first unfinished one.
func readLobStreamDone(writers []lobWriter) bool {
	for _, writer := range writers {
		if !writer.eof() {
			return false
		}
	}
	return true
}
//
// readLobStream keeps requesting LOB chunks from the server until every
// writer has received all of its data.
// NOTE(review): the request is sent with message type mtWriteLob even
// though this is the read direction (writeLobStream conversely uses
// mtReadLob) — the two look swapped; confirm against the hdb protocol
// spec / upstream go-hdb before "fixing".
func (s *Session) readLobStream(writers []lobWriter) error {

	f := func(pk partKind) replyPart {
		switch pk {
		case pkReadLobReply:
			return s.readLobReply
		default:
			return nil
		}
	}

	for !readLobStreamDone(writers) {

		s.readLobRequest.writers = writers
		s.readLobReply.writers = writers

		if err := s.writeRequest(mtWriteLob, false, s.readLobRequest); err != nil {
			return err
		}
		if err := s.readReply(f, nil); err != nil {
			return err
		}
	}
	return nil
}
// writeLobStream uploads the remaining data of LOB input parameters.
// For each LOB field it resolves the driver value (an int64 pointer to
// a lob write descriptor), wraps the descriptor's reader as a char or
// binary lob reader bound to the server-assigned lob id, and loops
// until the server reports no outstanding lob arguments.
// NOTE(review): the request is sent with message type mtReadLob even
// though this is the write direction (readLobStream conversely uses
// mtWriteLob) — the two look swapped; confirm against the hdb protocol
// spec / upstream go-hdb before "fixing".
func (s *Session) writeLobStream(prmFieldSet *FieldSet, prmFieldValues *FieldValues, args []driver.Value, reply *writeLobReply) error {

	num := reply.numArg
	readers := make([]lobReader, num)

	request := newWriteLobRequest(readers)

	j := 0
	for i, field := range prmFieldSet.fields {

		if field.typeCode().isLob() && field.in() {

			ptr, ok := args[i].(int64)
			if !ok {
				return fmt.Errorf("protocol error: invalid lob driver value type %T", args[i])
			}

			descr := pointerToLobWriteDescr(ptr)
			if descr.r == nil {
				return fmt.Errorf("protocol error: lob reader %d initial", ptr)
			}

			if j >= num {
				return fmt.Errorf("protocol error: invalid number of lob parameter ids %d", num)
			}

			if field.typeCode().isCharBased() {
				readers[j] = newCharLobReader(descr.r, reply.ids[j])
			} else {
				readers[j] = newBinaryLobReader(descr.r, reply.ids[j])
			}

			j++
		}
	}

	f := func(pk partKind) replyPart {
		switch pk {
		case pkWriteLobReply:
			return reply
		default:
			return nil
		}
	}

	g := func(p replyPart) {
		if p, ok := p.(*outputParameters); ok {
			p.fieldSet = prmFieldSet
			p.fieldValues = prmFieldValues
		}
	}

	// loop until the server has consumed all outstanding lob data
	for reply.numArg != 0 {
		if err := s.writeRequest(mtReadLob, false, request); err != nil {
			return err
		}

		if err := s.readReply(f, g); err != nil {
			return err
		}
	}

	return nil
}
//
// initRequest performs the raw protocol handshake: it marks the session
// as not yet established (session id -1), sends the init request with
// hard-coded product/protocol versions and the host endianness, and
// reads the server's init reply.
func (s *Session) initRequest() error {

	// init
	s.mh.sessionID = -1

	// handshake
	req := newInitRequest()
	// TODO: constants
	req.product.major = 4
	req.product.minor = 20
	req.protocol.major = 4
	req.protocol.minor = 1
	req.numOptions = 1
	req.endianess = archEndian
	if err := req.write(s.wr); err != nil {
		return err
	}

	rep := newInitReply()
	if err := rep.read(s.rd); err != nil {
		return err
	}
	return nil
}
// writeRequest sends one message consisting of a single segment with
// the given request parts. It first sizes every part (caching the
// results, since sizing is expensive), checks the uint32/int32 protocol
// limits, then writes message header, segment header, and each part as
// part header + body + zero padding to the 8-byte boundary. bufferSize
// is decremented as parts are written; each part header records the
// space remaining after it.
func (s *Session) writeRequest(messageType messageType, commit bool, requests ...requestPart) error {

	partSize := make([]int, len(requests))

	size := int64(segmentHeaderSize + len(requests)*partHeaderSize) //int64 to hold MaxUInt32 in 32bit OS

	for i, part := range requests {
		s, err := part.size()
		if err != nil {
			return err
		}
		size += int64(s + padBytes(s))
		partSize[i] = s // buffer size (expensive calculation)
	}

	if size > math.MaxUint32 {
		return fmt.Errorf("message size %d exceeds maximum message header value %d", size, int64(math.MaxUint32)) //int64: without cast overflow error in 32bit OS
	}

	bufferSize := size

	s.mh.varPartLength = uint32(size)
	s.mh.varPartSize = uint32(bufferSize)
	s.mh.noOfSegm = 1

	if err := s.mh.write(s.wr); err != nil {
		return err
	}

	if size > math.MaxInt32 {
		return fmt.Errorf("message size %d exceeds maximum part header value %d", size, math.MaxInt32)
	}

	s.sh.messageType = messageType
	s.sh.commit = commit
	s.sh.segmentKind = skRequest
	s.sh.segmentLength = int32(size)
	s.sh.segmentOfs = 0
	s.sh.noOfParts = int16(len(requests))
	s.sh.segmentNo = 1

	if err := s.sh.write(s.wr); err != nil {
		return err
	}

	bufferSize -= segmentHeaderSize

	for i, part := range requests {

		size := partSize[i]
		pad := padBytes(size)

		s.ph.partKind = part.kind()
		numArg := part.numArg()
		switch {
		default:
			return fmt.Errorf("maximum number of arguments %d exceeded", numArg)
		case numArg <= math.MaxInt16:
			s.ph.argumentCount = int16(numArg)
			s.ph.bigArgumentCount = 0

		// TODO: seems not to work: see bulk insert test
		case numArg <= math.MaxInt32:
			s.ph.argumentCount = 0
			s.ph.bigArgumentCount = int32(numArg)
		}

		s.ph.bufferLength = int32(size)
		s.ph.bufferSize = int32(bufferSize)

		if err := s.ph.write(s.wr); err != nil {
			return err
		}

		if err := part.write(s.wr); err != nil {
			return err
		}

		// pad the part body to the 8-byte boundary
		if err := s.wr.WriteZeroes(pad); err != nil {
			return err
		}

		bufferSize -= int64(partHeaderSize + size + pad)

	}

	if err := s.wr.Flush(); err != nil {
		return err
	}

	return nil
}
// readReply reads one reply message (exactly one segment expected) and
// dispatches each part: providePart (if given) may supply a part object
// per part kind; otherwise the session's reusable parts are matched by
// kind. beforeRead (if given) is called after setNumArg but before the
// part's read, so callers can wire in output destinations. A pkError
// part is collected into lastError; warnings are only traced, real
// errors are returned after the whole reply has been consumed.
func (s *Session) readReply(providePart providePart, beforeRead beforeRead) error {

	replyError := false

	if err := s.mh.read(s.rd); err != nil {
		return err
	}
	if s.mh.noOfSegm != 1 {
		return fmt.Errorf("simple message: no of segments %d - expected 1", s.mh.noOfSegm)
	}
	if err := s.sh.read(s.rd); err != nil {
		return err
	}

	// TODO: protocol error (sps 82)?: message header varPartLength < segment header segmentLength (*1)
	diff := int(s.mh.varPartLength) - int(s.sh.segmentLength)
	if trace && diff != 0 {
		outLogger.Printf("+++++diff %d", diff)
	}

	noOfParts := int(s.sh.noOfParts)

	for i := 0; i < noOfParts; i++ {

		if err := s.ph.read(s.rd); err != nil {
			return err
		}

		numArg := int(s.ph.argumentCount)

		var part replyPart

		if providePart != nil {
			part = providePart(s.ph.partKind)
		} else {
			part = nil
		}

		if part == nil { // use pre defined parts

			switch s.ph.partKind {

			case pkStatementID:
				part = s.statementID
			case pkResultMetadata:
				part = s.resultMetadata
			case pkResultsetID:
				part = s.resultsetID
			case pkResultset:
				part = s.resultset
			case pkParameterMetadata:
				part = s.parameterMetadata
			case pkOutputParameters:
				part = s.outputParameters
			case pkError:
				replyError = true
				part = s.lastError
			case pkStatementContext:
				part = s.stmtCtx
			case pkTransactionFlags:
				part = s.txFlags
			case pkRowsAffected:
				part = s.rowsAffected
			default:
				return fmt.Errorf("read not expected part kind %s", s.ph.partKind)
			}
		}

		part.setNumArg(numArg)

		if beforeRead != nil {
			beforeRead(part)
		}

		if err := part.read(s.rd); err != nil {
			return err
		}

		// TODO: workaround (see *): skip padding except for the last
		// part when the header lengths disagree
		if i != (noOfParts-1) || (i == (noOfParts-1) && diff == 0) {
			if err := s.rd.Skip(padBytes(int(s.ph.bufferLength))); err != nil {
				return err
			}
		}
	}

	if replyError {
		if s.lastError.IsWarning() {
			sqltrace.Traceln(s.lastError)
		} else {
			return s.lastError
		}
	}
	return nil
}

View File

@ -0,0 +1,29 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import "fmt"
// SessionPrm bundles the parameters for establishing a database session:
// connection target and credentials, client locale, and buffer/fetch/
// timeout tuning values.
type SessionPrm struct {
	Host, Username, Password       string
	Locale                         string
	BufferSize, FetchSize, Timeout int
}

// String renders the tuning parameters and locale only; Host, Username
// and Password are not included in the output (so credentials never end
// up in trace logs).
func (p *SessionPrm) String() string {
	return fmt.Sprintf("session parameters: bufferSize %d fetchSize %d timeout %d locale %s", p.BufferSize, p.FetchSize, p.Timeout, p.Locale)
}

View File

@ -0,0 +1,207 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"log"
"net"
"github.com/SAP/go-hdb/internal/bufio"
)
// dir marks the direction of a sniffed protocol exchange:
// true = client-to-database, false = database-to-client.
type dir bool

const (
	// maxBinarySize caps how many bytes of a part body are logged.
	maxBinarySize = 128
)

// fragment is any protocol element that can be decoded from a reader
// and re-encoded to a writer (headers, init request/reply).
type fragment interface {
	read(rd *bufio.Reader) error
	write(wr *bufio.Writer) error
}

// String renders the direction as an arrow: "->" upstream (to the
// database), "<-" downstream (to the client).
func (d dir) String() string {
	if d {
		return "->"
	}
	return "<-"
}
// A Sniffer is a simple proxy for logging hdb protocol requests and responses.
// It sits between a client connection and the database, decoding and
// re-encoding headers while logging everything that passes through.
type Sniffer struct {
	conn   net.Conn // client side connection
	dbAddr string   // database address in "host:port" format
	dbConn net.Conn // database side connection

	//client
	clRd *bufio.Reader
	clWr *bufio.Writer
	//database
	dbRd *bufio.Reader
	dbWr *bufio.Writer

	// reusable headers for decode/re-encode
	mh *messageHeader
	sh *segmentHeader
	ph *partHeader

	buf []byte // reusable scratch buffer for part bodies (see getBuffer)
}
// NewSniffer creates a new sniffer instance. The conn parameter is the net.Conn connection, where the Sniffer
// is listening for hdb protocol calls. The dbAddr is the hdb host port address in "host:port" format.
// NOTE(review): when the dial to dbAddr fails, the accepted client conn
// is returned unclosed — the caller is responsible for closing it.
func NewSniffer(conn net.Conn, dbAddr string) (*Sniffer, error) {

	s := &Sniffer{
		conn:   conn,
		dbAddr: dbAddr,
		clRd:   bufio.NewReader(conn),
		clWr:   bufio.NewWriter(conn),
		mh:     &messageHeader{},
		sh:     &segmentHeader{},
		ph:     &partHeader{},
		buf:    make([]byte, 0),
	}

	dbConn, err := net.Dial("tcp", s.dbAddr)
	if err != nil {
		return nil, err
	}

	s.dbRd = bufio.NewReader(dbConn)
	s.dbWr = bufio.NewWriter(dbConn)
	s.dbConn = dbConn

	return s, nil
}
// getBuffer returns a scratch buffer of exactly size bytes, reusing the
// sniffer's internal buffer and reallocating only when it is too small.
func (s *Sniffer) getBuffer(size int) []byte {
	if size > cap(s.buf) {
		s.buf = make([]byte, size)
	}
	return s.buf[:size]
}
// Go starts the protocol request and response logging. It first relays
// the init handshake, then alternates forwarding complete messages
// upstream (client to database) and downstream. It returns — closing
// both connections — on the first error in either direction.
func (s *Sniffer) Go() {

	defer s.dbConn.Close()
	defer s.conn.Close()

	req := newInitRequest()
	if err := s.streamFragment(dir(true), s.clRd, s.dbWr, req); err != nil {
		return
	}

	rep := newInitReply()
	if err := s.streamFragment(dir(false), s.dbRd, s.clWr, rep); err != nil {
		return
	}

	for {
		//up stream
		if err := s.stream(dir(true), s.clRd, s.dbWr); err != nil {
			return
		}
		//down stream
		if err := s.stream(dir(false), s.dbRd, s.clWr); err != nil {
			return
		}
	}
}
// stream forwards one complete protocol message from from to to,
// logging the message header, every segment header, every part header
// and (a bounded prefix of) every part body. The padding decision for
// the last part of a segment mirrors the protocol-error workaround in
// Session.readReply.
//
// Fix: the original discarded the final to.Flush() error; a failed
// flush means the peer never received the forwarded message, so the
// error is now propagated (the caller, Go, stops on any error).
func (s *Sniffer) stream(d dir, from *bufio.Reader, to *bufio.Writer) error {

	if err := s.streamFragment(d, from, to, s.mh); err != nil {
		return err
	}

	size := int(s.mh.varPartLength)

	for i := 0; i < int(s.mh.noOfSegm); i++ {

		if err := s.streamFragment(d, from, to, s.sh); err != nil {
			return err
		}

		size -= int(s.sh.segmentLength)

		for j := 0; j < int(s.sh.noOfParts); j++ {

			if err := s.streamFragment(d, from, to, s.ph); err != nil {
				return err
			}

			// protocol error workaround
			padding := (size == 0) || (j != (int(s.sh.noOfParts) - 1))

			if err := s.streamPart(d, from, to, s.ph, padding); err != nil {
				return err
			}
		}
	}
	return to.Flush()
}
// streamPart forwards one part body. The switch currently has only the
// default arm (raw binary dump); it is presumably a hook for adding
// typed per-part-kind decoding later.
func (s *Sniffer) streamPart(d dir, from *bufio.Reader, to *bufio.Writer, ph *partHeader, padding bool) error {
	switch ph.partKind {
	default:
		return s.streamBinary(d, from, to, int(ph.bufferLength), padding)
	}
}
// streamBinary copies size bytes (plus the 8-byte-alignment padding when
// padding is set) from from to to, logging at most maxBinarySize bytes
// of the payload. The padding bytes are forwarded but not separately
// logged.
func (s *Sniffer) streamBinary(d dir, from *bufio.Reader, to *bufio.Writer, size int, padding bool) error {

	var b []byte

	//protocol error workaround
	if padding {
		pad := padBytes(size)
		b = s.getBuffer(size + pad)
	} else {
		b = s.getBuffer(size)
	}

	if err := from.ReadFull(b); err != nil {
		log.Print(err)
		return err
	}

	if size > maxBinarySize {
		log.Printf("%s %v", d, b[:maxBinarySize])
	} else {
		log.Printf("%s %v", d, b[:size])
	}

	if _, err := to.Write(b); err != nil {
		log.Print(err)
		return err
	}
	return nil
}
// streamFragment reads one protocol fragment (header or init handshake)
// from one side, logs it, and writes it to the other side, logging and
// returning the first error.
func (s *Sniffer) streamFragment(d dir, from *bufio.Reader, to *bufio.Writer, f fragment) error {
	if err := f.read(from); err != nil {
		log.Print(err)
		return err
	}
	log.Printf("%s %s", d, f)
	err := f.write(to)
	if err != nil {
		log.Print(err)
	}
	return err
}

View File

@ -0,0 +1,62 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
// statementContext represents the statement context part sent by the server
// (e.g. statement sequence info, server execution time).
type statementContext struct {
	options plainOptions
	_numArg int
}

func newStatementContext() *statementContext {
	return &statementContext{options: plainOptions{}}
}

// String renders the options with their keys typed as statementContextType
// so the generated stringer names appear in the output.
func (c *statementContext) String() string {
	typed := map[statementContextType]interface{}{}
	for key, value := range c.options {
		typed[statementContextType(key)] = value
	}
	return fmt.Sprintf("%s", typed)
}

func (c *statementContext) kind() partKind {
	return pkStatementContext
}

func (c *statementContext) setNumArg(numArg int) {
	c._numArg = numArg
}

// read reads _numArg options from the wire and traces the result.
func (c *statementContext) read(rd *bufio.Reader) error {
	err := c.options.read(rd, c._numArg)
	if err != nil {
		return err
	}
	if trace {
		outLogger.Printf("statement context: %v", c)
	}
	return nil
}

View File

@ -0,0 +1,26 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=statementContextType

// statementContextType enumerates the option keys of a statement context
// part (see statementContext).
type statementContextType int8

const (
	scStatementSequenceInfo statementContextType = 1
	scServerExecutionTime   statementContextType = 2
)

View File

@ -0,0 +1,17 @@
// generated by stringer -type=statementContextType; DO NOT EDIT
package protocol
import "fmt"
// Generated stringer tables; values start at 1, hence the i -= 1 offset below.
const _statementContextType_name = "scStatementSequenceInfoscServerExecutionTime"

var _statementContextType_index = [...]uint8{0, 23, 44}

// String implements fmt.Stringer (generated by stringer; do not edit).
func (i statementContextType) String() string {
	i -= 1
	if i < 0 || i >= statementContextType(len(_statementContextType_index)-1) {
		return fmt.Sprintf("statementContextType(%d)", i+1)
	}
	return _statementContextType_name[_statementContextType_index[i]:_statementContextType_index[i+1]]
}

View File

@ -0,0 +1,73 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"github.com/SAP/go-hdb/internal/bufio"
)
const (
	// statementIDSize is the wire size of a statement id (uint64).
	statementIDSize = 8
)

// statementID is the statement id part. The id field points at
// caller-owned storage so read can deliver the value in place.
type statementID struct {
	id *uint64
}

func (id statementID) kind() partKind {
	return pkStatementID
}

func (id statementID) size() (int, error) {
	return statementIDSize, nil
}

func (id statementID) numArg() int {
	return 1
}

func (id statementID) setNumArg(int) {
	//ignore - always 1
}

// read reads the id from the wire into *id.id.
// NOTE(review): dereferences id.id without a nil check — callers must set
// the pointer before reading; confirm all call sites do so.
func (id *statementID) read(rd *bufio.Reader) error {
	_id, err := rd.ReadUint64()
	if err != nil {
		return err
	}
	*id.id = _id
	if trace {
		outLogger.Printf("statement id: %d", *id.id)
	}
	return nil
}

// write writes *id.id to the wire.
func (id statementID) write(wr *bufio.Writer) error {
	if err := wr.WriteUint64(*id.id); err != nil {
		return err
	}
	if trace {
		outLogger.Printf("statement id: %d", *id.id)
	}
	return nil
}

View File

@ -0,0 +1,52 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
// TableResult is the package internal representation of a table like output parameter of a stored procedure.
type TableResult struct {
	id          uint64       // resultset id assigned by the server
	fieldSet    *FieldSet    // column metadata
	fieldValues *FieldValues // fetched resultset rows
	attrs       partAttributes
}

// newTableResult creates a TableResult with metadata/value containers sized
// for size fields; id and attrs are populated later while reading parts.
func newTableResult(s *Session, size int) *TableResult {
	return &TableResult{
		fieldSet:    newFieldSet(size),
		fieldValues: newFieldValues(s),
	}
}

// ID returns the resultset id.
func (r *TableResult) ID() uint64 {
	return r.id
}

// FieldSet returns the field metadata of the table.
func (r *TableResult) FieldSet() *FieldSet {
	return r.fieldSet
}

// FieldValues returns the field values (fetched resultset part) of the table.
func (r *TableResult) FieldValues() *FieldValues {
	return r.fieldValues
}

// Attrs returns the PartAttributes interface of the fetched resultset part.
func (r *TableResult) Attrs() PartAttributes {
	return r.attrs
}

View File

@ -0,0 +1,92 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
// topologyOptions is the topology information part: one option line per
// database host, keyed by topologyOption.
type topologyOptions struct {
	mlo     multiLineOptions
	_numArg int
}

func newTopologyOptions() *topologyOptions {
	return &topologyOptions{
		mlo: multiLineOptions{},
	}
}

// String renders each option line with its keys typed as topologyOption so
// the generated stringer names appear in the output.
func (o *topologyOptions) String() string {
	mlo := make([]map[topologyOption]interface{}, len(o.mlo))
	for i, po := range o.mlo {
		typedPo := make(map[topologyOption]interface{})
		for k, v := range po {
			typedPo[topologyOption(k)] = v
		}
		mlo[i] = typedPo
	}
	return fmt.Sprintf("%s", mlo)
}

func (o *topologyOptions) kind() partKind {
	return pkTopologyInformation
}

func (o *topologyOptions) size() int {
	return o.mlo.size()
}

func (o *topologyOptions) numArg() int {
	return len(o.mlo)
}

func (o *topologyOptions) setNumArg(numArg int) {
	o._numArg = numArg
}

func (o *topologyOptions) read(rd *bufio.Reader) error {
	if err := o.mlo.read(rd, o._numArg); err != nil {
		return err
	}
	if trace {
		outLogger.Printf("topology options: %v", o)
	}
	return nil
}

// write writes each option line preceded by its option count.
func (o *topologyOptions) write(wr *bufio.Writer) error {
	for _, m := range o.mlo {
		if err := wr.WriteInt16(int16(len(m))); err != nil {
			return err
		}
		// BUG FIX: write only the current line m. The previous code wrote
		// the complete multi-line option set (o.mlo) once per line,
		// duplicating every line's options on the wire.
		if err := m.write(wr); err != nil {
			return err
		}
	}
	if trace {
		outLogger.Printf("topology options: %v", o)
	}
	return nil
}

View File

@ -0,0 +1,36 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=topologyOption

// topologyOption enumerates the per-host attributes reported in the
// topology information part.
type topologyOption int8

const (
	toHostName         topologyOption = 1
	toHostPortnumber   topologyOption = 2
	toTenantName       topologyOption = 3
	toLoadfactor       topologyOption = 4
	toVolumeID         topologyOption = 5
	toIsMaster         topologyOption = 6
	toIsCurrentSession topologyOption = 7
	toServiceType      topologyOption = 8
	toNetworkDomain    topologyOption = 9
	toIsStandby        topologyOption = 10
	toAllIPAddresses   topologyOption = 11
	toAllHostNames     topologyOption = 12
)

View File

@ -0,0 +1,17 @@
// generated by stringer -type=topologyOption; DO NOT EDIT
package protocol
import "fmt"
// Generated stringer tables; values start at 1, hence the i -= 1 offset below.
const _topologyOption_name = "toHostNametoHostPortnumbertoTenantNametoLoadfactortoVolumeIDtoIsMastertoIsCurrentSessiontoServiceTypetoNetworkDomaintoIsStandbytoAllIPAddressestoAllHostNames"

var _topologyOption_index = [...]uint8{0, 10, 26, 38, 50, 60, 70, 88, 101, 116, 127, 143, 157}

// String implements fmt.Stringer (generated by stringer; do not edit).
func (i topologyOption) String() string {
	i -= 1
	if i < 0 || i >= topologyOption(len(_topologyOption_index)-1) {
		return fmt.Sprintf("topologyOption(%d)", i+1)
	}
	return _topologyOption_name[_topologyOption_index[i]:_topologyOption_index[i+1]]
}

View File

@ -0,0 +1,62 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import (
"fmt"
"github.com/SAP/go-hdb/internal/bufio"
)
// transactionFlags is the transaction flags part sent by the server
// (commit/rollback state, isolation level changes, ...).
type transactionFlags struct {
	options plainOptions
	_numArg int
}

func newTransactionFlags() *transactionFlags {
	return &transactionFlags{options: plainOptions{}}
}

// String renders the options with their keys typed as transactionFlagType
// so the generated stringer names appear in the output.
func (f *transactionFlags) String() string {
	typed := map[transactionFlagType]interface{}{}
	for key, value := range f.options {
		typed[transactionFlagType(key)] = value
	}
	return fmt.Sprintf("%s", typed)
}

func (f *transactionFlags) kind() partKind {
	return pkTransactionFlags
}

func (f *transactionFlags) setNumArg(numArg int) {
	f._numArg = numArg
}

// read reads _numArg options from the wire and traces the result.
func (f *transactionFlags) read(rd *bufio.Reader) error {
	err := f.options.read(rd, f._numArg)
	if err != nil {
		return err
	}
	if trace {
		outLogger.Printf("transaction flags: %v", f)
	}
	return nil
}

View File

@ -0,0 +1,32 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=transactionFlagType

// transactionFlagType enumerates the transaction state flags reported by
// the server in the transaction flags part.
type transactionFlagType int8

const (
	tfRolledback                     transactionFlagType = 0
	tfCommited                       transactionFlagType = 1
	tfNewIsolationLevel              transactionFlagType = 2
	tfDDLCommitmodeChanged           transactionFlagType = 3
	tfWriteTransactionStarted        transactionFlagType = 4
	tfNowriteTransactionStarted      transactionFlagType = 5
	tfSessionClosingTransactionError transactionFlagType = 6
)

View File

@ -0,0 +1,16 @@
// generated by stringer -type=transactionFlagType; DO NOT EDIT
package protocol
import "fmt"
// Generated stringer tables; values start at 0 (no offset needed).
const _transactionFlagType_name = "tfRolledbacktfCommitedtfNewIsolationLeveltfDDLCommitmodeChangedtfWriteTransactionStartedtfNowriteTransactionStartedtfSessionClosingTransactionError"

var _transactionFlagType_index = [...]uint8{0, 12, 22, 41, 63, 88, 115, 147}

// String implements fmt.Stringer (generated by stringer; do not edit).
func (i transactionFlagType) String() string {
	if i < 0 || i >= transactionFlagType(len(_transactionFlagType_index)-1) {
		return fmt.Sprintf("transactionFlagType(%d)", i)
	}
	return _transactionFlagType_name[_transactionFlagType_index[i]:_transactionFlagType_index[i+1]]
}

View File

@ -0,0 +1,138 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
//go:generate stringer -type=typeCode

// typeCode is the HANA wire type code of a field.
// null value indicator is high bit
type typeCode byte

const (
	tcNull      typeCode = 0
	tcTinyint   typeCode = 1
	tcSmallint  typeCode = 2
	tcInt       typeCode = 3
	tcBigint    typeCode = 4
	tcDecimal   typeCode = 5
	tcReal      typeCode = 6
	tcDouble    typeCode = 7
	tcChar      typeCode = 8
	tcVarchar   typeCode = 9
	tcNchar     typeCode = 10
	tcNvarchar  typeCode = 11
	tcBinary    typeCode = 12
	tcVarbinary typeCode = 13
	// deprecated with protocol version 3 (per docs) - but table 'date' field uses it
	tcDate typeCode = 14
	// deprecated with protocol version 3 (per docs) - but table 'time' field uses it
	tcTime typeCode = 15
	// deprecated with protocol version 3 (per docs) - but table 'timestamp' field uses it
	tcTimestamp typeCode = 16
	//tcTimetz typeCode = 17 // reserved: do not use
	//tcTimeltz typeCode = 18 // reserved: do not use
	//tcTimestamptz typeCode = 19 // reserved: do not use
	//tcTimestampltz typeCode = 20 // reserved: do not use
	//tcInvervalym typeCode = 21 // reserved: do not use
	//tcInvervalds typeCode = 22 // reserved: do not use
	//tcRowid typeCode = 23 // reserved: do not use
	//tcUrowid typeCode = 24 // reserved: do not use
	tcClob     typeCode = 25
	tcNclob    typeCode = 26
	tcBlob     typeCode = 27
	tcBoolean  typeCode = 28
	tcString   typeCode = 29
	tcNstring  typeCode = 30
	tcBlocator typeCode = 31
	tcNlocator typeCode = 32
	tcBstring  typeCode = 33
	//tcDecimaldigitarray typeCode = 34 // reserved: do not use
	tcVarchar2   typeCode = 35
	tcVarchar3   typeCode = 36
	tcNvarchar3  typeCode = 37
	tcVarbinary3 typeCode = 38
	//tcVargroup typeCode = 39 // reserved: do not use
	//tcTinyintnotnull typeCode = 40 // reserved: do not use
	//tcSmallintnotnull typeCode = 41 // reserved: do not use
	//tcIntnotnull typeCode = 42 // reserved: do not use
	//tcBigintnotnull typeCode = 43 // reserved: do not use
	//tcArgument typeCode = 44 // reserved: do not use
	//tcTable typeCode = 45 // reserved: do not use
	//tcCursor typeCode = 46 // reserved: do not use
	tcSmalldecimal typeCode = 47
	//tcAbapitab typeCode = 48 // not supported by GO hdb driver
	//tcAbapstruct typeCode = 49 // not supported by GO hdb driver
	//tcArray typeCode = 50 // reserved: do not use
	tcText      typeCode = 51
	tcShorttext typeCode = 52
	tcBintext   typeCode = 53
	//tcFixedpointdecimal typeCode = 54 // reserved: do not use
	tcAlphanum typeCode = 55
	//tcTlocator typeCode = 56 // reserved: do not use
	tcLongdate   typeCode = 61
	tcSeconddate typeCode = 62
	tcDaydate    typeCode = 63
	tcSecondtime typeCode = 64
	//tcCsdate typeCode = 65 // reserved: do not use
	//tcCstime typeCode = 66 // reserved: do not use
	//tcBlobdisk typeCode = 71 // reserved: do not use
	//tcClobdisk typeCode = 72 // reserved: do not use
	//tcNclobdisk typeCode = 73 // reserved: do not use
	tcGeometry typeCode = 74
	tcPoint    typeCode = 75
	//tcFixed16 typeCode = 76 // reserved: do not use
	//tcBlobhybrid typeCode = 77 // reserved: do not use
	//tcClobhybrid typeCode = 78 // reserved: do not use
	//tcNclobhybrid typeCode = 79 // reserved: do not use
	tcPointz typeCode = 80
)
// isLob reports whether the type code denotes a large object type.
func (k typeCode) isLob() bool {
	switch k {
	case tcClob, tcNclob, tcBlob:
		return true
	}
	return false
}

// isCharBased reports whether the type code denotes a unicode (national
// character set) based type.
func (k typeCode) isCharBased() bool {
	switch k {
	case tcNvarchar, tcNstring, tcNclob:
		return true
	}
	return false
}

// dataType maps the wire type code to the driver's public DataType.
func (k typeCode) dataType() DataType {
	switch k {
	case tcTinyint:
		return DtTinyint
	case tcSmallint:
		return DtSmallint
	case tcInt:
		return DtInt
	case tcBigint:
		return DtBigint
	case tcReal:
		return DtReal
	case tcDouble:
		return DtDouble
	case tcDate, tcTime, tcTimestamp:
		return DtTime
	case tcDecimal:
		return DtDecimal
	case tcChar, tcVarchar, tcString, tcNchar, tcNvarchar, tcNstring:
		return DtString
	case tcBinary, tcVarbinary:
		return DtBytes
	case tcBlob, tcClob, tcNclob:
		return DtLob
	default:
		return DtUnknown
	}
}

View File

@ -0,0 +1,59 @@
// generated by stringer -type=typeCode; DO NOT EDIT
package protocol
import "fmt"
const (
_typeCode_name_0 = "tcNulltcTinyinttcSmallinttcInttcBiginttcDecimaltcRealtcDoubletcChartcVarchartcNchartcNvarchartcBinarytcVarbinarytcDatetcTimetcTimestamp"
_typeCode_name_1 = "tcClobtcNclobtcBlobtcBooleantcStringtcNstringtcBlocatortcNlocatortcBstring"
_typeCode_name_2 = "tcVarchar2tcVarchar3tcNvarchar3tcVarbinary3"
_typeCode_name_3 = "tcSmalldecimal"
_typeCode_name_4 = "tcTexttcShorttexttcBintext"
_typeCode_name_5 = "tcAlphanum"
_typeCode_name_6 = "tcLongdatetcSeconddatetcDaydatetcSecondtime"
_typeCode_name_7 = "tcGeometrytcPoint"
_typeCode_name_8 = "tcPointz"
)
var (
_typeCode_index_0 = [...]uint8{0, 6, 15, 25, 30, 38, 47, 53, 61, 67, 76, 83, 93, 101, 112, 118, 124, 135}
_typeCode_index_1 = [...]uint8{0, 6, 13, 19, 28, 36, 45, 55, 65, 74}
_typeCode_index_2 = [...]uint8{0, 10, 20, 31, 43}
_typeCode_index_3 = [...]uint8{0, 14}
_typeCode_index_4 = [...]uint8{0, 6, 17, 26}
_typeCode_index_5 = [...]uint8{0, 10}
_typeCode_index_6 = [...]uint8{0, 10, 22, 31, 43}
_typeCode_index_7 = [...]uint8{0, 10, 17}
_typeCode_index_8 = [...]uint8{0, 8}
)
// String implements fmt.Stringer (generated by stringer; do not edit).
// NOTE: typeCode is unsigned, so the `0 <= i` comparison is always true;
// this is an artifact of the code generator.
func (i typeCode) String() string {
	switch {
	case 0 <= i && i <= 16:
		return _typeCode_name_0[_typeCode_index_0[i]:_typeCode_index_0[i+1]]
	case 25 <= i && i <= 33:
		i -= 25
		return _typeCode_name_1[_typeCode_index_1[i]:_typeCode_index_1[i+1]]
	case 35 <= i && i <= 38:
		i -= 35
		return _typeCode_name_2[_typeCode_index_2[i]:_typeCode_index_2[i+1]]
	case i == 47:
		return _typeCode_name_3
	case 51 <= i && i <= 53:
		i -= 51
		return _typeCode_name_4[_typeCode_index_4[i]:_typeCode_index_4[i+1]]
	case i == 55:
		return _typeCode_name_5
	case 61 <= i && i <= 64:
		i -= 61
		return _typeCode_name_6[_typeCode_index_6[i]:_typeCode_index_6[i+1]]
	case 74 <= i && i <= 75:
		i -= 74
		return _typeCode_name_7[_typeCode_index_7[i]:_typeCode_index_7[i+1]]
	case i == 80:
		return _typeCode_name_8
	default:
		return fmt.Sprintf("typeCode(%d)", i)
	}
}

View File

@ -0,0 +1,47 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protocol
import "unsafe"
// init verifies at startup that a pointer value fits into an int64/uint64,
// the representation used to smuggle LOB descriptors through
// sql/driver.Value.
func init() {
	//check if pointer could be stored in uint64
	var ptr uintptr
	var ui64 uint64
	if unsafe.Sizeof(ptr) > unsafe.Sizeof(ui64) {
		panic("pointer size exceeds uint64 size")
	}
}

// LobWriteDescrToPointer returns a pointer to a LobWriteDescr compatible to sql/driver/Value (int64).
func LobWriteDescrToPointer(w *LobWriteDescr) int64 {
	return int64(uintptr(unsafe.Pointer(w)))
}

// pointerToLobWriteDescr is the inverse of LobWriteDescrToPointer.
// NOTE(review): converting an integer back to a pointer is outside the
// unsafe.Pointer safety rules; valid only while the descriptor is kept
// alive elsewhere — confirm the session holds a reference.
func pointerToLobWriteDescr(ptr int64) *LobWriteDescr {
	return (*LobWriteDescr)(unsafe.Pointer(uintptr(ptr)))
}

// lobReadDescrToPointer converts a LobReadDescr pointer to an int64 for
// transport through sql/driver.Value.
func lobReadDescrToPointer(r *LobReadDescr) int64 {
	return int64(uintptr(unsafe.Pointer(r)))
}

// PointerToLobReadDescr returns the address of a LobReadDescr from an sql/driver/Value (int64) compatible pointer.
// NOTE(review): same unsafe integer-to-pointer caveat as pointerToLobWriteDescr.
func PointerToLobReadDescr(ptr int64) *LobReadDescr {
	return (*LobReadDescr)(unsafe.Pointer(uintptr(ptr)))
}

View File

@ -0,0 +1,240 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cesu8 implements functions and constants to support text encoded in CESU-8.
// It implements functions comparable to the unicode/utf8 package for UTF-8 de- and encoding.
package cesu8
import (
"unicode/utf16"
"unicode/utf8"
)
const (
// CESUMax is the maximum amount of bytes used by an CESU-8 codepoint encoding.
CESUMax = 6
)
// Size returns the amount of bytes needed to encode an UTF-8 byte slice to CESU-8.
func Size(p []byte) int {
	n := 0
	for len(p) > 0 {
		r, size, _ := decodeRune(p)
		p = p[size:]
		n += RuneLen(r)
	}
	return n
}

// StringSize is like Size with a string as parameter.
func StringSize(s string) int {
	var n int
	for _, r := range s {
		n += RuneLen(r)
	}
	return n
}

// EncodeRune writes into p (which must be large enough) the CESU-8 encoding of the rune. It returns the number of bytes written.
func EncodeRune(p []byte, r rune) int {
	if r <= rune3Max {
		return encodeRune(p, r)
	}
	// Supplementary characters are encoded as a UTF-16 surrogate pair,
	// each half written as a 3-byte sequence (6 bytes total).
	high, low := utf16.EncodeRune(r)
	n := encodeRune(p, high)
	return n + encodeRune(p[n:], low)
}

// FullRune reports whether the bytes in p begin with a full CESU-8 encoding of a rune.
func FullRune(p []byte) bool {
	high, n, short := decodeRune(p)
	switch {
	case short:
		return false
	case !utf16.IsSurrogate(high):
		return true
	}
	// Surrogate half: the second half must also be complete.
	_, _, short = decodeRune(p[n:])
	return !short
}

// DecodeRune unpacks the first CESU-8 encoding in p and returns the rune and its width in bytes.
func DecodeRune(p []byte) (rune, int) {
	high, n1, _ := decodeRune(p)
	if !utf16.IsSurrogate(high) {
		return high, n1
	}
	low, n2, _ := decodeRune(p[n1:])
	if low == utf8.RuneError {
		return low, n1 + n2
	}
	return utf16.DecodeRune(high, low), n1 + n2
}

// RuneLen returns the number of bytes required to encode the rune
// (-1 for negative or out-of-range runes).
func RuneLen(r rune) int {
	switch {
	case r < 0 || r > utf8.MaxRune:
		return -1
	case r <= rune1Max:
		return 1
	case r <= rune2Max:
		return 2
	case r <= rune3Max:
		return 3
	default:
		return CESUMax
	}
}
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Copied from unicode/utf8 with one deliberate change:
// - allow utf8 encoding of utf16 surrogate values (needed for CESU-8)
// - see (*) for code changes

// Code points in the surrogate range are not valid for UTF-8.
const (
	surrogateMin = 0xD800
	surrogateMax = 0xDFFF
)

const (
	t1 = 0x00 // 0000 0000
	tx = 0x80 // 1000 0000
	t2 = 0xC0 // 1100 0000
	t3 = 0xE0 // 1110 0000
	t4 = 0xF0 // 1111 0000
	t5 = 0xF8 // 1111 1000

	maskx = 0x3F // 0011 1111
	mask2 = 0x1F // 0001 1111
	mask3 = 0x0F // 0000 1111
	mask4 = 0x07 // 0000 0111

	// largest code points representable in 1-, 2-, 3-byte sequences
	rune1Max = 1<<7 - 1
	rune2Max = 1<<11 - 1
	rune3Max = 1<<16 - 1
)
// encodeRune is utf8.EncodeRune with the surrogate-range rejection removed
// so UTF-16 surrogate halves (used by CESU-8) encode as regular 3-byte
// sequences; the changed lines are marked (*).
func encodeRune(p []byte, r rune) int {
	// Negative values are erroneous. Making it unsigned addresses the problem.
	switch i := uint32(r); {
	case i <= rune1Max:
		p[0] = byte(r)
		return 1
	case i <= rune2Max:
		p[0] = t2 | byte(r>>6)
		p[1] = tx | byte(r)&maskx
		return 2
	//case i > MaxRune, surrogateMin <= i && i <= surrogateMax: // replaced (*)
	case i > utf8.MaxRune: // (*)
		r = utf8.RuneError
		fallthrough
	case i <= rune3Max:
		p[0] = t3 | byte(r>>12)
		p[1] = tx | byte(r>>6)&maskx
		p[2] = tx | byte(r)&maskx
		return 3
	default:
		p[0] = t4 | byte(r>>18)
		p[1] = tx | byte(r>>12)&maskx
		p[2] = tx | byte(r>>6)&maskx
		p[3] = tx | byte(r)&maskx
		return 4
	}
}
// decodeRune is utf8.DecodeRune with the surrogate-range rejection removed
// (see (*)) so CESU-8 surrogate halves decode without error. It returns the
// rune, its encoded width, and short=true when p ends mid-sequence.
func decodeRune(p []byte) (r rune, size int, short bool) {
	n := len(p)
	if n < 1 {
		return utf8.RuneError, 0, true
	}
	c0 := p[0]

	// 1-byte, 7-bit sequence?
	if c0 < tx {
		return rune(c0), 1, false
	}

	// unexpected continuation byte?
	if c0 < t2 {
		return utf8.RuneError, 1, false
	}

	// need first continuation byte
	if n < 2 {
		return utf8.RuneError, 1, true
	}
	c1 := p[1]
	if c1 < tx || t2 <= c1 {
		return utf8.RuneError, 1, false
	}

	// 2-byte, 11-bit sequence?
	if c0 < t3 {
		r = rune(c0&mask2)<<6 | rune(c1&maskx)
		if r <= rune1Max {
			// overlong encoding
			return utf8.RuneError, 1, false
		}
		return r, 2, false
	}

	// need second continuation byte
	if n < 3 {
		return utf8.RuneError, 1, true
	}
	c2 := p[2]
	if c2 < tx || t2 <= c2 {
		return utf8.RuneError, 1, false
	}

	// 3-byte, 16-bit sequence?
	if c0 < t4 {
		r = rune(c0&mask3)<<12 | rune(c1&maskx)<<6 | rune(c2&maskx)
		if r <= rune2Max {
			// overlong encoding
			return utf8.RuneError, 1, false
		}
		// do not throw error on surrogates // (*)
		//if surrogateMin <= r && r <= surrogateMax {
		//	return RuneError, 1, false
		//}
		return r, 3, false
	}

	// need third continuation byte
	if n < 4 {
		return utf8.RuneError, 1, true
	}
	c3 := p[3]
	if c3 < tx || t2 <= c3 {
		return utf8.RuneError, 1, false
	}

	// 4-byte, 21-bit sequence?
	if c0 < t5 {
		r = rune(c0&mask4)<<18 | rune(c1&maskx)<<12 | rune(c2&maskx)<<6 | rune(c3&maskx)
		if r <= rune3Max || utf8.MaxRune < r {
			// overlong encoding or out of range
			return utf8.RuneError, 1, false
		}
		return r, 4, false
	}

	// error
	return utf8.RuneError, 1, false
}

View File

@ -0,0 +1,111 @@
/*
Copyright 2014 SAP SE
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package unicode implements UTF-8 to CESU-8 and vice versa transformations.
package unicode
import (
"errors"
"unicode/utf8"
"github.com/SAP/go-hdb/internal/unicode/cesu8"
"golang.org/x/text/transform"
)
var (
	// Utf8ToCesu8Transformer implements the golang.org/x/text/transform/Transformer interface for UTF-8 to CESU-8 transformation.
	Utf8ToCesu8Transformer = new(utf8ToCesu8Transformer)
	// Cesu8ToUtf8Transformer implements the golang.org/x/text/transform/Transformer interface for CESU-8 to UTF-8 transformation.
	Cesu8ToUtf8Transformer = new(cesu8ToUtf8Transformer)
	// ErrInvalidUtf8 means that a transformer detected invalid UTF-8 data.
	// NOTE(review): message is capitalized contrary to Go error-string
	// convention; kept unchanged for compatibility with existing callers.
	ErrInvalidUtf8 = errors.New("Invalid UTF-8")
	// ErrInvalidCesu8 means that a transformer detected invalid CESU-8 data.
	ErrInvalidCesu8 = errors.New("Invalid CESU-8")
)
// utf8ToCesu8Transformer transforms UTF-8 input to CESU-8 output.
type utf8ToCesu8Transformer struct{ transform.NopResetter }

// Transform implements transform.Transformer: it copies ASCII bytes
// directly and re-encodes multi-byte runes from UTF-8 to CESU-8, returning
// ErrShortDst/ErrShortSrc to request larger buffers or more input.
func (t *utf8ToCesu8Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	i, j := 0, 0
	for i < len(src) {
		if src[i] < utf8.RuneSelf {
			// ASCII fast path: byte-for-byte copy.
			if j < len(dst) {
				dst[j] = src[i]
				i++
				j++
			} else {
				return j, i, transform.ErrShortDst
			}
		} else {
			if !utf8.FullRune(src[i:]) {
				// NOTE(review): atEOF is not consulted; a truncated rune at
				// end of input yields ErrShortSrc even at EOF — confirm this
				// matches the transform package contract.
				return j, i, transform.ErrShortSrc
			}
			r, n := utf8.DecodeRune(src[i:])
			if r == utf8.RuneError {
				return j, i, ErrInvalidUtf8
			}
			m := cesu8.RuneLen(r)
			if m == -1 {
				// unreachable: DecodeRune never yields an unencodable rune
				panic("internal UTF-8 to CESU-8 transformation error")
			}
			if j+m <= len(dst) {
				cesu8.EncodeRune(dst[j:], r)
				i += n
				j += m
			} else {
				return j, i, transform.ErrShortDst
			}
		}
	}
	return j, i, nil
}
// cesu8ToUtf8Transformer transforms CESU-8 input to UTF-8 output.
type cesu8ToUtf8Transformer struct{ transform.NopResetter }

// Transform implements transform.Transformer: it copies ASCII bytes
// directly and re-encodes multi-byte runes from CESU-8 (including surrogate
// pairs) to UTF-8, returning ErrShortDst/ErrShortSrc to request larger
// buffers or more input.
func (t *cesu8ToUtf8Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	i, j := 0, 0
	for i < len(src) {
		if src[i] < utf8.RuneSelf {
			// ASCII fast path: byte-for-byte copy.
			if j < len(dst) {
				dst[j] = src[i]
				i++
				j++
			} else {
				return j, i, transform.ErrShortDst
			}
		} else {
			if !cesu8.FullRune(src[i:]) {
				// NOTE(review): atEOF is not consulted; see the UTF-8 to
				// CESU-8 direction for the same caveat.
				return j, i, transform.ErrShortSrc
			}
			r, n := cesu8.DecodeRune(src[i:])
			if r == utf8.RuneError {
				return j, i, ErrInvalidCesu8
			}
			m := utf8.RuneLen(r)
			if m == -1 {
				// unreachable: cesu8.DecodeRune never yields an unencodable rune
				panic("internal CESU-8 to UTF-8 transformation error")
			}
			if j+m <= len(dst) {
				utf8.EncodeRune(dst[j:], r)
				i += n
				j += m
			} else {
				return j, i, transform.ErrShortDst
			}
		}
	}
	return j, i, nil
}

3
vendor/golang.org/x/text/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

31
vendor/golang.org/x/text/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,31 @@
# Contributing to Go
Go is an open source project.
It is the work of hundreds of contributors. We appreciate your help!
## Filing issues
When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
## Contributing code
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.
**We do not accept GitHub pull requests**
(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.

3
vendor/golang.org/x/text/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

23
vendor/golang.org/x/text/README generated vendored Normal file
View File

@ -0,0 +1,23 @@
This repository holds supplementary Go libraries for text processing, many involving Unicode.
To submit changes to this repository, see http://golang.org/doc/contribute.html.
To generate the tables in this repository (except for the encoding tables),
run go generate from this directory. By default tables are generated for the
Unicode version in core and the CLDR version defined in
golang.org/x/text/unicode/cldr.
Running go generate will as a side effect create a DATA subdirectory in this
directory which holds all files that are used as a source for generating the
tables. This directory will also serve as a cache.
Run
go test ./...
from this directory to run all tests. Add the "-tags icu" flag to also run
ICU conformance tests (if available). This requires that you have the correct
ICU version installed on your system.
TODO:
- updating unversioned source files.

1
vendor/golang.org/x/text/codereview.cfg generated vendored Normal file
View File

@ -0,0 +1 @@
issuerepo: golang/go

13
vendor/golang.org/x/text/doc.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// text is a repository of text-related packages related to internationalization
// (i18n) and localization (l10n), such as character encodings, text
// transformations, and locale-specific text handling.
package text
// TODO: more documentation on general concepts, such as Transformers, use
// of normalization, etc.

292
vendor/golang.org/x/text/gen.go generated vendored Normal file
View File

@ -0,0 +1,292 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// gen runs go generate on Unicode- and CLDR-related package in the text
// repositories, taking into account dependencies and versions.
package main
import (
"bytes"
"flag"
"fmt"
"go/build"
"go/format"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"unicode"
"golang.org/x/text/internal/gen"
)
// Command-line flags and the package selection shared by the whole tool.
var (
	verbose = flag.Bool("v", false, "verbose output")
	force   = flag.Bool("force", false, "ignore failing dependencies")
	doCore  = flag.Bool("core", false, "force an update to core")
	excludeList = flag.String("exclude", "",
		"comma-separated list of packages to exclude")

	// The user can specify a selection of packages to build on the command line.
	args []string
)
// exclude reports whether pkg should be skipped. When an explicit package
// selection was given on the command line, anything outside that selection is
// excluded; otherwise the -exclude flag's comma-separated list applies.
func exclude(pkg string) bool {
	if len(args) == 0 {
		return contains(strings.Split(*excludeList, ","), pkg)
	}
	return !contains(args, pkg)
}
// TODO:
// - Better version handling.
// - Generate tables for the core unicode package?
// - Add generation for encodings. This requires some retooling here and there.
// - Running repo-wide "long" tests.
// vprintf prints progress output; main replaces it with a no-op unless -v is set.
var vprintf = fmt.Printf
// main regenerates the tables of all x/text packages in dependency order and,
// when the requested Unicode version differs from core's, also updates core.
func main() {
	gen.Init()
	args = flag.Args()
	if !*verbose {
		// Set vprintf to a no-op.
		vprintf = func(string, ...interface{}) (int, error) { return 0, nil }
	}

	// TODO: create temporary cache directory to load files and create and set
	// a "cache" option if the user did not specify the UNICODE_DIR environment
	// variable. This will prevent duplicate downloads and also will enable long
	// tests, which really need to be run after each generated package.

	updateCore := *doCore
	if gen.UnicodeVersion() != unicode.Version {
		fmt.Printf("Requested Unicode version %s; core unicode version is %s.\n",
			gen.UnicodeVersion(),
			unicode.Version)
		// TODO: use collate to compare. Simple comparison will work, though,
		// until Unicode reaches version 10. To avoid circular dependencies, we
		// could use the NumericWeighter without using package collate using a
		// trivial Weighter implementation.
		if gen.UnicodeVersion() < unicode.Version && !*force {
			os.Exit(2)
		}
		updateCore = true
	}

	// Note: this local shadows the imported unicode package above.
	var unicode = &dependency{}
	if updateCore {
		fmt.Printf("Updating core to version %s...\n", gen.UnicodeVersion())
		unicode = generate("unicode")

		// Test some users of the unicode packages, especially the ones that
		// keep a mirrored table. These may need to be corrected by hand.
		generate("regexp", unicode)
		generate("strconv", unicode) // mimics Unicode table
		generate("strings", unicode)
		generate("testing", unicode) // mimics Unicode table
	}

	// Each generate call starts a goroutine; the arguments after the package
	// name are the dependencies it must wait for.
	var (
		cldr       = generate("./unicode/cldr", unicode)
		language   = generate("./language", cldr)
		internal   = generate("./internal", unicode, language)
		norm       = generate("./unicode/norm", unicode)
		rangetable = generate("./unicode/rangetable", unicode)
		cases      = generate("./cases", unicode, norm, language, rangetable)
		width      = generate("./width", unicode)
		bidi       = generate("./unicode/bidi", unicode, norm, rangetable)
		mib        = generate("./encoding/internal/identifier", unicode)
		_          = generate("./encoding/htmlindex", unicode, language, mib)
		_          = generate("./encoding/ianaindex", unicode, language, mib)
		_          = generate("./secure/precis", unicode, norm, rangetable, cases, width, bidi)
		_          = generate("./currency", unicode, cldr, language, internal)
		_          = generate("./internal/number", unicode, cldr, language, internal)
		_          = generate("./feature/plural", unicode, cldr, language, internal)
		_          = generate("./internal/export/idna", unicode, bidi, norm)
		_          = generate("./language/display", unicode, cldr, language, internal)
		_          = generate("./collate", unicode, norm, cldr, language, rangetable)
		_          = generate("./search", unicode, norm, cldr, language, rangetable)
	)
	all.Wait()

	// Copy exported packages to the destination golang.org repo.
	copyExported("golang.org/x/net/idna")

	if updateCore {
		copyVendored()
	}

	if hasErrors {
		fmt.Println("FAIL")
		os.Exit(1)
	}
	vprintf("SUCCESS\n")
}
// Bookkeeping shared by all generate goroutines.
var (
	all       sync.WaitGroup // tracks every package generation started
	hasErrors bool           // set by generate goroutines on failure
)

// dependency lets one package generation wait for its prerequisites and
// observe whether any of them failed.
type dependency struct {
	sync.WaitGroup
	hasErrors bool
}
// generate starts table generation for pkg in its own goroutine, after all
// deps have completed. It runs "go generate" and then "go test" for the
// package, capturing output. The returned dependency can be passed to later
// generate calls so that they wait for — and observe failures of — this one.
func generate(pkg string, deps ...*dependency) *dependency {
	var wg dependency
	if exclude(pkg) {
		return &wg
	}
	wg.Add(1)
	all.Add(1)
	go func() {
		defer wg.Done()
		defer all.Done()
		// Wait for dependencies to finish.
		for _, d := range deps {
			d.Wait()
			if d.hasErrors && !*force {
				fmt.Printf("--- ABORT: %s\n", pkg)
				wg.hasErrors = true
				return
			}
		}
		vprintf("=== GENERATE %s\n", pkg)
		args := []string{"generate"}
		if *verbose {
			args = append(args, "-v")
		}
		args = append(args, pkg)
		cmd := exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
		w := &bytes.Buffer{}
		cmd.Stderr = w
		cmd.Stdout = w
		if err := cmd.Run(); err != nil {
			fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(w), err)
			// NOTE(review): hasErrors is written from multiple goroutines
			// without synchronization; main only reads it after all.Wait(),
			// but this is still a data race under -race. Confirm intent.
			hasErrors = true
			wg.hasErrors = true
			return
		}
		vprintf("=== TEST %s\n", pkg)
		args[0] = "test"
		cmd = exec.Command(filepath.Join(runtime.GOROOT(), "bin", "go"), args...)
		wt := &bytes.Buffer{}
		cmd.Stderr = wt
		cmd.Stdout = wt
		if err := cmd.Run(); err != nil {
			fmt.Printf("--- FAIL: %s:\n\t%v\n\tError: %v\n", pkg, indent(wt), err)
			hasErrors = true
			wg.hasErrors = true
			return
		}
		vprintf("--- SUCCESS: %s\n\t%v\n", pkg, indent(w))
		fmt.Print(wt.String())
	}()
	return &wg
}
// copyExported copies a package in x/text/internal/export to the
// destination repository.
func copyExported(p string) {
	copyPackage(
		filepath.Join("internal", "export", path.Base(p)),
		filepath.Join("..", filepath.FromSlash(p[len("golang.org/x"):])),
		"golang.org/x/text/internal/export/"+path.Base(p),
		p)
}

// copyVendored copies packages used by Go core into the vendored directory.
func copyVendored() {
	root := filepath.Join(build.Default.GOROOT, filepath.FromSlash("src/vendor/golang_org/x"))

	err := filepath.Walk(root, func(dir string, info os.FileInfo, err error) error {
		if err != nil || !info.IsDir() || root == dir {
			return err
		}
		src := dir[len(root)+1:]
		const slash = string(filepath.Separator)
		if c := strings.Split(src, slash); c[0] == "text" {
			// Copy a text repo package from its normal location.
			src = strings.Join(c[1:], slash)
		} else {
			// Copy the vendored package if it exists in the export directory.
			src = filepath.Join("internal", "export", filepath.Base(src))
		}
		copyPackage(src, dir, "golang.org", "golang_org")
		return nil
	})
	if err != nil {
		// NOTE(review): the trailing ":" in this format string looks
		// misplaced ("has failed %v:") — confirm before changing output.
		fmt.Printf("Seeding directory %s has failed %v:", root, err)
		os.Exit(1)
	}
}
// goGenRE is used to remove go:generate lines.
var goGenRE = regexp.MustCompile("//go:generate[^\n]*\n")

// copyPackage copies relevant files from a directory in x/text to the
// destination package directory. The destination package is assumed to have
// the same name. For each copied file go:generate lines are removed and
// package comments are rewritten to the new path.
func copyPackage(dirSrc, dirDst, search, replace string) {
	err := filepath.Walk(dirSrc, func(file string, info os.FileInfo, err error) error {
		base := filepath.Base(file)
		// Skip directories, non-Go files, most test files, and anything
		// outside dirSrc itself.
		if err != nil || info.IsDir() ||
			!strings.HasSuffix(base, ".go") ||
			strings.HasSuffix(base, "_test.go") && !strings.HasPrefix(base, "example") ||
			// Don't process subdirectories.
			filepath.Dir(file) != dirSrc {
			return nil
		}
		b, err := ioutil.ReadFile(file)
		if err != nil || bytes.Contains(b, []byte("\n// +build ignore")) {
			return err
		}
		// Fix paths.
		b = bytes.Replace(b, []byte(search), []byte(replace), -1)
		// Remove go:generate lines.
		b = goGenRE.ReplaceAllLiteral(b, nil)
		comment := "// Code generated by running \"go generate\" in golang.org/x/text. DO NOT EDIT.\n\n"
		if *doCore {
			comment = "// Code generated by running \"go run gen.go -core\" in golang.org/x/text. DO NOT EDIT.\n\n"
		}
		if !bytes.HasPrefix(b, []byte(comment)) {
			b = append([]byte(comment), b...)
		}
		if b, err = format.Source(b); err != nil {
			fmt.Println("Failed to format file:", err)
			os.Exit(1)
		}
		file = filepath.Join(dirDst, base)
		vprintf("=== COPY %s\n", file)
		return ioutil.WriteFile(file, b, 0666)
	})
	if err != nil {
		fmt.Println("Copying exported files failed:", err)
		os.Exit(1)
	}
}
// contains reports whether s is an element of a.
func contains(a []string, s string) bool {
	for i := range a {
		if a[i] == s {
			return true
		}
	}
	return false
}
func indent(b *bytes.Buffer) string {
return strings.Replace(strings.TrimSpace(b.String()), "\n", "\n\t", -1)
}

351
vendor/golang.org/x/text/internal/gen/code.go generated vendored Normal file
View File

@ -0,0 +1,351 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gen
import (
"bytes"
"encoding/gob"
"fmt"
"hash"
"hash/fnv"
"io"
"log"
"os"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
// This file contains utilities for generating code.
// TODO: other write methods like:
// - slices, maps, types, etc.
// CodeWriter is a utility for writing structured code. It computes the content
// hash and size of written content. It ensures there are newlines between
// written code blocks.
type CodeWriter struct {
	buf  bytes.Buffer
	Size int
	Hash hash.Hash32 // content hash
	gob  *gob.Encoder // mirrors written values into Hash
	// For comments we skip the usual one-line separator if they are followed by
	// a code block.
	skipSep bool
}

// Write appends p to the pending output buffer; it implements io.Writer.
func (w *CodeWriter) Write(p []byte) (n int, err error) {
	return w.buf.Write(p)
}

// NewCodeWriter returns a new CodeWriter.
func NewCodeWriter() *CodeWriter {
	h := fnv.New32()
	return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
}
// WriteGoFile appends the buffer with the total size of all created structures
// and writes it as a Go file to the given file with the given package name.
// It calls log.Fatal on any error.
func (w *CodeWriter) WriteGoFile(filename, pkg string) {
	f, err := os.Create(filename)
	if err != nil {
		log.Fatalf("Could not create file %s: %v", filename, err)
	}
	defer f.Close()
	if _, err = w.WriteGo(f, pkg); err != nil {
		log.Fatalf("Error writing file %s: %v", filename, err)
	}
}

// WriteGo appends the buffer with the total size of all created structures and
// writes it as a Go file to the given writer with the given package name.
// The internal buffer is reset afterwards so the writer can be reused.
func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) {
	sz := w.Size
	w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
	defer w.buf.Reset()
	return WriteGo(out, pkg, w.buf.Bytes())
}

// printf writes formatted output into the pending buffer.
func (w *CodeWriter) printf(f string, x ...interface{}) {
	fmt.Fprintf(w, f, x...)
}
// insertSep writes the blank-line separator between code blocks, unless the
// preceding comment asked for it to be suppressed.
func (w *CodeWriter) insertSep() {
	if !w.skipSep {
		// Use at least two newlines to ensure a blank space between the previous
		// block. WriteGoFile will remove extraneous newlines.
		w.printf("\n\n")
		return
	}
	w.skipSep = false
}
// WriteComment writes a comment block. All line starts are prefixed with "//".
// Initial empty lines are gobbled. The indentation for the first line is
// stripped from consecutive lines.
func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
	s := fmt.Sprintf(comment, args...)
	s = strings.Trim(s, "\n")

	// Use at least two newlines to ensure a blank space between the previous
	// block. WriteGoFile will remove extraneous newlines.
	w.printf("\n\n// ")
	w.skipSep = true

	// strip first indent level.
	sep := "\n"
	for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
		sep += s[:1]
	}

	// Re-prefix each line break (with or without the captured indent) with "// ".
	strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)

	w.printf("\n")
}

// writeSizeInfo emits a comment recording the byte size of the value just written.
func (w *CodeWriter) writeSizeInfo(size int) {
	w.printf("// Size: %d bytes\n", size)
}
// WriteConst writes a constant of the given name and value.
// String constants are rendered via WriteString; everything else uses %#v.
func (w *CodeWriter) WriteConst(name string, x interface{}) {
	w.insertSep()
	v := reflect.ValueOf(x)
	if v.Kind() != reflect.String {
		w.printf("const %s = %#v\n", name, x)
		return
	}
	w.printf("const %s %s = ", name, typeName(x))
	w.WriteString(v.String())
	w.printf("\n")
}
// WriteVar writes a variable of the given name and value, accumulating the
// value's in-memory size into w.Size and folding it into the content hash.
func (w *CodeWriter) WriteVar(name string, x interface{}) {
	w.insertSep()
	v := reflect.ValueOf(x)
	oldSize := w.Size
	sz := int(v.Type().Size())
	w.Size += sz

	switch v.Type().Kind() {
	case reflect.String:
		w.printf("var %s %s = ", name, typeName(x))
		w.WriteString(v.String())
	case reflect.Struct:
		w.gob.Encode(x) // hash the struct value itself
		fallthrough
	case reflect.Slice, reflect.Array:
		w.printf("var %s = ", name)
		w.writeValue(v)
		w.writeSizeInfo(w.Size - oldSize)
	default:
		w.printf("var %s %s = ", name, typeName(x))
		w.gob.Encode(x)
		w.writeValue(v)
		w.writeSizeInfo(w.Size - oldSize)
	}
	w.printf("\n")
}
// writeValue renders v as Go source. Strings, slices, arrays, and structs get
// dedicated formatting; all other kinds are printed with %#v.
func (w *CodeWriter) writeValue(v reflect.Value) {
	x := v.Interface()
	switch v.Kind() {
	case reflect.String:
		w.WriteString(v.String())
	case reflect.Array:
		// Don't double count: callers of WriteArray count on the size being
		// added, so we need to discount it here.
		w.Size -= int(v.Type().Size())
		w.writeSlice(x, true)
	case reflect.Slice:
		w.writeSlice(x, false)
	case reflect.Struct:
		w.printf("%s{\n", typeName(v.Interface()))
		t := v.Type()
		for i := 0; i < v.NumField(); i++ {
			w.printf("%s: ", t.Field(i).Name)
			w.writeValue(v.Field(i))
			w.printf(",\n")
		}
		w.printf("}")
	default:
		w.printf("%#v", x)
	}
}
// WriteString writes a string literal. Short strings are emitted inline with
// %q; longer ones are wrapped as a multi-line concatenation with unprintable
// runes escaped, and very long ones get explicit parentheses (see below).
func (w *CodeWriter) WriteString(s string) {
	s = strings.Replace(s, `\`, `\\`, -1)
	io.WriteString(w.Hash, s) // content hash
	w.Size += len(s)

	const maxInline = 40
	if len(s) <= maxInline {
		w.printf("%q", s)
		return
	}

	// We will render the string as a multi-line string.
	const maxWidth = 80 - 4 - len(`"`) - len(`" +`)

	// When starting on its own line, go fmt indents line 2+ an extra level.
	n, max := maxWidth, maxWidth-4

	// As per https://golang.org/issue/18078, the compiler has trouble
	// compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
	// for large N. We insert redundant, explicit parentheses to work around
	// that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
	// ... + s127) + etc + (etc + ... + sN).
	explicitParens, extraComment := len(s) > 128*1024, ""
	if explicitParens {
		w.printf(`(`)
		extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
	}

	// Print "" +\n, if a string does not start on its own line.
	b := w.buf.Bytes()
	if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
		w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
		n, max = maxWidth, maxWidth
	}

	w.printf(`"`)

	for sz, p, nLines := 0, 0, 0; p < len(s); {
		var r rune
		r, sz = utf8.DecodeRuneInString(s[p:])
		out := s[p : p+sz]
		chars := 1
		if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' {
			// Escape unprintable runes by UTF-8 length.
			switch sz {
			case 1:
				out = fmt.Sprintf("\\x%02x", s[p])
			case 2, 3:
				out = fmt.Sprintf("\\u%04x", r)
			case 4:
				out = fmt.Sprintf("\\U%08x", r)
			}
			chars = len(out)
		}
		// Break the line once the width budget is exhausted.
		if n -= chars; n < 0 {
			nLines++
			if explicitParens && nLines&63 == 63 {
				w.printf("\") + (\"")
			}
			w.printf("\" +\n\"")
			n = max - len(out)
		}
		w.printf("%s", out)
		p += sz
	}
	w.printf(`"`)
	if explicitParens {
		w.printf(`)`)
	}
}
// WriteSlice writes a slice value.
func (w *CodeWriter) WriteSlice(x interface{}) {
	w.writeSlice(x, false)
}

// WriteArray writes an array value.
func (w *CodeWriter) WriteArray(x interface{}) {
	w.writeSlice(x, true)
}
// writeSlice renders a slice or array value as Go source, updating w.Size and
// the content hash along the way. Integer element types are laid out in fixed
// columns; zero-valued struct elements are omitted (indexed literals).
func (w *CodeWriter) writeSlice(x interface{}, isArray bool) {
	v := reflect.ValueOf(x)
	w.gob.Encode(v.Len())
	w.Size += v.Len() * int(v.Type().Elem().Size())
	name := typeName(x)
	if isArray {
		name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:])
	}
	if isArray {
		w.printf("%s{\n", name)
	} else {
		w.printf("%s{ // %d elements\n", name, v.Len())
	}

	switch kind := v.Type().Elem().Kind(); kind {
	case reflect.String:
		for _, s := range x.([]string) {
			w.WriteString(s)
			w.printf(",\n")
		}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		// nLine and nBlock are the number of elements per line and block.
		nLine, nBlock, format := 8, 64, "%d,"
		switch kind {
		case reflect.Uint8:
			format = "%#02x,"
		case reflect.Uint16:
			format = "%#04x,"
		case reflect.Uint32:
			nLine, nBlock, format = 4, 32, "%#08x,"
		case reflect.Uint, reflect.Uint64:
			nLine, nBlock, format = 4, 32, "%#016x,"
		case reflect.Int8:
			nLine = 16
		}
		n := nLine
		for i := 0; i < v.Len(); i++ {
			if i%nBlock == 0 && v.Len() > nBlock {
				w.printf("// Entry %X - %X\n", i, i+nBlock-1)
			}
			x := v.Index(i).Interface()
			w.gob.Encode(x)
			w.printf(format, x)
			if n--; n == 0 {
				n = nLine
				w.printf("\n")
			}
		}
		w.printf("\n")
	case reflect.Struct:
		zero := reflect.Zero(v.Type().Elem()).Interface()
		for i := 0; i < v.Len(); i++ {
			x := v.Index(i).Interface()
			// NOTE(review): this hashes the whole slice value v on every
			// iteration, not the element x — verify against upstream before
			// changing, as it affects the emitted checksum.
			w.gob.EncodeValue(v)
			if !reflect.DeepEqual(zero, x) {
				line := fmt.Sprintf("%#v,\n", x)
				line = line[strings.IndexByte(line, '{'):]
				w.printf("%d: ", i)
				w.printf(line)
			}
		}
	case reflect.Array:
		for i := 0; i < v.Len(); i++ {
			w.printf("%d: %#v,\n", i, v.Index(i).Interface())
		}
	default:
		panic("gen: slice elem type not supported")
	}
	w.printf("}")
}
// WriteType writes a definition of the type of the given value and returns the
// type name.
func (w *CodeWriter) WriteType(x interface{}) string {
	t := reflect.TypeOf(x)
	w.printf("type %s struct {\n", t.Name())
	for i, n := 0, t.NumField(); i < n; i++ {
		f := t.Field(i)
		w.printf("\t%s %s\n", f.Name, f.Type)
	}
	w.printf("}\n")
	return t.Name()
}
// typeName returns the name of the go type of x, with any "main." package
// qualifier stripped.
func typeName(x interface{}) string {
	t := reflect.ValueOf(x).Type()
	name := fmt.Sprint(t)
	return strings.Replace(name, "main.", "", 1)
}

281
vendor/golang.org/x/text/internal/gen/gen.go generated vendored Normal file
View File

@ -0,0 +1,281 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gen contains common code for the various code generation tools in the
// text repository. Its usage ensures consistency between tools.
//
// This package defines command line flags that are common to most generation
// tools. The flags allow for specifying specific Unicode and CLDR versions
// in the public Unicode data repository (http://www.unicode.org/Public).
//
// A local Unicode data mirror can be set through the flag -local or the
// environment variable UNICODE_DIR. The former takes precedence. The local
// directory should follow the same structure as the public repository.
//
// IANA data can also optionally be mirrored by putting it in the iana directory
// rooted at the top of the local mirror. Beware, though, that IANA data is not
// versioned. So it is up to the developer to use the right version.
package gen // import "golang.org/x/text/internal/gen"
import (
"bytes"
"flag"
"fmt"
"go/build"
"go/format"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"path/filepath"
"sync"
"unicode"
"golang.org/x/text/unicode/cldr"
)
// Data-source locations and versions, overridable via flags (and, for the
// versions, the UNICODE_VERSION / CLDR_VERSION environment variables).
var (
	url = flag.String("url",
		"http://www.unicode.org/Public",
		"URL of Unicode database directory")
	iana = flag.String("iana",
		"http://www.iana.org",
		"URL of the IANA repository")
	unicodeVersion = flag.String("unicode",
		getEnv("UNICODE_VERSION", unicode.Version),
		"unicode version to use")
	cldrVersion = flag.String("cldr",
		getEnv("CLDR_VERSION", cldr.Version),
		"cldr version to use")
)
// getEnv returns the value of the named environment variable, or def when the
// variable is unset or empty.
func getEnv(name, def string) string {
	v := os.Getenv(name)
	if v == "" {
		return def
	}
	return v
}
// Init performs common initialization for a gen command. It parses the flags
// and sets up the standard logging parameters.
func Init() {
	log.SetPrefix("")
	log.SetFlags(log.Lshortfile)
	flag.Parse()
}

// header is the file comment and package clause prepended to generated files.
const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

package %s

`
// UnicodeVersion reports the requested Unicode version.
func UnicodeVersion() string {
	return *unicodeVersion
}

// CLDRVersion reports the requested CLDR version.
func CLDRVersion() string {
	return *cldrVersion
}
// IsLocal reports whether data files are available locally: the local mirror's
// README path can be determined and exists on disk.
func IsLocal() bool {
	dir, err := localReadmeFile()
	if err != nil {
		return false
	}
	_, err = os.Stat(dir)
	return err == nil
}
// OpenUCDFile opens the requested UCD file. The file is specified relative to
// the public Unicode root directory. It will call log.Fatal if there are any
// errors.
func OpenUCDFile(file string) io.ReadCloser {
	return openUnicode(path.Join(*unicodeVersion, "ucd", file))
}

// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
// are any errors.
func OpenCLDRCoreZip() io.ReadCloser {
	return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
}

// OpenUnicodeFile opens the requested file of the requested category from the
// root of the Unicode data archive. The file is specified relative to the
// public Unicode root directory. If version is "", it will use the default
// Unicode version. It will call log.Fatal if there are any errors.
func OpenUnicodeFile(category, version, file string) io.ReadCloser {
	if version == "" {
		version = UnicodeVersion()
	}
	return openUnicode(path.Join(category, version, file))
}

// OpenIANAFile opens the requested IANA file. The file is specified relative
// to the IANA root, which is typically either http://www.iana.org or the
// iana directory in the local mirror. It will call log.Fatal if there are any
// errors.
func OpenIANAFile(path string) io.ReadCloser {
	return Open(*iana, "iana", path)
}
var (
	dirMutex sync.Mutex // guards lazy creation of the local data directory
	localDir string
)

// permissions is the mode used for the local data cache directory and files.
const permissions = 0755

// localReadmeFile returns the path of the README marker inside the local DATA
// cache, rooted at the golang.org/x/text package directory.
func localReadmeFile() (string, error) {
	p, err := build.Import("golang.org/x/text", "", build.FindOnly)
	if err != nil {
		return "", fmt.Errorf("Could not locate package: %v", err)
	}
	return filepath.Join(p.Dir, "DATA", "README"), nil
}
// getLocalDir returns the local data cache directory, creating it and its
// README marker on first use. It is safe for concurrent use and calls
// log.Fatal on failure to create the directory.
func getLocalDir() string {
	dirMutex.Lock()
	defer dirMutex.Unlock()

	readme, err := localReadmeFile()
	if err != nil {
		log.Fatal(err)
	}
	dir := filepath.Dir(readme)
	if _, err := os.Stat(readme); err != nil {
		if err := os.MkdirAll(dir, permissions); err != nil {
			log.Fatalf("Could not create directory: %v", err)
		}
		// Best effort: failure to write the README marker is ignored.
		ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
	}
	return dir
}
// readmeTxt is written into the DATA cache directory to explain its contents.
// NOTE(review): "all other times" below likely means "all other files"; the
// text is part of the emitted README, so it is left unchanged here.
const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.

This directory contains downloaded files used to generate the various tables
in the golang.org/x/text subrepo.

Note that the language subtag repo (iana/assignments/language-subtag-registry)
and all other times in the iana subdirectory are not versioned and will need
to be periodically manually updated. The easiest way to do this is to remove
the entire iana directory. This is mostly of concern when updating the language
package.
`
// Open opens subdir/path if a local directory is specified and the file exists,
// where subdir is a directory relative to the local root, or fetches it from
// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
func Open(urlRoot, subdir, path string) io.ReadCloser {
	file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
	return open(file, urlRoot, path)
}

// openUnicode opens path from the local mirror, falling back to the public
// Unicode database URL.
func openUnicode(path string) io.ReadCloser {
	file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
	return open(file, *url, path)
}
// TODO: automatically periodically update non-versioned files.

// open returns file from the local cache when present; otherwise it downloads
// urlRoot/path, stores the bytes at file, and returns a reader over them.
// It calls log.Fatal on download or write failure.
func open(file, urlRoot, path string) io.ReadCloser {
	if f, err := os.Open(file); err == nil {
		return f
	}
	r := get(urlRoot, path)
	defer r.Close()
	b, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatalf("Could not download file: %v", err)
	}
	// MkdirAll errors are ignored here; a failure surfaces via WriteFile below.
	os.MkdirAll(filepath.Dir(file), permissions)
	if err := ioutil.WriteFile(file, b, permissions); err != nil {
		log.Fatalf("Could not create file: %v", err)
	}
	return ioutil.NopCloser(bytes.NewReader(b))
}
// get fetches root/path over HTTP and returns the response body. Any error or
// non-200 status is fatal. The caller must close the returned body.
func get(root, path string) io.ReadCloser {
	url := root + "/" + path
	fmt.Printf("Fetching %s...", url)
	defer fmt.Println(" done.")
	resp, err := http.Get(url)
	if err != nil {
		log.Fatalf("HTTP GET: %v", err)
	}
	if resp.StatusCode != 200 {
		log.Fatalf("Bad GET status for %q: %q", url, resp.Status)
	}
	return resp.Body
}
// TODO: use Write*Version in all applicable packages.

// WriteUnicodeVersion writes a constant for the Unicode version from which the
// tables are generated.
func WriteUnicodeVersion(w io.Writer) {
	fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
	fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
}

// WriteCLDRVersion writes a constant for the CLDR version from which the
// tables are generated.
func WriteCLDRVersion(w io.Writer) {
	fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n")
	fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion())
}
// WriteGoFile prepends a standard file comment and package statement to the
// given bytes, applies gofmt, and writes them to a file with the given name.
// It will call log.Fatal if there are any errors.
func WriteGoFile(filename, pkg string, b []byte) {
	f, err := os.Create(filename)
	if err != nil {
		log.Fatalf("Could not create file %s: %v", filename, err)
	}
	defer f.Close()
	if _, err := WriteGo(f, pkg, b); err != nil {
		log.Fatalf("Error writing file %s: %v", filename, err)
	}
}
// WriteGo prepends a standard file comment and package statement to the given
// bytes, applies gofmt, and writes them to w. On a formatting error the
// unformatted source is still written so the error can be interpreted.
func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) {
	src := []byte(fmt.Sprintf(header, pkg))
	src = append(src, b...)
	formatted, err := format.Source(src)
	if err != nil {
		// Print the generated code even in case of an error so that the
		// returned error can be meaningfully interpreted.
		n, _ = w.Write(src)
		return n, err
	}
	return w.Write(formatted)
}
// Repackage rewrites a Go file from belonging to package main to belonging to
// the given package. It drops everything up to and including the
// "package main" clause and re-emits the remainder via WriteGoFile.
func Repackage(inFile, outFile, pkg string) {
	src, err := ioutil.ReadFile(inFile)
	if err != nil {
		log.Fatalf("reading %s: %v", inFile, err)
	}
	const toDelete = "package main\n\n"
	i := bytes.Index(src, []byte(toDelete))
	if i < 0 {
		log.Fatalf("Could not find %q in %s.", toDelete, inFile)
	}
	w := &bytes.Buffer{}
	w.Write(src[i+len(toDelete):])
	WriteGoFile(outFile, pkg, w.Bytes())
}

100
vendor/golang.org/x/text/unicode/cldr/base.go generated vendored Normal file
View File

@ -0,0 +1,100 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"encoding/xml"
"regexp"
"strconv"
)
// Elem is implemented by every XML element.
type Elem interface {
	setEnclosing(Elem)
	setName(string)
	enclosing() Elem
	GetCommon() *Common
}

// hidden holds the raw character data plus the optional <alias> and <default>
// sub-elements shared by all elements; it is embedded in Common.
type hidden struct {
	CharData string `xml:",chardata"`
	Alias    *struct {
		Common
		Source string `xml:"source,attr"`
		Path   string `xml:"path,attr"`
	} `xml:"alias"`
	Def *struct {
		Common
		Choice string `xml:"choice,attr,omitempty"`
		Type   string `xml:"type,attr,omitempty"`
	} `xml:"default"`
}

// Common holds several of the most common attributes and sub elements
// of an XML element.
type Common struct {
	XMLName         xml.Name
	name            string
	enclElem        Elem
	Type            string `xml:"type,attr,omitempty"`
	Reference       string `xml:"reference,attr,omitempty"`
	Alt             string `xml:"alt,attr,omitempty"`
	ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
	Draft           string `xml:"draft,attr,omitempty"`
	hidden
}
// Default returns the default type to select from the enclosed list
// or "" if no default value is specified.
func (e *Common) Default() string {
	if e.Def == nil {
		return ""
	}
	if c := e.Def.Choice; c != "" {
		return c
	}
	// Type is still used by the default element in collation; it is "" when
	// unset, which matches the documented fallback.
	return e.Def.Type
}
// GetCommon returns e. It is provided such that Common implements Elem.
func (e *Common) GetCommon() *Common {
	return e
}

// Data returns the character data accumulated for this element, with escaped
// Unicode codepoint notations replaced in place by the runes they denote.
func (e *Common) Data() string {
	e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
	return e.CharData
}

// setName records the element's XML tag name.
func (e *Common) setName(s string) {
	e.name = s
}

// enclosing returns the element recorded as containing this one.
func (e *Common) enclosing() Elem {
	return e.enclElem
}

// setEnclosing records the element containing this one.
func (e *Common) setEnclosing(en Elem) {
	e.enclElem = en
}

// Escape characters that can be escaped without further escaping the string.
var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string.
// It assumes the input string is correctly formatted.
func replaceUnicode(s string) string {
	if s[1] == '#' {
		// XML numeric character reference of the form &#xHHHH;.
		code, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
		return string(rune(code))
	}
	// Go-style escape sequence: \uHHHH, \UHHHHHHHH, \xHH, \OOO, or \a etc.
	r, _, _, _ := strconv.UnquoteChar(s, 0)
	return string(r)
}

130
vendor/golang.org/x/text/unicode/cldr/cldr.go generated vendored Normal file
View File

@ -0,0 +1,130 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run makexml.go -output xml.go
// Package cldr provides a parser for LDML and related XML formats.
// This package is intended to be used by the table generation tools
// for the various internationalization-related packages.
// As the XML types are generated from the CLDR DTD, and as the CLDR standard
// is periodically amended, this package may change considerably over time.
// This mostly means that data may appear and disappear between versions.
// That is, old code should keep compiling for newer versions, but data
// may have moved or changed.
// CLDR version 22 is the first version supported by this package.
// Older versions may not work.
package cldr // import "golang.org/x/text/unicode/cldr"
import (
"fmt"
"sort"
)
// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
type CLDR struct {
	parent   map[string][]string
	locale   map[string]*LDML // unresolved LDML, keyed by locale
	resolved map[string]*LDML // cache of resolved LDML, keyed by locale
	bcp47    *LDMLBCP47
	supp     *SupplementalData
}

// makeCLDR returns an empty CLDR with all maps and sub-structures initialized.
func makeCLDR() *CLDR {
	return &CLDR{
		parent:   make(map[string][]string),
		locale:   make(map[string]*LDML),
		resolved: make(map[string]*LDML),
		bcp47:    &LDMLBCP47{},
		supp:     &SupplementalData{},
	}
}
// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
func (cldr *CLDR) BCP47() *LDMLBCP47 {
	// NOTE(review): this always returns nil and never exposes the parsed
	// cldr.bcp47 field, contradicting the comment above. Verify against
	// upstream golang.org/x/text before changing vendored behavior.
	return nil
}
// Draft indicates the draft level of an element.
type Draft int

// Draft levels, ordered from most to least mature.
const (
	Approved Draft = iota
	Contributed
	Provisional
	Unconfirmed
)

// drafts lists the level names from least to most mature; the trailing empty
// string corresponds to Approved.
var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}
// ParseDraft returns the Draft value corresponding to the given string. The
// empty string corresponds to Approved.
func ParseDraft(level string) (Draft, error) {
	if level == "" {
		return Approved, nil
	}
	for i, name := range drafts {
		if name == level {
			return Unconfirmed - Draft(i), nil
		}
	}
	return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
}

// String returns the lowercase name of the draft level.
func (d Draft) String() string {
	return drafts[len(drafts)-1-int(d)]
}
// SetDraftLevel sets which draft levels to include in the evaluated LDML.
// Any draft element for which the draft level is higher than lev will be excluded.
// If multiple draft levels are available for a single element, the one with the
// lowest draft level will be selected, unless preferDraft is true, in which case
// the highest draft will be chosen.
// It is assumed that the underlying LDML is canonicalized.
func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
	// TODO: implement; for now this only invalidates the resolution cache.
	cldr.resolved = make(map[string]*LDML)
}

// RawLDML returns the LDML XML for id in unresolved form.
// id must be one of the strings returned by Locales.
func (cldr *CLDR) RawLDML(loc string) *LDML {
	return cldr.locale[loc]
}

// LDML returns the fully resolved LDML XML for loc, which must be one of
// the strings returned by Locales.
func (cldr *CLDR) LDML(loc string) (*LDML, error) {
	return cldr.resolve(loc)
}

// Supplemental returns the parsed supplemental data. If no such data was parsed,
// nil is returned.
func (cldr *CLDR) Supplemental() *SupplementalData {
	return cldr.supp
}
// Locales returns the locales for which there exist files.
// Valid sublocales for which there is no file are not included.
// The root locale is always sorted first.
func (cldr *CLDR) Locales() []string {
	// "root" is pre-seeded so it can stay first; if no root file exists it is
	// stripped again below.
	loc := []string{"root"}
	hasRoot := false
	// Idiom fix: drop the redundant blank identifier (was "for l, _ := range"),
	// which go vet flags; behavior is unchanged.
	for l := range cldr.locale {
		if l == "root" {
			hasRoot = true
			continue
		}
		loc = append(loc, l)
	}
	// Sort everything after the pre-seeded "root" entry.
	sort.Strings(loc[1:])
	if !hasRoot {
		return loc[1:]
	}
	return loc
}
// Get fills in the fields of x based on the XPath path.
// It delegates traversal to walkXPath.
func Get(e Elem, path string) (res Elem, err error) {
	return walkXPath(e, path)
}

359
vendor/golang.org/x/text/unicode/cldr/collate.go generated vendored Normal file
View File

@ -0,0 +1,359 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"bufio"
"encoding/xml"
"errors"
"fmt"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// RuleProcessor can be passed to Collator's Process method, which
// parses the rules and calls the respective method for each rule found.
type RuleProcessor interface {
Reset(anchor string, before int) error
Insert(level int, str, context, extend string) error
Index(id string)
}
const (
// cldrIndex is a Unicode-reserved sentinel value used to mark the start
// of a grouping within an index.
// We ignore any rule that starts with this rune.
// See http://unicode.org/reports/tr35/#Collation_Elements for details.
cldrIndex = "\uFDD0"
// specialAnchor is the format in which to represent logical reset positions,
// such as "first tertiary ignorable".
specialAnchor = "<%s/>"
)
// Process parses the rules for the tailorings of this collation
// and calls the respective methods of p for each rule found.
func (c Collation) Process(p RuleProcessor) (err error) {
	// A <cr> element carries rules in the textual Collation Rule Syntax;
	// it takes precedence over the legacy XML rule encoding below.
	if len(c.Cr) > 0 {
		if len(c.Cr) > 1 {
			return fmt.Errorf("multiple cr elements, want 0 or 1")
		}
		return processRules(p, c.Cr[0].Data())
	}
	// Fall back to the XML-element encoding used by CLDR 24 and older.
	if c.Rules.Any != nil {
		return c.processXML(p)
	}
	return errors.New("no tailoring data")
}
// processRules parses rules in the Collation Rule Syntax defined in
// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
func processRules(p RuleProcessor, s string) (err error) {
	// chk latches the first error encountered while passing through the
	// string result, so parse helpers can be chained without per-call checks.
	chk := func(s string, e error) string {
		if err == nil {
			err = e
		}
		return s
	}
	i := 0 // Save the line number for use after the loop.
	scanner := bufio.NewScanner(strings.NewReader(s))
	// Outer loop: one line at a time; stops early once err is set.
	// Inner loop: consume rule tokens until end of line or a '#' comment.
	for ; scanner.Scan() && err == nil; i++ {
		for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
			level := 5
			var ch byte
			switch ch, s = s[0], s[1:]; ch {
			case '&': // followed by <anchor> or '[' <key> ']'
				if s = skipSpace(s); consume(&s, '[') {
					s = chk(parseSpecialAnchor(p, s))
				} else {
					s = chk(parseAnchor(p, 0, s))
				}
			case '<': // sort relation '<'{1,4}, optionally followed by '*'.
				for level = 1; consume(&s, '<'); level++ {
				}
				if level > 4 {
					err = fmt.Errorf("level %d > 4", level)
				}
				fallthrough
			case '=': // identity relation, optionally followed by *.
				if consume(&s, '*') {
					s = chk(parseSequence(p, level, s))
				} else {
					s = chk(parseOrder(p, level, s))
				}
			default:
				chk("", fmt.Errorf("illegal operator %q", ch))
				break
			}
		}
	}
	// Record any scanner error, then report the latched error (if any)
	// prefixed with the line number at which scanning stopped.
	if chk("", scanner.Err()); err != nil {
		return fmt.Errorf("%d: %v", i, err)
	}
	return nil
}
// parseSpecialAnchor parses the anchor syntax which is either of the form
//
//	['before' <level>] <anchor>
//
// or
//
//	[<label>]
//
// The starting '[' should already be consumed. It returns the unconsumed
// remainder of s.
func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {
	i := strings.IndexByte(s, ']')
	if i == -1 {
		return "", errors.New("unmatched bracket")
	}
	a := strings.TrimSpace(s[:i])
	s = s[i+1:]
	if strings.HasPrefix(a, "before ") {
		// "before <level>": level is a small decimal (bitSize 3 limits it to 0-7).
		l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3)
		if err != nil {
			return s, err
		}
		return parseAnchor(p, int(l), s)
	}
	// A plain label is a logical reset position, e.g. "first tertiary ignorable";
	// wrap it in the specialAnchor markup form.
	return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)
}
// parseAnchor scans the anchor string at the start of s and resets the
// processor to that anchor at the given before-level. It returns the
// unconsumed remainder of s.
func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) {
	var anchor string
	if anchor, s, err = scanString(s); err != nil {
		return s, err
	}
	err = p.Reset(anchor, level)
	return s, err
}
// parseOrder parses a single sort-relation operand: a value string,
// optionally followed by '|' <context> and/or '/' <extend>, and feeds it
// to p. A value starting with the cldrIndex sentinel is an index marker
// rather than a rule. It returns the unconsumed remainder of s.
func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) {
	var value, context, extend string
	if value, s, err = scanString(s); err != nil {
		return s, err
	}
	// Index markers delimit groupings; forward them and stop parsing
	// this operand.
	if strings.HasPrefix(value, cldrIndex) {
		p.Index(value[len(cldrIndex):])
		return
	}
	if consume(&s, '|') {
		if context, s, err = scanString(s); err != nil {
			return s, errors.New("missing string after context")
		}
	}
	if consume(&s, '/') {
		if extend, s, err = scanString(s); err != nil {
			return s, errors.New("missing string after extension")
		}
	}
	return s, p.Insert(level, value, context, extend)
}
// scanString scans a single input string. Runs of characters may be
// single-quoted; an empty pair of quotes ('') denotes a literal single
// quote. Scanning stops at whitespace or at one of the syntax characters
// "&<=#". It returns the scanned string and the unconsumed remainder of s
// (with leading whitespace stripped).
func scanString(s string) (str, tail string, err error) {
	if s = skipSpace(s); s == "" {
		return s, s, errors.New("missing string")
	}
	buf := [16]byte{} // small but enough to hold most cases.
	value := buf[:0]
	for s != "" {
		if consume(&s, '\'') {
			// Quoted section: copy verbatim up to the closing quote.
			i := strings.IndexByte(s, '\'')
			if i == -1 {
				return "", "", errors.New(`unmatched single quote`)
			}
			if i == 0 {
				// '' is an escaped single quote.
				value = append(value, '\'')
			} else {
				value = append(value, s[:i]...)
			}
			s = s[i+1:]
			continue
		}
		r, sz := utf8.DecodeRuneInString(s)
		if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) {
			break
		}
		value = append(value, s[:sz]...)
		s = s[sz:]
	}
	return string(value), skipSpace(s), nil
}
// parseSequence parses a '*'-abbreviated sequence of single-rune operands,
// inserting each rune at the given level. A '-' between two runes expands
// to the inclusive range between them (the first endpoint has already been
// inserted when the '-' is seen). Parsing stops at whitespace or
// punctuation. It returns the unconsumed remainder of s.
func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) {
	if s = skipSpace(s); s == "" {
		return s, errors.New("empty sequence")
	}
	// last holds the most recently inserted rune; it is the lower bound
	// for a following '-' range and is cleared after a range is expanded.
	last := rune(0)
	for s != "" {
		r, sz := utf8.DecodeRuneInString(s)
		s = s[sz:]
		if r == '-' {
			// We have a range. The first element was already written.
			if last == 0 {
				return s, errors.New("range without starter value")
			}
			r, sz = utf8.DecodeRuneInString(s)
			s = s[sz:]
			if r == utf8.RuneError || r < last {
				return s, fmt.Errorf("invalid range %q-%q", last, r)
			}
			// Insert the remaining runes of the range; last itself was
			// already inserted as a normal element.
			for i := last + 1; i <= r; i++ {
				if err := p.Insert(level, string(i), "", ""); err != nil {
					return s, err
				}
			}
			last = 0
			continue
		}
		if unicode.IsSpace(r) || unicode.IsPunct(r) {
			break
		}
		// normal case
		if err := p.Insert(level, string(r), "", ""); err != nil {
			return s, err
		}
		last = r
	}
	return s, nil
}
// skipSpace returns s with any leading Unicode whitespace removed.
func skipSpace(s string) string {
	isWS := func(r rune) bool { return unicode.IsSpace(r) }
	return strings.TrimLeftFunc(s, isWS)
}
// consume reports whether the first byte of *s equals ch and, if it does,
// advances *s past that byte.
func consume(s *string, ch byte) (ok bool) {
	str := *s
	if len(str) > 0 && str[0] == ch {
		*s = str[1:]
		return true
	}
	return false
}
// The following code parses Collation rules of CLDR version 24 and before.
var lmap = map[byte]int{
'p': 1,
's': 2,
't': 3,
'i': 5,
}
type rulesElem struct {
Rules struct {
Common
Any []*struct {
XMLName xml.Name
rule
} `xml:",any"`
} `xml:"rules"`
}
type rule struct {
Value string `xml:",chardata"`
Before string `xml:"before,attr"`
Any []*struct {
XMLName xml.Name
rule
} `xml:",any"`
}
var emptyValueError = errors.New("cldr: empty rule value")
func (r *rule) value() (string, error) {
// Convert hexadecimal Unicode codepoint notation to a string.
s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode)
r.Value = s
if s == "" {
if len(r.Any) != 1 {
return "", emptyValueError
}
r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local)
r.Any = nil
} else if len(r.Any) != 0 {
return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any)
}
return r.Value, nil
}
func (r rule) process(p RuleProcessor, name, context, extend string) error {
v, err := r.value()
if err != nil {
return err
}
switch name {
case "p", "s", "t", "i":
if strings.HasPrefix(v, cldrIndex) {
p.Index(v[len(cldrIndex):])
return nil
}
if err := p.Insert(lmap[name[0]], v, context, extend); err != nil {
return err
}
case "pc", "sc", "tc", "ic":
level := lmap[name[0]]
for _, s := range v {
if err := p.Insert(level, string(s), context, extend); err != nil {
return err
}
}
default:
return fmt.Errorf("cldr: unsupported tag: %q", name)
}
return nil
}
// processXML parses the format of CLDR versions 24 and older, in which
// rules are encoded as XML elements rather than rule-syntax text, and
// feeds each rule to p.
func (c Collation) processXML(p RuleProcessor) (err error) {
	// Collation is generated and defined in xml.go.
	var v string
	for _, r := range c.Rules.Any {
		switch r.XMLName.Local {
		case "reset":
			level := 0
			switch r.Before {
			case "primary", "1":
				level = 1
			case "secondary", "2":
				level = 2
			case "tertiary", "3":
				level = 3
			case "":
			default:
				return fmt.Errorf("cldr: unknown level %q", r.Before)
			}
			v, err = r.value()
			if err == nil {
				err = p.Reset(v, level)
			}
		case "x":
			// An <x> element groups a rule with optional <context> and
			// <extend> children; collect those first, then process the rest.
			var context, extend string
			for _, r1 := range r.Any {
				// Check each value immediately: previously only the error of
				// the *last* child survived the loop and earlier ones were
				// silently dropped.
				if v, err = r1.value(); err != nil {
					return err
				}
				switch r1.XMLName.Local {
				case "context":
					context = v
				case "extend":
					extend = v
				}
			}
			for _, r1 := range r.Any {
				if t := r1.XMLName.Local; t == "context" || t == "extend" {
					continue
				}
				// Propagate processing errors; they were previously ignored.
				if err = r1.rule.process(p, r1.XMLName.Local, context, extend); err != nil {
					return err
				}
			}
		default:
			err = r.rule.process(p, r.XMLName.Local, "", "")
		}
		if err != nil {
			return err
		}
	}
	return nil
}

171
vendor/golang.org/x/text/unicode/cldr/decode.go generated vendored Normal file
View File

@ -0,0 +1,171 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"archive/zip"
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
)
// A Decoder loads an archive of CLDR data.
type Decoder struct {
dirFilter []string
sectionFilter []string
loader Loader
cldr *CLDR
curLocale string
}
// SetSectionFilter takes a list top-level LDML element names to which
// evaluation of LDML should be limited. It automatically calls SetDirFilter.
func (d *Decoder) SetSectionFilter(filter ...string) {
d.sectionFilter = filter
// TODO: automatically set dir filter
}
// SetDirFilter limits the loading of LDML XML files of the specied directories.
// Note that sections may be split across directories differently for different CLDR versions.
// For more robust code, use SetSectionFilter.
func (d *Decoder) SetDirFilter(dir ...string) {
d.dirFilter = dir
}
// A Loader provides access to the files of a CLDR archive.
type Loader interface {
Len() int
Path(i int) string
Reader(i int) (io.ReadCloser, error)
}
var fileRe = regexp.MustCompile(".*/(.*)/(.*)\\.xml")
// Decode loads and decodes the files represented by l.
func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) {
d.cldr = makeCLDR()
for i := 0; i < l.Len(); i++ {
fname := l.Path(i)
if m := fileRe.FindStringSubmatch(fname); m != nil {
if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) {
continue
}
var r io.Reader
if r, err = l.Reader(i); err == nil {
err = d.decode(m[1], m[2], r)
}
if err != nil {
return nil, err
}
}
}
d.cldr.finalize(d.sectionFilter)
return d.cldr, nil
}
func (d *Decoder) decode(dir, id string, r io.Reader) error {
var v interface{}
var l *LDML
cldr := d.cldr
switch {
case dir == "supplemental":
v = cldr.supp
case dir == "transforms":
return nil
case dir == "bcp47":
v = cldr.bcp47
case dir == "validity":
return nil
default:
ok := false
if v, ok = cldr.locale[id]; !ok {
l = &LDML{}
v, cldr.locale[id] = l, l
}
}
x := xml.NewDecoder(r)
if err := x.Decode(v); err != nil {
log.Printf("%s/%s: %v", dir, id, err)
return err
}
if l != nil {
if l.Identity == nil {
return fmt.Errorf("%s/%s: missing identity element", dir, id)
}
// TODO: verify when CLDR bug http://unicode.org/cldr/trac/ticket/8970
// is resolved.
// path := strings.Split(id, "_")
// if lang := l.Identity.Language.Type; lang != path[0] {
// return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
// }
}
return nil
}
type pathLoader []string
func makePathLoader(path string) (pl pathLoader, err error) {
err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error {
pl = append(pl, path)
return err
})
return pl, err
}
func (pl pathLoader) Len() int {
return len(pl)
}
func (pl pathLoader) Path(i int) string {
return pl[i]
}
func (pl pathLoader) Reader(i int) (io.ReadCloser, error) {
return os.Open(pl[i])
}
// DecodePath loads CLDR data from the given path.
func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) {
loader, err := makePathLoader(path)
if err != nil {
return nil, err
}
return d.Decode(loader)
}
type zipLoader struct {
r *zip.Reader
}
func (zl zipLoader) Len() int {
return len(zl.r.File)
}
func (zl zipLoader) Path(i int) string {
return zl.r.File[i].Name
}
func (zl zipLoader) Reader(i int) (io.ReadCloser, error) {
return zl.r.File[i].Open()
}
// DecodeZip loads CLDR data from the zip archive for which r is the source.
func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) {
buffer, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
if err != nil {
return nil, err
}
return d.Decode(zipLoader{archive})
}

400
vendor/golang.org/x/text/unicode/cldr/makexml.go generated vendored Normal file
View File

@ -0,0 +1,400 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// This tool generates types for the various XML formats of CLDR.
package main
import (
"archive/zip"
"bytes"
"encoding/xml"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"regexp"
"strings"
"golang.org/x/text/internal/gen"
)
var outputFile = flag.String("output", "xml.go", "output file name")
func main() {
flag.Parse()
r := gen.OpenCLDRCoreZip()
buffer, err := ioutil.ReadAll(r)
if err != nil {
log.Fatal("Could not read zip file")
}
r.Close()
z, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
if err != nil {
log.Fatalf("Could not read zip archive: %v", err)
}
var buf bytes.Buffer
version := gen.CLDRVersion()
for _, dtd := range files {
for _, f := range z.File {
if strings.HasSuffix(f.Name, dtd.file+".dtd") {
r, err := f.Open()
failOnError(err)
b := makeBuilder(&buf, dtd)
b.parseDTD(r)
b.resolve(b.index[dtd.top[0]])
b.write()
if b.version != "" && version != b.version {
println(f.Name)
log.Fatalf("main: inconsistent versions: found %s; want %s", b.version, version)
}
break
}
}
}
fmt.Fprintln(&buf, "// Version is the version of CLDR from which the XML definitions are generated.")
fmt.Fprintf(&buf, "const Version = %q\n", version)
gen.WriteGoFile(*outputFile, "cldr", buf.Bytes())
}
func failOnError(err error) {
if err != nil {
log.New(os.Stderr, "", log.Lshortfile).Output(2, err.Error())
os.Exit(1)
}
}
// configuration data per DTD type
type dtd struct {
file string // base file name
root string // Go name of the root XML element
top []string // create a different type for this section
skipElem []string // hard-coded or deprecated elements
skipAttr []string // attributes to exclude
predefined []string // hard-coded elements exist of the form <name>Elem
forceRepeat []string // elements to make slices despite DTD
}
var files = []dtd{
{
file: "ldmlBCP47",
root: "LDMLBCP47",
top: []string{"ldmlBCP47"},
skipElem: []string{
"cldrVersion", // deprecated, not used
},
},
{
file: "ldmlSupplemental",
root: "SupplementalData",
top: []string{"supplementalData"},
skipElem: []string{
"cldrVersion", // deprecated, not used
},
forceRepeat: []string{
"plurals", // data defined in plurals.xml and ordinals.xml
},
},
{
file: "ldml",
root: "LDML",
top: []string{
"ldml", "collation", "calendar", "timeZoneNames", "localeDisplayNames", "numbers",
},
skipElem: []string{
"cp", // not used anywhere
"special", // not used anywhere
"fallback", // deprecated, not used
"alias", // in Common
"default", // in Common
},
skipAttr: []string{
"hiraganaQuarternary", // typo in DTD, correct version included as well
},
predefined: []string{"rules"},
},
}
var comments = map[string]string{
"ldmlBCP47": `
// LDMLBCP47 holds information on allowable values for various variables in LDML.
`,
"supplementalData": `
// SupplementalData holds information relevant for internationalization
// and proper use of CLDR, but that is not contained in the locale hierarchy.
`,
"ldml": `
// LDML is the top-level type for locale-specific data.
`,
"collation": `
// Collation contains rules that specify a certain sort-order,
// as a tailoring of the root order.
// The parsed rules are obtained by passing a RuleProcessor to Collation's
// Process method.
`,
"calendar": `
// Calendar specifies the fields used for formatting and parsing dates and times.
// The month and quarter names are identified numerically, starting at 1.
// The day (of the week) names are identified with short strings, since there is
// no universally-accepted numeric designation.
`,
"dates": `
// Dates contains information regarding the format and parsing of dates and times.
`,
"localeDisplayNames": `
// LocaleDisplayNames specifies localized display names for for scripts, languages,
// countries, currencies, and variants.
`,
"numbers": `
// Numbers supplies information for formatting and parsing numbers and currencies.
`,
}
type element struct {
name string // XML element name
category string // elements contained by this element
signature string // category + attrKey*
attr []*attribute // attributes supported by this element.
sub []struct { // parsed and evaluated sub elements of this element.
e *element
repeat bool // true if the element needs to be a slice
}
resolved bool // prevent multiple resolutions of this element.
}
type attribute struct {
name string
key string
list []string
tag string // Go tag
}
var (
reHead = regexp.MustCompile(` *(\w+) +([\w\-]+)`)
reAttr = regexp.MustCompile(` *(\w+) *(?:(\w+)|\(([\w\- \|]+)\)) *(?:#([A-Z]*) *(?:\"([\.\d+])\")?)? *("[\w\-:]*")?`)
reElem = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`)
reToken = regexp.MustCompile(`\w\-`)
)
// builder is used to read in the DTD files from CLDR and generate Go code
// to be used with the encoding/xml package.
type builder struct {
w io.Writer
index map[string]*element
elem []*element
info dtd
version string
}
func makeBuilder(w io.Writer, d dtd) builder {
return builder{
w: w,
index: make(map[string]*element),
elem: []*element{},
info: d,
}
}
// parseDTD parses a DTD file, recording each ELEMENT and ATTLIST
// directive into b.index. Malformed input is fatal: this is a one-shot
// code generator, not a library.
func (b *builder) parseDTD(r io.Reader) {
	for d := xml.NewDecoder(r); ; {
		t, err := d.Token()
		if t == nil {
			break
		}
		failOnError(err)
		// Only <!...> directives are of interest; skip all other tokens.
		dir, ok := t.(xml.Directive)
		if !ok {
			continue
		}
		m := reHead.FindSubmatch(dir)
		dir = dir[len(m[0]):]
		ename := string(m[2])
		el, elementFound := b.index[ename]
		switch string(m[1]) {
		case "ELEMENT":
			if elementFound {
				// log.Fatalf, not log.Fatal: the message contains a %q verb.
				log.Fatalf("parseDTD: duplicate entry for element %q", ename)
			}
			m := reElem.FindSubmatch(dir)
			if m == nil {
				log.Fatalf("parseDTD: invalid element %q", string(dir))
			}
			if len(m[0]) != len(dir) {
				// Include the length diagnostics in the format string instead
				// of passing them as stray arguments to a non-f Fatal.
				log.Fatalf("parseDTD: invalid element %q (len(dir)=%d, len(m[0])=%d, m[0]=%q)", string(dir), len(dir), len(m[0]), string(m[0]))
			}
			s := string(m[1])
			el = &element{
				name:     ename,
				category: s,
			}
			b.index[ename] = el
		case "ATTLIST":
			if !elementFound {
				log.Fatalf("parseDTD: unknown element %q", ename)
			}
			s := string(dir)
			m := reAttr.FindStringSubmatch(s)
			if m == nil {
				log.Fatal(fmt.Errorf("parseDTD: invalid attribute %q", string(dir)))
			}
			// A #FIXED attribute carries the CLDR version number.
			if m[4] == "FIXED" {
				b.version = m[5]
			} else {
				switch m[1] {
				// Attributes handled by the shared Common struct or skipped
				// entirely are not recorded per element.
				case "draft", "references", "alt", "validSubLocales", "standard" /* in Common */ :
				case "type", "choice":
				default:
					el.attr = append(el.attr, &attribute{
						name: m[1],
						key:  s,
						list: reToken.FindAllString(m[3], -1),
					})
					el.signature = fmt.Sprintf("%s=%s+%s", el.signature, m[1], m[2])
				}
			}
		}
	}
}
var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`)
// resolve takes a parsed element and converts it into structured data
// that can be used to generate the XML code.
func (b *builder) resolve(e *element) {
if e.resolved {
return
}
b.elem = append(b.elem, e)
e.resolved = true
s := e.category
found := make(map[string]bool)
sequenceStart := []int{}
for len(s) > 0 {
m := reCat.FindStringSubmatch(s)
if m == nil {
log.Fatalf("%s: invalid category string %q", e.name, s)
}
repeat := m[2] == "*" || m[2] == "+" || in(b.info.forceRepeat, m[1])
switch m[1] {
case "":
case "(":
sequenceStart = append(sequenceStart, len(e.sub))
case ")":
if len(sequenceStart) == 0 {
log.Fatalf("%s: unmatched closing parenthesis", e.name)
}
for i := sequenceStart[len(sequenceStart)-1]; i < len(e.sub); i++ {
e.sub[i].repeat = e.sub[i].repeat || repeat
}
sequenceStart = sequenceStart[:len(sequenceStart)-1]
default:
if in(b.info.skipElem, m[1]) {
} else if sub, ok := b.index[m[1]]; ok {
if !found[sub.name] {
e.sub = append(e.sub, struct {
e *element
repeat bool
}{sub, repeat})
found[sub.name] = true
b.resolve(sub)
}
} else if m[1] == "#PCDATA" || m[1] == "ANY" {
} else if m[1] != "EMPTY" {
log.Fatalf("resolve:%s: element %q not found", e.name, m[1])
}
}
s = s[len(m[0]):]
}
}
// in reports whether s occurs in set.
func in(set []string, s string) bool {
	for i := 0; i < len(set); i++ {
		if set[i] == s {
			return true
		}
	}
	return false
}
var repl = strings.NewReplacer("-", " ", "_", " ")
// title puts the first character or each character following '_' in title case and
// removes all occurrences of '_'.
func title(s string) string {
return strings.Replace(strings.Title(repl.Replace(s)), " ", "", -1)
}
// writeElem generates Go code for a single element, recursively.
func (b *builder) writeElem(tab int, e *element) {
p := func(f string, x ...interface{}) {
f = strings.Replace(f, "\n", "\n"+strings.Repeat("\t", tab), -1)
fmt.Fprintf(b.w, f, x...)
}
if len(e.sub) == 0 && len(e.attr) == 0 {
p("Common")
return
}
p("struct {")
tab++
p("\nCommon")
for _, attr := range e.attr {
if !in(b.info.skipAttr, attr.name) {
p("\n%s string `xml:\"%s,attr\"`", title(attr.name), attr.name)
}
}
for _, sub := range e.sub {
if in(b.info.predefined, sub.e.name) {
p("\n%sElem", sub.e.name)
continue
}
if in(b.info.skipElem, sub.e.name) {
continue
}
p("\n%s ", title(sub.e.name))
if sub.repeat {
p("[]")
}
p("*")
if in(b.info.top, sub.e.name) {
p(title(sub.e.name))
} else {
b.writeElem(tab, sub.e)
}
p(" `xml:\"%s\"`", sub.e.name)
}
tab--
p("\n}")
}
// write generates the Go XML code for every top-level type of the DTD.
// The first top-level element is emitted under the configured root name.
func (b *builder) write() {
	for i, name := range b.info.top {
		e := b.index[name]
		if e != nil {
			// comments[name] is plain text, not a format string; use Fprint
			// so a literal '%' in a comment cannot corrupt the output
			// (Fprintf with a non-constant format is a vet error).
			fmt.Fprint(b.w, comments[name])
			name := title(e.name)
			if i == 0 {
				name = b.info.root
			}
			fmt.Fprintf(b.w, "type %s ", name)
			b.writeElem(0, e)
			fmt.Fprint(b.w, "\n")
		}
	}
}

602
vendor/golang.org/x/text/unicode/cldr/resolve.go generated vendored Normal file
View File

@ -0,0 +1,602 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
// This file implements the various inheritance constructs defined by LDML.
// See http://www.unicode.org/reports/tr35/#Inheritance_and_Validity
// for more details.
import (
"fmt"
"log"
"reflect"
"regexp"
"sort"
"strings"
)
// fieldIter iterates over fields in a struct. It includes
// fields of embedded structs.
type fieldIter struct {
v reflect.Value
index, n []int
}
func iter(v reflect.Value) fieldIter {
if v.Kind() != reflect.Struct {
log.Panicf("value %v must be a struct", v)
}
i := fieldIter{
v: v,
index: []int{0},
n: []int{v.NumField()},
}
i.descent()
return i
}
func (i *fieldIter) descent() {
for f := i.field(); f.Anonymous && f.Type.NumField() > 0; f = i.field() {
i.index = append(i.index, 0)
i.n = append(i.n, f.Type.NumField())
}
}
func (i *fieldIter) done() bool {
return len(i.index) == 1 && i.index[0] >= i.n[0]
}
func skip(f reflect.StructField) bool {
return !f.Anonymous && (f.Name[0] < 'A' || f.Name[0] > 'Z')
}
func (i *fieldIter) next() {
for {
k := len(i.index) - 1
i.index[k]++
if i.index[k] < i.n[k] {
if !skip(i.field()) {
break
}
} else {
if k == 0 {
return
}
i.index = i.index[:k]
i.n = i.n[:k]
}
}
i.descent()
}
func (i *fieldIter) value() reflect.Value {
return i.v.FieldByIndex(i.index)
}
func (i *fieldIter) field() reflect.StructField {
return i.v.Type().FieldByIndex(i.index)
}
type visitor func(v reflect.Value) error
var stopDescent = fmt.Errorf("do not recurse")
func (f visitor) visit(x interface{}) error {
return f.visitRec(reflect.ValueOf(x))
}
// visit recursively calls f on all nodes in v.
func (f visitor) visitRec(v reflect.Value) error {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return nil
}
return f.visitRec(v.Elem())
}
if err := f(v); err != nil {
if err == stopDescent {
return nil
}
return err
}
switch v.Kind() {
case reflect.Struct:
for i := iter(v); !i.done(); i.next() {
if err := f.visitRec(i.value()); err != nil {
return err
}
}
case reflect.Slice:
for i := 0; i < v.Len(); i++ {
if err := f.visitRec(v.Index(i)); err != nil {
return err
}
}
}
return nil
}
// getPath is used for error reporting purposes only.
func getPath(e Elem) string {
if e == nil {
return "<nil>"
}
if e.enclosing() == nil {
return e.GetCommon().name
}
if e.GetCommon().Type == "" {
return fmt.Sprintf("%s.%s", getPath(e.enclosing()), e.GetCommon().name)
}
return fmt.Sprintf("%s.%s[type=%s]", getPath(e.enclosing()), e.GetCommon().name, e.GetCommon().Type)
}
// xmlName returns the xml name of the element or attribute
func xmlName(f reflect.StructField) (name string, attr bool) {
tags := strings.Split(f.Tag.Get("xml"), ",")
for _, s := range tags {
attr = attr || s == "attr"
}
return tags[0], attr
}
func findField(v reflect.Value, key string) (reflect.Value, error) {
v = reflect.Indirect(v)
for i := iter(v); !i.done(); i.next() {
if n, _ := xmlName(i.field()); n == key {
return i.value(), nil
}
}
return reflect.Value{}, fmt.Errorf("cldr: no field %q in element %#v", key, v.Interface())
}
// xpathPart matches one path component: a name with an optional
// [@attr='value'] selector.
var xpathPart = regexp.MustCompile(`(\pL+)(?:\[@(\pL+)='([\w-]+)'\])?`)

// walkXPath descends from e along the slash-separated path and returns
// the element it designates. A component is "..", a child element name,
// or name[@attr='value'] to select among repeated children; for repeated
// children without a selector, the element's default type is used.
func walkXPath(e Elem, path string) (res Elem, err error) {
	for _, c := range strings.Split(path, "/") {
		if c == ".." {
			if e = e.enclosing(); e == nil {
				// This used to panic first, which made the error return
				// unreachable dead code; report the malformed path instead.
				return nil, fmt.Errorf(`cldr: ".." moves past root in path %q`, path)
			}
			continue
		} else if c == "" {
			continue
		}
		m := xpathPart.FindStringSubmatch(c)
		if len(m) == 0 || len(m[0]) != len(c) {
			return nil, fmt.Errorf("cldr: syntax error in path component %q", c)
		}
		v, err := findField(reflect.ValueOf(e), m[1])
		if err != nil {
			return nil, err
		}
		switch v.Kind() {
		case reflect.Slice:
			// Repeated element: pick the child whose selector attribute
			// (default: "type") matches the requested value.
			i := 0
			if m[2] != "" || v.Len() > 1 {
				if m[2] == "" {
					m[2] = "type"
					if m[3] = e.GetCommon().Default(); m[3] == "" {
						return nil, fmt.Errorf("cldr: type selector or default value needed for element %s", m[1])
					}
				}
				for ; i < v.Len(); i++ {
					vi := v.Index(i)
					key, err := findField(vi.Elem(), m[2])
					if err != nil {
						return nil, err
					}
					key = reflect.Indirect(key)
					if key.Kind() == reflect.String && key.String() == m[3] {
						break
					}
				}
			}
			if i == v.Len() || v.Index(i).IsNil() {
				return nil, fmt.Errorf("no %s found with %s==%s", m[1], m[2], m[3])
			}
			e = v.Index(i).Interface().(Elem)
		case reflect.Ptr:
			if v.IsNil() {
				return nil, fmt.Errorf("cldr: element %q not found within element %q", m[1], e.GetCommon().name)
			}
			var ok bool
			if e, ok = v.Interface().(Elem); !ok {
				return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
			} else if m[2] != "" || m[3] != "" {
				return nil, fmt.Errorf("cldr: no type selector allowed for element %s", m[1])
			}
		default:
			return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
		}
	}
	return e, nil
}
const absPrefix = "//ldml/"
func (cldr *CLDR) resolveAlias(e Elem, src, path string) (res Elem, err error) {
if src != "locale" {
if !strings.HasPrefix(path, absPrefix) {
return nil, fmt.Errorf("cldr: expected absolute path, found %q", path)
}
path = path[len(absPrefix):]
if e, err = cldr.resolve(src); err != nil {
return nil, err
}
}
return walkXPath(e, path)
}
func (cldr *CLDR) resolveAndMergeAlias(e Elem) error {
alias := e.GetCommon().Alias
if alias == nil {
return nil
}
a, err := cldr.resolveAlias(e, alias.Source, alias.Path)
if err != nil {
return fmt.Errorf("%v: error evaluating path %q: %v", getPath(e), alias.Path, err)
}
// Ensure alias node was already evaluated. TODO: avoid double evaluation.
err = cldr.resolveAndMergeAlias(a)
v := reflect.ValueOf(e).Elem()
for i := iter(reflect.ValueOf(a).Elem()); !i.done(); i.next() {
if vv := i.value(); vv.Kind() != reflect.Ptr || !vv.IsNil() {
if _, attr := xmlName(i.field()); !attr {
v.FieldByIndex(i.index).Set(vv)
}
}
}
return err
}
func (cldr *CLDR) aliasResolver() visitor {
return func(v reflect.Value) (err error) {
if e, ok := v.Addr().Interface().(Elem); ok {
err = cldr.resolveAndMergeAlias(e)
if err == nil && blocking[e.GetCommon().name] {
return stopDescent
}
}
return err
}
}
// elements within blocking elements do not inherit.
// Taken from CLDR's supplementalMetaData.xml.
var blocking = map[string]bool{
"identity": true,
"supplementalData": true,
"cldrTest": true,
"collation": true,
"transform": true,
}
// Distinguishing attributes affect inheritance; two elements with different
// distinguishing attributes are treated as different for purposes of inheritance,
// except when such attributes occur in the indicated elements.
// Taken from CLDR's supplementalMetaData.xml.
var distinguishing = map[string][]string{
"key": nil,
"request_id": nil,
"id": nil,
"registry": nil,
"alt": nil,
"iso4217": nil,
"iso3166": nil,
"mzone": nil,
"from": nil,
"to": nil,
"type": []string{
"abbreviationFallback",
"default",
"mapping",
"measurementSystem",
"preferenceOrdering",
},
"numberSystem": nil,
}
// in reports whether s is one of the elements of set.
func in(set []string, s string) bool {
	found := false
	for i := range set {
		if set[i] == s {
			found = true
			break
		}
	}
	return found
}
// attrKey computes a key based on the distinguishable attributes of
// an element and it's values.
func attrKey(v reflect.Value, exclude ...string) string {
parts := []string{}
ename := v.Interface().(Elem).GetCommon().name
v = v.Elem()
for i := iter(v); !i.done(); i.next() {
if name, attr := xmlName(i.field()); attr {
if except, ok := distinguishing[name]; ok && !in(exclude, name) && !in(except, ename) {
v := i.value()
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.IsValid() {
parts = append(parts, fmt.Sprintf("%s=%s", name, v.String()))
}
}
}
}
sort.Strings(parts)
return strings.Join(parts, ";")
}
// Key returns a key for e derived from all distinguishing attributes
// except those specified by exclude.
func Key(e Elem, exclude ...string) string {
return attrKey(reflect.ValueOf(e), exclude...)
}
// linkEnclosing sets the enclosing element as well as the name
// for all sub-elements of child, recursively.
func linkEnclosing(parent, child Elem) {
child.setEnclosing(parent)
v := reflect.ValueOf(child).Elem()
for i := iter(v); !i.done(); i.next() {
vf := i.value()
if vf.Kind() == reflect.Slice {
for j := 0; j < vf.Len(); j++ {
linkEnclosing(child, vf.Index(j).Interface().(Elem))
}
} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
linkEnclosing(child, vf.Interface().(Elem))
}
}
}
func setNames(e Elem, name string) {
e.setName(name)
v := reflect.ValueOf(e).Elem()
for i := iter(v); !i.done(); i.next() {
vf := i.value()
name, _ = xmlName(i.field())
if vf.Kind() == reflect.Slice {
for j := 0; j < vf.Len(); j++ {
setNames(vf.Index(j).Interface().(Elem), name)
}
} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
setNames(vf.Interface().(Elem), name)
}
}
}
// deepCopy copies elements of v recursively. All elements of v that may
// be modified by inheritance are explicitly copied.
func deepCopy(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr:
if v.IsNil() || v.Elem().Kind() != reflect.Struct {
return v
}
nv := reflect.New(v.Elem().Type())
nv.Elem().Set(v.Elem())
deepCopyRec(nv.Elem(), v.Elem())
return nv
case reflect.Slice:
nv := reflect.MakeSlice(v.Type(), v.Len(), v.Len())
for i := 0; i < v.Len(); i++ {
deepCopyRec(nv.Index(i), v.Index(i))
}
return nv
}
panic("deepCopy: must be called with pointer or slice")
}
// deepCopyRec is only called by deepCopy.
func deepCopyRec(nv, v reflect.Value) {
if v.Kind() == reflect.Struct {
t := v.Type()
for i := 0; i < v.NumField(); i++ {
if name, attr := xmlName(t.Field(i)); name != "" && !attr {
deepCopyRec(nv.Field(i), v.Field(i))
}
}
} else {
nv.Set(deepCopy(v))
}
}
// newNode is used to insert a missing node during inheritance.
func (cldr *CLDR) newNode(v, enc reflect.Value) reflect.Value {
n := reflect.New(v.Type())
for i := iter(v); !i.done(); i.next() {
if name, attr := xmlName(i.field()); name == "" || attr {
n.Elem().FieldByIndex(i.index).Set(i.value())
}
}
n.Interface().(Elem).GetCommon().setEnclosing(enc.Addr().Interface().(Elem))
return n
}
// v, parent must be pointers to struct
//
// inheritFields returns a copy of struct value v in which every
// non-attribute XML field that is underspecified is filled in from the
// corresponding field of parent. Blocking elements are deep-copied rather
// than merged; pointer-to-struct fields recurse via inheritStructPtr and
// slices via inheritSlice.
// NOTE(review): v and parent are passed as struct values here (callers
// pass x.Elem()); the comment above predates that — confirm.
func (cldr *CLDR) inheritFields(v, parent reflect.Value) (res reflect.Value, err error) {
	t := v.Type()
	// nv is the new node, seeded with all of v's fields.
	nv := reflect.New(t)
	nv.Elem().Set(v)
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		f := i.field()
		name, attr := xmlName(f)
		// Skip attributes and fields without an XML name; they do not inherit.
		if name == "" || attr {
			continue
		}
		pf := parent.FieldByIndex(i.index)
		if blocking[name] {
			// Blocking elements do not inherit their contents; copy whichever
			// side is present wholesale.
			if vf.IsNil() {
				vf = pf
			}
			nv.Elem().FieldByIndex(i.index).Set(deepCopy(vf))
			continue
		}
		switch f.Type.Kind() {
		case reflect.Ptr:
			if f.Type.Elem().Kind() == reflect.Struct {
				if !vf.IsNil() {
					// Present in child: merge parent into it.
					if vf, err = cldr.inheritStructPtr(vf, pf); err != nil {
						return reflect.Value{}, err
					}
					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
					nv.Elem().FieldByIndex(i.index).Set(vf)
				} else if !pf.IsNil() {
					// Missing in child: synthesize a node and inherit from parent.
					n := cldr.newNode(pf.Elem(), v)
					if vf, err = cldr.inheritStructPtr(n, pf); err != nil {
						return reflect.Value{}, err
					}
					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
					nv.Elem().FieldByIndex(i.index).Set(vf)
				}
			}
		case reflect.Slice:
			vf, err := cldr.inheritSlice(nv.Elem(), vf, pf)
			if err != nil {
				return reflect.Zero(t), err
			}
			nv.Elem().FieldByIndex(i.index).Set(vf)
		}
	}
	return nv, nil
}
// root walks the chain of enclosing elements upward from e and returns
// the top-level LDML document that contains it.
func root(e Elem) *LDML {
	for {
		parent := e.enclosing()
		if parent == nil {
			return e.(*LDML)
		}
		e = parent
	}
}
// inheritStructPtr first merges possible aliases in with v and then inherits
// any underspecified elements from parent. Both v and parent are pointers
// to element structs; at least one of them must be non-nil.
func (cldr *CLDR) inheritStructPtr(v, parent reflect.Value) (r reflect.Value, err error) {
	if !v.IsNil() {
		e := v.Interface().(Elem).GetCommon()
		alias := e.Alias
		if alias == nil && !parent.IsNil() {
			// No local alias: fall back to the alias of the parent element.
			alias = parent.Interface().(Elem).GetCommon().Alias
		}
		if alias != nil {
			// NOTE(review): the := here shadows the named return err, so a
			// resolveAlias failure is silently dropped when a is nil —
			// confirm this best-effort behavior is intentional.
			a, err := cldr.resolveAlias(v.Interface().(Elem), alias.Source, alias.Path)
			if a != nil {
				if v, err = cldr.inheritFields(v.Elem(), reflect.ValueOf(a).Elem()); err != nil {
					return reflect.Value{}, err
				}
			}
		}
		if !parent.IsNil() {
			return cldr.inheritFields(v.Elem(), parent.Elem())
		}
	} else if parent.IsNil() {
		// Callers guarantee at least one side exists.
		panic("should not reach here")
	}
	return v, nil
}
// inheritSlice merges slice v with its parent's slice, inheriting
// underspecified data per element. Elements are keyed by their
// distinguishing attributes (attrKey) and the merged result is returned
// sorted by key for deterministic output. v and parent must be slices of
// struct pointers; enc is the enclosing element of the result.
func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value, err error) {
	t := v.Type()
	index := make(map[string]reflect.Value)
	if !v.IsNil() {
		for i := 0; i < v.Len(); i++ {
			vi := v.Index(i)
			key := attrKey(vi)
			index[key] = vi
		}
	}
	if !parent.IsNil() {
		for i := 0; i < parent.Len(); i++ {
			vi := parent.Index(i)
			key := attrKey(vi)
			if w, ok := index[key]; ok {
				index[key], err = cldr.inheritStructPtr(w, vi)
			} else {
				// Element exists only in the parent: synthesize a node and
				// inherit everything from the parent element.
				n := cldr.newNode(vi.Elem(), enc)
				index[key], err = cldr.inheritStructPtr(n, vi)
			}
			// Check the error before touching the returned value: on
			// failure inheritStructPtr can return an invalid
			// reflect.Value, and calling Interface on it would panic.
			if err != nil {
				return v, err
			}
			index[key].Interface().(Elem).setEnclosing(enc.Addr().Interface().(Elem))
		}
	}
	// Sort keys so map iteration order does not leak into the result.
	keys := make([]string, 0, len(index))
	for k := range index {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	sl := reflect.MakeSlice(t, len(index), len(index))
	for i, k := range keys {
		sl.Index(i).Set(index[k])
	}
	return sl, nil
}
// parentLocale returns the CLDR parent of the given locale identifier:
// loc with its last underscore-separated subtag removed, or "root" when
// there is no subtag left to strip.
func parentLocale(loc string) string {
	if i := strings.LastIndex(loc, "_"); i >= 0 {
		return loc[:i]
	}
	return "root"
}
// resolve returns the fully resolved LDML for locale loc, computing and
// caching the result on first use. For "root" it deep-copies the raw data
// and expands aliases; for any other locale it first resolves the nearest
// ancestor locale that has data and inherits unspecified fields from it.
func (cldr *CLDR) resolve(loc string) (res *LDML, err error) {
	if r := cldr.resolved[loc]; r != nil {
		return r, nil // cached from an earlier call
	}
	x := cldr.RawLDML(loc)
	if x == nil {
		return nil, fmt.Errorf("cldr: unknown locale %q", loc)
	}
	var v reflect.Value
	if loc == "root" {
		// Deep-copy so alias resolution does not mutate the raw data.
		x = deepCopy(reflect.ValueOf(x)).Interface().(*LDML)
		linkEnclosing(nil, x)
		err = cldr.aliasResolver().visit(x)
	} else {
		// Walk up parent locales until one with data is found; "root"
		// always exists, so this loop terminates.
		key := parentLocale(loc)
		var parent *LDML
		for ; cldr.locale[key] == nil; key = parentLocale(key) {
		}
		if parent, err = cldr.resolve(key); err != nil {
			return nil, err
		}
		v, err = cldr.inheritFields(reflect.ValueOf(x).Elem(), reflect.ValueOf(parent).Elem())
		// Check the error before using v: on failure inheritFields
		// returns an invalid or zero reflect.Value, and calling
		// Interface on it would panic.
		if err != nil {
			return nil, err
		}
		x = v.Interface().(*LDML)
		linkEnclosing(nil, x)
	}
	if err != nil {
		return nil, err
	}
	cldr.resolved[loc] = x
	return x, err
}
// finalize finalizes the initialization of the raw LDML structs. It also
// removes unwanted fields, as specified by filter, so that they will not
// be unnecessarily evaluated.
func (cldr *CLDR) finalize(filter []string) {
	for _, x := range cldr.locale {
		if filter != nil {
			elem := reflect.ValueOf(x).Elem()
			typ := elem.Type()
			for i := 0; i < typ.NumField(); i++ {
				field := typ.Field(i)
				name, _ := xmlName(field)
				keep := name == "" || name == "identity" || in(filter, name)
				if !keep {
					// Zero out filtered-out elements so later passes skip them.
					elem.Field(i).Set(reflect.Zero(field.Type))
				}
			}
		}
		linkEnclosing(nil, x) // for resolving aliases and paths
		setNames(x, "ldml")
	}
}

144
vendor/golang.org/x/text/unicode/cldr/slice.go generated vendored Normal file
View File

@ -0,0 +1,144 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"fmt"
"reflect"
"sort"
)
// Slice provides utilities for modifying slices of elements.
// It can be wrapped around any slice of which the element type implements
// interface Elem.
type Slice struct {
	ptr reflect.Value // pointer to the wrapped slice (a *[]T)
	typ reflect.Type  // struct type of the element (T for a []*T slice)
}
// Value returns the reflect.Value of the underlying slice.
// The returned value aliases the wrapped slice, so setting it updates the
// caller's slice in place.
func (s *Slice) Value() reflect.Value {
	return s.ptr.Elem()
}
// MakeSlice wraps a pointer to a slice of Elems.
// It replaces the array pointed to by the slice so that subsequent modifications
// do not alter the data in a CLDR type.
// It panics if an incorrect type is passed.
func MakeSlice(slicePtr interface{}) Slice {
	ptr := reflect.ValueOf(slicePtr)
	if ptr.Kind() != reflect.Ptr {
		panic(fmt.Sprintf("MakeSlice: argument must be pointer to slice, found %v", ptr.Type()))
	}
	sl := ptr.Elem()
	if sl.Kind() != reflect.Slice {
		panic(fmt.Sprintf("MakeSlice: argument must point to a slice, found %v", sl.Type()))
	}
	intf := reflect.TypeOf((*Elem)(nil)).Elem()
	if !sl.Type().Elem().Implements(intf) {
		panic(fmt.Sprintf("MakeSlice: element type of slice (%v) does not implement Elem", sl.Type().Elem()))
	}
	// Copy the elements into a fresh backing array so in-place mutations
	// (Filter, SelectAnyOf, ...) cannot modify the original CLDR data.
	nsl := reflect.MakeSlice(sl.Type(), sl.Len(), sl.Len())
	reflect.Copy(nsl, sl)
	sl.Set(nsl)
	return Slice{
		ptr: ptr,
		typ: sl.Type().Elem().Elem(), // struct type T for a []*T slice
	}
}
// indexForAttr returns the field index path of attribute a within the
// element struct type, suitable for reflect's FieldByIndex. It panics if
// the type has no attribute with that XML name.
func (s Slice) indexForAttr(a string) []int {
	for i := iter(reflect.Zero(s.typ)); !i.done(); i.next() {
		if n, _ := xmlName(i.field()); n == a {
			return i.index
		}
	}
	panic(fmt.Sprintf("MakeSlice: no attribute %q for type %v", a, s.typ))
}
// Filter filters s to only include elements for which fn returns true.
// The kept elements are compacted to the front of the slice in place and
// the slice is truncated to the number of survivors.
func (s Slice) Filter(fn func(e Elem) bool) {
	sl := s.Value()
	kept := 0
	for i := 0; i < sl.Len(); i++ {
		elem := sl.Index(i)
		if !fn(elem.Interface().(Elem)) {
			continue
		}
		sl.Index(kept).Set(elem)
		kept++
	}
	sl.Set(sl.Slice(0, kept))
}
// Group finds elements in s for which fn returns the same value and groups
// them in a new Slice. Groups are returned in sorted key order so the
// result is deterministic despite random map iteration.
func (s Slice) Group(fn func(e Elem) string) []Slice {
	m := make(map[string][]reflect.Value)
	sl := s.Value()
	for i := 0; i < sl.Len(); i++ {
		vi := sl.Index(i)
		key := fn(vi.Interface().(Elem))
		m[key] = append(m[key], vi)
	}
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	res := make([]Slice, 0, len(keys))
	for _, k := range keys {
		// Build a fresh slice per group and wrap it via MakeSlice so the
		// group gets its own backing array.
		nsl := reflect.New(sl.Type())
		nsl.Elem().Set(reflect.Append(nsl.Elem(), m[k]...))
		res = append(res, MakeSlice(nsl.Interface()))
	}
	return res
}
// SelectAnyOf filters s to contain only elements for which attr matches
// any of the values. It panics (via indexForAttr) if the element type has
// no attribute named attr.
func (s Slice) SelectAnyOf(attr string, values ...string) {
	index := s.indexForAttr(attr)
	s.Filter(func(e Elem) bool {
		vf := reflect.ValueOf(e).Elem().FieldByIndex(index)
		return in(values, vf.String())
	})
}
// SelectOnePerGroup filters s to include at most one element e per group of
// elements matching Key(attr), where e has an attribute a that matches any
// the values in v.
// If more than one element in a group matches a value in v preference
// is given to the element that matches the first value in v.
func (s Slice) SelectOnePerGroup(a string, v []string) {
	index := s.indexForAttr(a)
	grouped := s.Group(func(e Elem) string { return Key(e, a) })
	sl := s.Value()
	// Rebuild the slice from scratch, appending one winner per group.
	sl.Set(sl.Slice(0, 0))
	for _, g := range grouped {
		e := reflect.Value{}
		// found is the index in v of the best match so far; len(v) means
		// no element of this group has matched yet.
		found := len(v)
		gsl := g.Value()
		for i := 0; i < gsl.Len(); i++ {
			vi := gsl.Index(i).Elem().FieldByIndex(index)
			// j is the position of this element's attribute value in v
			// (len(v) if absent); a smaller j means higher preference.
			j := 0
			for ; j < len(v) && v[j] != vi.String(); j++ {
			}
			if j < found {
				found = j
				e = gsl.Index(i)
			}
		}
		if found < len(v) {
			sl.Set(reflect.Append(sl, e))
		}
	}
}
// SelectDraft drops all elements from the list with a draft level smaller than d
// and selects the highest draft level of the remaining.
// This method assumes that the input CLDR is canonicalized.
func (s Slice) SelectDraft(d Draft) {
	// The preference list is a suffix of the package-level drafts table.
	// NOTE(review): the -2 offset depends on the exact layout of drafts,
	// which is defined elsewhere — confirm against that table.
	s.SelectOnePerGroup("draft", drafts[len(drafts)-2-int(d):])
}

1456
vendor/golang.org/x/text/unicode/cldr/xml.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

54
vendor/vendor.json vendored
View File

@ -120,6 +120,42 @@
"revision": "cd527374f1e5bff4938207604a14f2e38a9cf512",
"revisionTime": "2012-06-04T00:48:16Z"
},
{
"checksumSHA1": "VQ/yxZt22x+e32AhKLT4ba/5j2g=",
"path": "github.com/SAP/go-hdb/driver",
"revision": "a582e52ee0774b3554cefb389cba53b221924fd7",
"revisionTime": "2017-04-01T11:06:20Z"
},
{
"checksumSHA1": "FxJ2ZP3uh+OKcGDKNAm3KpI7jbc=",
"path": "github.com/SAP/go-hdb/driver/sqltrace",
"revision": "a582e52ee0774b3554cefb389cba53b221924fd7",
"revisionTime": "2017-04-01T11:06:20Z"
},
{
"checksumSHA1": "/b9eSD6x+iph+U5+Y7Cs8t1rlrc=",
"path": "github.com/SAP/go-hdb/internal/bufio",
"revision": "a582e52ee0774b3554cefb389cba53b221924fd7",
"revisionTime": "2017-04-01T11:06:20Z"
},
{
"checksumSHA1": "Q72M8MwjGlPXLtSXJCgTiSzbb3Q=",
"path": "github.com/SAP/go-hdb/internal/protocol",
"revision": "a582e52ee0774b3554cefb389cba53b221924fd7",
"revisionTime": "2017-04-01T11:06:20Z"
},
{
"checksumSHA1": "qtZgybyWJwL1qR/afnMaxY5vkCk=",
"path": "github.com/SAP/go-hdb/internal/unicode",
"revision": "a582e52ee0774b3554cefb389cba53b221924fd7",
"revisionTime": "2017-04-01T11:06:20Z"
},
{
"checksumSHA1": "eHummM7nkyVQIc3GZ9U8qFI6Sj4=",
"path": "github.com/SAP/go-hdb/internal/unicode/cesu8",
"revision": "a582e52ee0774b3554cefb389cba53b221924fd7",
"revisionTime": "2017-04-01T11:06:20Z"
},
{
"checksumSHA1": "t+uej2kiyqRyQYguygI8t9nJH2w=",
"path": "github.com/SermoDigital/jose",
@ -1422,6 +1458,18 @@
"revision": "fb4cac33e3196ff7f507ab9b2d2a44b0142f5b5a",
"revisionTime": "2017-06-14T06:48:48Z"
},
{
"checksumSHA1": "5Pu4kIIFrYb1oJWIvsdzr2pM38s=",
"path": "golang.org/x/text",
"revision": "4ee4af566555f5fbe026368b75596286a312663a",
"revisionTime": "2017-05-30T10:19:54Z"
},
{
"checksumSHA1": "ZQdHbB9VYCXwQ+9/CmZPhJv0+SM=",
"path": "golang.org/x/text/internal/gen",
"revision": "4ee4af566555f5fbe026368b75596286a312663a",
"revisionTime": "2017-05-30T10:19:54Z"
},
{
"checksumSHA1": "faFDXp++cLjLBlvsr+izZ+go1WU=",
"path": "golang.org/x/text/secure/bidirule",
@ -1440,6 +1488,12 @@
"revision": "9e2f80a6ba7ed4ba13e0cd4b1f094bf916875735",
"revisionTime": "2017-06-09T15:53:19Z"
},
{
"checksumSHA1": "ZbYsJjfj1rPbHN+0baD1rg09PXQ=",
"path": "golang.org/x/text/unicode/cldr",
"revision": "4ee4af566555f5fbe026368b75596286a312663a",
"revisionTime": "2017-05-30T10:19:54Z"
},
{
"checksumSHA1": "kKylzIrLEnH8NKyeVAL0dq5gjVQ=",
"path": "golang.org/x/text/unicode/norm",

View File

@ -0,0 +1,87 @@
---
layout: "api"
page_title: "HANA Database Plugin - HTTP API"
sidebar_current: "docs-http-secret-databases-hana"
description: |-
The HANA plugin for Vault's Database backend generates database credentials to access HANA servers.
---
# HANA Database Plugin HTTP API
The HANA Database Plugin is one of the supported plugins for the Database
backend. This plugin generates database credentials dynamically based on
configured roles for the HANA database.
## Configure Connection
In addition to the parameters defined by the [Database
Backend](/api/secret/databases/index.html#configure-connection), this plugin
has a number of parameters to further configure a connection.
| Method | Path | Produces |
| :------- | :--------------------------- | :--------------------- |
| `POST` | `/database/config/:name` | `204 (empty body)` |
### Parameters
- `connection_url` `(string: <required>)` - Specifies the HANA DSN.
- `max_open_connections` `(int: 2)` - Specifies the maximum number of open
connections to the database.
- `max_idle_connections` `(int: 0)` - Specifies the maximum number of idle
connections to the database. A zero uses the value of `max_open_connections`
and a negative value disables idle connections. If larger than
`max_open_connections` it will be reduced to be equal.
- `max_connection_lifetime` `(string: "0s")` - Specifies the maximum amount of
time a connection may be reused. If <= 0s connections are reused forever.
### Sample Payload
```json
{
"plugin_name": "hana-database-plugin",
"allowed_roles": "readonly",
"connection_url": "hdb://username:password@localhost:1433",
"max_open_connections": 5,
"max_connection_lifetime": "5s",
}
```
### Sample Request
```
$ curl \
--header "X-Vault-Token: ..." \
--request POST \
--data @payload.json \
https://vault.rocks/v1/database/config/hana
```
## Statements
Statements are configured during role creation and are used by the plugin to
determine what is sent to the database on user creation, renewal, and
revocation. For more information on configuring roles see the [Role
API](/api/secret/databases/index.html#create-role) in the Database Backend docs.
### Parameters
The following are the statements used by this plugin. If not mentioned in this
list the plugin does not support that statement type.
- `creation_statements` `(string: <required>)` Specifies the database
statements executed to create and configure a user. Must be a
semicolon-separated string, a base64-encoded semicolon-separated string, a
serialized JSON string array, or a base64-encoded serialized JSON string
array. The '{{name}}', '{{password}}', and '{{expiration}}' values will be
substituted.
- The expiration time will be HANA server time plus the role's `default_ttl`.
If `default_ttl` is 0 or not set, a SQL HdbError 438 will be returned.
- `revocation_statements` `(string: "")` Specifies the database statements to
be executed to revoke a user. Must be a semicolon-separated string, a
base64-encoded semicolon-separated string, a serialized JSON string array, or
a base64-encoded serialized JSON string array. The '{{name}}' value will be
substituted. If not provided, defaults to dropping the user only if they have
no dependent objects.

View File

@ -0,0 +1,60 @@
---
layout: "docs"
page_title: "HANA Database Plugin"
sidebar_current: "docs-secrets-databases-hana"
description: |-
The HANA plugin for Vault's Database backend generates database credentials to access SAP HANA Database.
---
# HANA Database Plugin
Name: `hana-database-plugin`
The HANA Database Plugin is one of the supported plugins for the Database
backend. This plugin generates database credentials dynamically based on
configured roles for the HANA database.
See the [Database Backend](/docs/secrets/databases/index.html) docs for more
information about setting up the Database Backend.
## Quick Start
After the Database Backend is mounted you can configure a HANA connection
by specifying this plugin as the `"plugin_name"` argument. Here is an example
configuration:
```
$ vault write database/config/hana \
plugin_name=hana-database-plugin \
connection_url="hdb://username:password@localhost:1433" \
allowed_roles="readonly"
The following warnings were returned from the Vault server:
* Read access to this endpoint should be controlled via ACLs as it will
return the connection details as is, including passwords, if any.
```
Once the HANA connection is configured we can add a role:
```
$ vault write database/roles/readonly \
db_name=hana \
creation_statements="CREATE USER {{name}} PASSWORD {{password}} VALID UNTIL '{{expiration}}';\
CALL GRANT_ACTIVATED_ROLE ( 'sap.hana.admin.roles::Monitoring', '{{name}}' );" \
default_ttl="12h" \
max_ttl="24h"
Success! Data written to: database/roles/readonly
```
This role can now be used to retrieve a new set of credentials by querying the
"database/creds/readonly" endpoint.
## API
The full list of configurable options can be seen in the [HANA database
plugin API](/api/secret/databases/hana.html) page.
For more information on the Database secret backend's HTTP API please see the [Database secret
backend API](/api/secret/databases/index.html) page.