Update dependencies, enable pruning for vendor/
So, `dep` gained a nice new feature to remove tests and non-Go files from `vendor/`, which brings the size of the vendor directory down from ~300MiB to ~20MiB. We enable that now.
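For reference, this is the `[prune]` stanza added to Gopkg.toml further down in this diff; dep then stamps every project in Gopkg.lock with `pruneopts = "UT"`, which, if I read dep's encoding right, is shorthand for exactly these two options (U = unused-packages, T = go-tests):

    [prune]
      unused-packages = true
      go-tests = true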
parent 3422c1ca83
commit bff635bc5f
6741 changed files with 26942 additions and 4902033 deletions
Gopkg.lock (generated) · 323 changed lines

@@ -3,253 +3,472 @@
 [[projects]]
   branch = "master"
+  digest = "1:94e9caf404409a2990cfd22aca37d758494c098eff3e2c37fda1abed862e74dd"
   name = "bazil.org/fuse"
-  packages = [".","fs","fuseutil"]
-  revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
+  packages = [
+    ".",
+    "fs",
+    "fuseutil",
+  ]
+  pruneopts = "UT"
+  revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"

 [[projects]]
+  digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57"
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
-  revision = "4b98a6370e36d7a85192e7bad08a4ebd82eac2a8"
-  version = "v0.20.0"
+  pruneopts = "UT"
+  revision = "aad3f485ee528456e0768f20397b4d9dd941e755"
+  version = "v0.25.0"

 [[projects]]
+  digest = "1:46ea9487304f4b3c787f54483ecb13a338d686dcd670db0ab1a112ed0ae2128e"
   name = "github.com/Azure/azure-sdk-for-go"
-  packages = ["storage","version"]
-  revision = "56332fec5b308fbb6615fa1af6117394cdba186d"
-  version = "v15.0.0"
+  packages = [
+    "storage",
+    "version",
+  ]
+  pruneopts = "UT"
+  revision = "4e8cbbfb1aeab140cd0fa97fd16b64ee18c3ca6a"
+  version = "v19.1.0"

 [[projects]]
+  digest = "1:27d0cd1a78fc836f7c0f07749d029a5f7895c84ad066187b08b70e9d1830098e"
   name = "github.com/Azure/go-autorest"
-  packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
-  revision = "ed4b7f5bf1ec0c9ede1fda2681d96771282f2862"
-  version = "v10.4.0"
+  packages = [
+    "autorest",
+    "autorest/adal",
+    "autorest/azure",
+    "autorest/date",
+    "logger",
+    "version",
+  ]
+  pruneopts = "UT"
+  revision = "dd94e014aaf16d1df746762e392aa201c1b4c461"
+  version = "v10.15.0"

 [[projects]]
+  digest = "1:2209584c0f7c9b68c23374e659357ab546e1b70eec2761f03280f69a8fd23d77"
   name = "github.com/cenkalti/backoff"
   packages = ["."]
+  pruneopts = "UT"
   revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e"
   version = "v2.0.0"

 [[projects]]
+  digest = "1:7cb4fdca4c251b3ef8027c90ea35f70c7b661a593b9eeae34753c65499098bb1"
   name = "github.com/cpuguy83/go-md2man"
   packages = ["md2man"]
+  pruneopts = "UT"
   revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
   version = "v1.0.8"

 [[projects]]
+  digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55"
   name = "github.com/dgrijalva/jwt-go"
   packages = ["."]
+  pruneopts = "UT"
   revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
   version = "v3.2.0"

 [[projects]]
   branch = "master"
+  digest = "1:6f9339c912bbdda81302633ad7e99a28dfa5a639c864061f1929510a9a64aa74"
   name = "github.com/dustin/go-humanize"
   packages = ["."]
-  revision = "bb3d318650d48840a39aa21a027c6630e198e626"
+  pruneopts = "UT"
+  revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e"

 [[projects]]
+  digest = "1:c7edfbb6320d6a93240d663dc52bca92bed4c116abe54c35679eec4e7cc2bd77"
   name = "github.com/elithrar/simple-scrypt"
   packages = ["."]
+  pruneopts = "UT"
   revision = "d150773194090feb6c897805a7bcea8d49544e2c"
   version = "v1.3.0"

 [[projects]]
+  digest = "1:fe8a03a8222d5b913f256972933d26d24ad7c8286692a42943bc01633cc8fce3"
   name = "github.com/go-ini/ini"
   packages = ["."]
-  revision = "6333e38ac20b8949a8dd68baa3650f4dee8f39f0"
-  version = "v1.33.0"
+  pruneopts = "UT"
+  revision = "358ee7663966325963d4e8b2e1fbd570c5195153"
+  version = "v1.38.1"

 [[projects]]
+  digest = "1:15042ad3498153684d09f393bbaec6b216c8eec6d61f63dff711de7d64ed8861"
   name = "github.com/golang/protobuf"
   packages = ["proto"]
-  revision = "925541529c1fa6821df4e44ce2723319eb2be768"
-  version = "v1.0.0"
+  pruneopts = "UT"
+  revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
+  version = "v1.1.0"

 [[projects]]
+  digest = "1:2e3c336fc7fde5c984d2841455a658a6d626450b1754a854b3b32e7a8f49a07a"
   name = "github.com/google/go-cmp"
-  packages = ["cmp","cmp/internal/diff","cmp/internal/function","cmp/internal/value"]
-  revision = "8099a9787ce5dc5984ed879a3bda47dc730a8e97"
-  version = "v0.1.0"
+  packages = [
+    "cmp",
+    "cmp/internal/diff",
+    "cmp/internal/function",
+    "cmp/internal/value",
+  ]
+  pruneopts = "UT"
+  revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
+  version = "v0.2.0"

 [[projects]]
+  digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
   name = "github.com/inconshreveable/mousetrap"
   packages = ["."]
+  pruneopts = "UT"
   revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
   version = "v1.0"

 [[projects]]
+  digest = "1:190ff84d9b2ed6589088f178cba8edb4b8ecb334df4572421fb016be1ac20463"
   name = "github.com/juju/ratelimit"
   packages = ["."]
+  pruneopts = "UT"
   revision = "59fac5042749a5afb9af70e813da1dd5474f0167"
   version = "1.0.1"

 [[projects]]
-  branch = "master"
+  digest = "1:9cedee824c21326bd26950bd9e1ffe9dc4e7ca03dc8634d0e6f954ee6a383172"
   name = "github.com/kr/fs"
   packages = ["."]
-  revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
+  pruneopts = "UT"
+  revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
+  version = "v0.1.0"

 [[projects]]
+  digest = "1:1faa76bd9bffce9c25eaca0597afb67bd05a21ae57fe4378154ce8855ef163d1"
   name = "github.com/kurin/blazer"
-  packages = ["b2","base","internal/b2assets","internal/b2types","internal/blog","x/window"]
+  packages = [
+    "b2",
+    "base",
+    "internal/b2assets",
+    "internal/b2types",
+    "internal/blog",
+    "x/window",
+  ]
+  pruneopts = "UT"
   revision = "caf65aa76491dc533bac68ad3243ce72fa4e0a0a"
   version = "v0.5.1"

 [[projects]]
+  digest = "1:4e878df5f4e9fd625bf9c9aac77ef7cbfa4a74c01265505527c23470c0e40300"
   name = "github.com/marstr/guid"
   packages = ["."]
+  pruneopts = "UT"
   revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
   version = "v1.1.0"

 [[projects]]
+  digest = "1:d4d17353dbd05cb52a2a52b7fe1771883b682806f68db442b436294926bbfafb"
   name = "github.com/mattn/go-isatty"
   packages = ["."]
+  pruneopts = "UT"
   revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
   version = "v0.0.3"

 [[projects]]
+  digest = "1:95c73c666919be2843b955eafc83f58c136312b74f79c703152f4c4a95fd64dc"
   name = "github.com/minio/minio-go"
-  packages = [".","pkg/credentials","pkg/encrypt","pkg/policy","pkg/s3signer","pkg/s3utils","pkg/set"]
-  revision = "66252c2a3c15f7b90cc8493d497a04ac3b6e3606"
-  version = "5.0.0"
+  packages = [
+    ".",
+    "pkg/credentials",
+    "pkg/encrypt",
+    "pkg/s3signer",
+    "pkg/s3utils",
+    "pkg/set",
+  ]
+  pruneopts = "UT"
+  revision = "70799fe8dae6ecfb6c7d7e9e048fce27f23a1992"
+  version = "v6.0.5"

 [[projects]]
   branch = "master"
+  digest = "1:8eb17c2ec4df79193ae65b621cd1c0c4697db3bc317fe6afdc76d7f2746abd05"
   name = "github.com/mitchellh/go-homedir"
   packages = ["."]
-  revision = "b8bc1bf767474819792c23f32d8286a45736f1c6"
+  pruneopts = "UT"
+  revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66"

 [[projects]]
-  branch = "master"
+  digest = "1:928de5172dd3563964d1b88a4ee3775cf72e16f1efabb482ab6d0e0bab86ee69"
   name = "github.com/ncw/swift"
   packages = ["."]
+  pruneopts = "UT"
   revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
+  version = "v1.0.39"

 [[projects]]
+  digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
   name = "github.com/pkg/errors"
   packages = ["."]
+  pruneopts = "UT"
   revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
   version = "v0.8.0"

 [[projects]]
+  digest = "1:cfa0d7741863a0e1d30e0ccdd4b48a96a471cdb47892303de8b92c3713af3e77"
   name = "github.com/pkg/profile"
   packages = ["."]
+  pruneopts = "UT"
   revision = "5b67d428864e92711fcbd2f8629456121a56d91f"
   version = "v1.2.1"

 [[projects]]
+  digest = "1:23ed92ba5d90a2dfe817f3895027ccef796e79c30be5125d48e17afdcc395d73"
   name = "github.com/pkg/sftp"
   packages = ["."]
-  revision = "49488377fa2f14143ba3067cf7555f60f6c7b550"
-  version = "1.5.0"
+  pruneopts = "UT"
+  revision = "57673e38ea946592a59c26592b7e6fbda646975b"
+  version = "v1.8.0"

 [[projects]]
+  digest = "1:0d67664e93e366f072ac9672feea29bfc63c9f90f005e9e8a0df1954153f5a14"
   name = "github.com/pkg/xattr"
   packages = ["."]
-  revision = "1d7b7ffe7c46974a836eb583b7452f22de1c18cf"
-  version = "v0.2.3"
+  pruneopts = "UT"
+  revision = "ae385d07bb53f092fcc7daaf738d8513df084931"
+  version = "v0.3.1"

 [[projects]]
+  digest = "1:13ecc4000f49cf0aa3ee56fffcc93119c8edffacfff08674c80d2757d8c33a83"
   name = "github.com/restic/chunker"
   packages = ["."]
+  pruneopts = "UT"
   revision = "db83917be3b88cc307464b7d8a221c173e34a0db"
   version = "v0.2.0"

 [[projects]]
+  digest = "1:8bc629776d035c003c7814d4369521afe67fdb8efc4b5f66540d29343b98cf23"
   name = "github.com/russross/blackfriday"
   packages = ["."]
+  pruneopts = "UT"
   revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
   version = "v1.5.1"

 [[projects]]
+  digest = "1:274f67cb6fed9588ea2521ecdac05a6d62a8c51c074c1fccc6a49a40ba80e925"
   name = "github.com/satori/go.uuid"
   packages = ["."]
+  pruneopts = "UT"
   revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
   version = "v1.2.0"

 [[projects]]
+  digest = "1:d867dfa6751c8d7a435821ad3b736310c2ed68945d05b50fb9d23aee0540c8cc"
   name = "github.com/sirupsen/logrus"
   packages = ["."]
-  revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
-  version = "v1.0.5"
+  pruneopts = "UT"
+  revision = "3e01752db0189b9157070a0e1668a620f9a85da2"
+  version = "v1.0.6"

 [[projects]]
+  digest = "1:e01b05ba901239c783dfe56450bcde607fc858908529868259c9a8765dc176d0"
   name = "github.com/spf13/cobra"
-  packages = [".","doc"]
-  revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
-  version = "v0.0.2"
+  packages = [
+    ".",
+    "doc",
+  ]
+  pruneopts = "UT"
+  revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
+  version = "v0.0.3"

 [[projects]]
+  digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7"
   name = "github.com/spf13/pflag"
   packages = ["."]
-  revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
-  version = "v1.0.0"
+  pruneopts = "UT"
+  revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
+  version = "v1.0.1"

 [[projects]]
   branch = "master"
+  digest = "1:eefb1f49ec07e71206d4c9ea1a3e634cad331c2180733e4121b8ae39e8e92ecb"
   name = "golang.org/x/crypto"
-  packages = ["argon2","blake2b","curve25519","ed25519","ed25519/internal/edwards25519","internal/chacha20","pbkdf2","poly1305","scrypt","ssh","ssh/terminal"]
-  revision = "4ec37c66abab2c7e02ae775328b2ff001c3f025a"
+  packages = [
+    "argon2",
+    "blake2b",
+    "curve25519",
+    "ed25519",
+    "ed25519/internal/edwards25519",
+    "internal/chacha20",
+    "internal/subtle",
+    "pbkdf2",
+    "poly1305",
+    "scrypt",
+    "ssh",
+    "ssh/terminal",
+  ]
+  pruneopts = "UT"
+  revision = "c126467f60eb25f8f27e5a981f32a87e3965053f"

 [[projects]]
   branch = "master"
+  digest = "1:8356aa7bdcb10a210b814b64ff76d61de7c36ac4cb6263de3af5e3e2e546956d"
   name = "golang.org/x/net"
-  packages = ["context","context/ctxhttp","http2","http2/hpack","idna","lex/httplex"]
-  revision = "6078986fec03a1dcc236c34816c71b0e05018fda"
+  packages = [
+    "context",
+    "context/ctxhttp",
+    "http/httpguts",
+    "http2",
+    "http2/hpack",
+    "idna",
+  ]
+  pruneopts = "UT"
+  revision = "32f9bdbd7df18e8641d215e7ea68be88b971feb0"

 [[projects]]
   branch = "master"
+  digest = "1:bea0314c10bd362ab623af4880d853b5bad3b63d0ab9945c47e461b8d04203ed"
   name = "golang.org/x/oauth2"
-  packages = [".","google","internal","jws","jwt"]
-  revision = "fdc9e635145ae97e6c2cb777c48305600cf515cb"
+  packages = [
+    ".",
+    "google",
+    "internal",
+    "jws",
+    "jwt",
+  ]
+  pruneopts = "UT"
+  revision = "3d292e4d0cdc3a0113e6d207bb137145ef1de42f"

 [[projects]]
   branch = "master"
+  digest = "1:39ebcc2b11457b703ae9ee2e8cca0f68df21969c6102cb3b705f76cca0ea0239"
   name = "golang.org/x/sync"
   packages = ["errgroup"]
+  pruneopts = "UT"
   revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"

 [[projects]]
   branch = "master"
+  digest = "1:a220a85c72a6cb7339c412cb2b117019a7fd94007cdfffb3b5b1d058227a2bf8"
   name = "golang.org/x/sys"
-  packages = ["cpu","unix","windows"]
-  revision = "7db1c3b1a98089d0071c84f646ff5c96aad43682"
+  packages = [
+    "cpu",
+    "unix",
+    "windows",
+  ]
+  pruneopts = "UT"
+  revision = "bd9dbc187b6e1dacfdd2722a87e83093c2d7bd6e"

 [[projects]]
+  digest = "1:e5a8511f063c38c51ab9ab80e718e9149f692652aeb4e393a8c020dd1bf38ca2"
   name = "golang.org/x/text"
-  packages = ["collate","collate/build","encoding","encoding/internal","encoding/internal/identifier","encoding/unicode","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","internal/utf8internal","language","runes","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
+  packages = [
+    "collate",
+    "collate/build",
+    "encoding",
+    "encoding/internal",
+    "encoding/internal/identifier",
+    "encoding/unicode",
+    "internal/colltab",
+    "internal/gen",
+    "internal/tag",
+    "internal/triegen",
+    "internal/ucd",
+    "internal/utf8internal",
+    "language",
+    "runes",
+    "secure/bidirule",
+    "transform",
+    "unicode/bidi",
+    "unicode/cldr",
+    "unicode/norm",
+    "unicode/rangetable",
+  ]
+  pruneopts = "UT"
   revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
   version = "v0.3.0"

 [[projects]]
   branch = "master"
+  digest = "1:fb983acae7bd9c3ed9aadc1b1241d9e559ed21dbf84c17a0dda663ca169ccd69"
   name = "google.golang.org/api"
-  packages = ["gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
-  revision = "dbbc13f71100fa6ece308335445fca6bb0dd5c2f"
+  packages = [
+    "gensupport",
+    "googleapi",
+    "googleapi/internal/uritemplates",
+    "storage/v1",
+  ]
+  pruneopts = "UT"
+  revision = "31ca0e01cd791f07750cb23fc99327721f753290"

 [[projects]]
+  digest = "1:c8907869850adaa8bd7631887948d0684f3787d0912f1c01ab72581a6c34432e"
   name = "google.golang.org/appengine"
-  packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
-  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
-  version = "v1.0.0"
+  packages = [
+    ".",
+    "internal",
+    "internal/app_identity",
+    "internal/base",
+    "internal/datastore",
+    "internal/log",
+    "internal/modules",
+    "internal/remote_api",
+    "internal/urlfetch",
+    "urlfetch",
+  ]
+  pruneopts = "UT"
+  revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
+  version = "v1.1.0"

 [[projects]]
   branch = "v2"
+  digest = "1:5bb148b78468350091db2ffbb2370f35cc6dcd74d9378a31b1c7b86ff7528f08"
   name = "gopkg.in/tomb.v2"
   packages = ["."]
+  pruneopts = "UT"
   revision = "d5d1b5820637886def9eef33e03a27a9f166942c"

 [[projects]]
+  digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
   name = "gopkg.in/yaml.v2"
   packages = ["."]
+  pruneopts = "UT"
   revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
   version = "v2.2.1"

 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "a5de339cba7570216b212439b90e1e6c384c94be8342fe7755b7cb66aa0a3440"
+  input-imports = [
+    "bazil.org/fuse",
+    "bazil.org/fuse/fs",
+    "github.com/Azure/azure-sdk-for-go/storage",
+    "github.com/cenkalti/backoff",
+    "github.com/elithrar/simple-scrypt",
+    "github.com/google/go-cmp/cmp",
+    "github.com/juju/ratelimit",
+    "github.com/kurin/blazer/b2",
+    "github.com/mattn/go-isatty",
+    "github.com/minio/minio-go",
+    "github.com/minio/minio-go/pkg/credentials",
+    "github.com/ncw/swift",
+    "github.com/pkg/errors",
+    "github.com/pkg/profile",
+    "github.com/pkg/sftp",
+    "github.com/pkg/xattr",
+    "github.com/restic/chunker",
+    "github.com/spf13/cobra",
+    "github.com/spf13/cobra/doc",
+    "github.com/spf13/pflag",
+    "golang.org/x/crypto/poly1305",
+    "golang.org/x/crypto/scrypt",
+    "golang.org/x/crypto/ssh/terminal",
+    "golang.org/x/net/context",
+    "golang.org/x/net/context/ctxhttp",
+    "golang.org/x/net/http2",
+    "golang.org/x/oauth2/google",
+    "golang.org/x/sync/errgroup",
+    "golang.org/x/sys/unix",
+    "golang.org/x/text/encoding/unicode",
+    "google.golang.org/api/googleapi",
+    "google.golang.org/api/storage/v1",
+    "gopkg.in/tomb.v2",
+  ]
   solver-name = "gps-cdcl"
   solver-version = 1

Gopkg.toml · 4 added lines

@@ -19,3 +19,7 @@
 # [[override]]
 #  name = "github.com/x/y"
 #  version = "2.4.0"
+
+[prune]
+  unused-packages = true
+  go-tests = true

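With the stanza above in place, regenerating the pruned vendor/ tree should just be a normal re-vendoring run; a minimal sketch, assuming dep v0.5 or newer (the release that, as far as I know, introduced `pruneopts` in the lock file):

    dep ensure
    du -sh vendor/    # expect roughly 20MiB now, down from ~300MiB
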
vendor/bazil.org/fuse/doc/.gitignore (generated, vendored) · 4 lines deleted

@@ -1,4 +0,0 @@
/*.seq.svg

# not ignoring *.seq.png; we want those committed to the repo
# for embedding on Github

vendor/bazil.org/fuse/doc/README.md (generated, vendored) · 6 lines deleted

@@ -1,6 +0,0 @@
# bazil.org/fuse documentation

See also API docs at http://godoc.org/bazil.org/fuse

- [The mount sequence](mount-sequence.md)
- [Writing documentation](writing-docs.md)

vendor/bazil.org/fuse/doc/mount-linux-error-init.seq (generated, vendored) · 32 lines deleted

@@ -1,32 +0,0 @@
seqdiag {
    app;
    fuse [label="bazil.org/fuse"];
    fusermount;
    kernel;
    mounts;

    app;
    fuse [label="bazil.org/fuse"];
    fusermount;
    kernel;
    mounts;

    app -> fuse [label="Mount"];
    fuse -> fusermount [label="spawn, pass socketpair fd"];
    fusermount -> kernel [label="open /dev/fuse"];
    fusermount -> kernel [label="mount(2)"];
    kernel ->> mounts [label="mount is visible"];
    fusermount <-- kernel [label="mount(2) returns"];
    fuse <<-- fusermount [diagonal, label="exit, receive /dev/fuse fd", leftnote="on Linux, successful exit here\nmeans the mount has happened,\nthough InitRequest might not have yet"];
    app <-- fuse [label="Mount returns\nConn.Ready is already closed"];

    app -> fuse [label="fs.Serve"];
    fuse => kernel [label="read /dev/fuse fd", note="starts with InitRequest"];
    fuse -> app [label="Init"];
    fuse <-- app [color=red];
    fuse -> kernel [label="write /dev/fuse fd", color=red];
    kernel -> kernel [label="set connection\nstate to error", color=red];
    fuse <-- kernel;
    ... conn.MountError == nil, so it is still mounted ...
    ... call conn.Close to clean up ...
}

vendor/bazil.org/fuse/doc/mount-linux-error-init.seq.png (generated, vendored) · deleted binary, 28 KiB

vendor/bazil.org/fuse/doc/mount-linux.seq (generated, vendored) · 41 lines deleted

@@ -1,41 +0,0 @@
seqdiag {
    // seqdiag -T svg -o doc/mount-osx.svg doc/mount-osx.seq
    app;
    fuse [label="bazil.org/fuse"];
    fusermount;
    kernel;
    mounts;

    app -> fuse [label="Mount"];
    fuse -> fusermount [label="spawn, pass socketpair fd"];
    fusermount -> kernel [label="open /dev/fuse"];
    fusermount -> kernel [label="mount(2)"];
    kernel ->> mounts [label="mount is visible"];
    fusermount <-- kernel [label="mount(2) returns"];
    fuse <<-- fusermount [diagonal, label="exit, receive /dev/fuse fd", leftnote="on Linux, successful exit here\nmeans the mount has happened,\nthough InitRequest might not have yet"];
    app <-- fuse [label="Mount returns\nConn.Ready is already closed", rightnote="InitRequest and StatfsRequest\nmay or may not be seen\nbefore Conn.Ready,\ndepending on platform"];

    app -> fuse [label="fs.Serve"];
    fuse => kernel [label="read /dev/fuse fd", note="starts with InitRequest"];
    fuse => app [label="FS/Node/Handle methods"];
    fuse => kernel [label="write /dev/fuse fd"];
    ... repeat ...

    ... shutting down ...
    app -> fuse [label="Unmount"];
    fuse -> fusermount [label="fusermount -u"];
    fusermount -> kernel;
    kernel <<-- mounts;
    fusermount <-- kernel;
    fuse <<-- fusermount [diagonal];
    app <-- fuse [label="Unmount returns"];

    // actually triggers before above
    fuse <<-- kernel [diagonal, label="/dev/fuse EOF"];
    app <-- fuse [label="fs.Serve returns"];

    app -> fuse [label="conn.Close"];
    fuse -> kernel [label="close /dev/fuse fd"];
    fuse <-- kernel;
    app <-- fuse;
}

vendor/bazil.org/fuse/doc/mount-linux.seq.png (generated, vendored) · deleted binary, 44 KiB

vendor/bazil.org/fuse/doc/mount-osx-error-init.seq (generated, vendored) · 32 lines deleted

@@ -1,32 +0,0 @@
seqdiag {
    app;
    fuse [label="bazil.org/fuse"];
    wait [label="callMount\nhelper goroutine"];
    mount_osxfusefs;
    kernel;

    app -> fuse [label="Mount"];
    fuse -> kernel [label="open /dev/osxfuseN"];
    fuse -> mount_osxfusefs [label="spawn, pass fd"];
    fuse -> wait [label="goroutine", note="blocks on cmd.Wait"];
    app <-- fuse [label="Mount returns"];

    mount_osxfusefs -> kernel [label="mount(2)"];

    app -> fuse [label="fs.Serve"];
    fuse => kernel [label="read /dev/osxfuseN fd", note="starts with InitRequest,\nalso seen before mount exits:\ntwo StatfsRequest calls"];
    fuse -> app [label="Init"];
    fuse <-- app [color=red];
    fuse -> kernel [label="write /dev/osxfuseN fd", color=red];
    fuse <-- kernel;

    mount_osxfusefs <-- kernel [label="mount(2) returns", color=red];
    wait <<-- mount_osxfusefs [diagonal, label="exit", color=red];
    app <<-- wait [diagonal, label="mount has failed,\nclose Conn.Ready", color=red];

    // actually triggers before above
    fuse <<-- kernel [diagonal, label="/dev/osxfuseN EOF"];
    app <-- fuse [label="fs.Serve returns"];
    ... conn.MountError != nil, so it was was never mounted ...
    ... call conn.Close to clean up ...
}

vendor/bazil.org/fuse/doc/mount-osx-error-init.seq.png (generated, vendored) · deleted binary, 32 KiB

vendor/bazil.org/fuse/doc/mount-osx.seq (generated, vendored) · 45 lines deleted

@@ -1,45 +0,0 @@
seqdiag {
    // seqdiag -T svg -o doc/mount-osx.svg doc/mount-osx.seq
    app;
    fuse [label="bazil.org/fuse"];
    wait [label="callMount\nhelper goroutine"];
    mount_osxfusefs;
    kernel;
    mounts;

    app -> fuse [label="Mount"];
    fuse -> kernel [label="open /dev/osxfuseN"];
    fuse -> mount_osxfusefs [label="spawn, pass fd"];
    fuse -> wait [label="goroutine", note="blocks on cmd.Wait"];
    app <-- fuse [label="Mount returns"];

    mount_osxfusefs -> kernel [label="mount(2)"];

    app -> fuse [label="fs.Serve"];
    fuse => kernel [label="read /dev/osxfuseN fd", note="starts with InitRequest,\nalso seen before mount exits:\ntwo StatfsRequest calls"];
    fuse => app [label="FS/Node/Handle methods"];
    fuse => kernel [label="write /dev/osxfuseN fd"];
    ... repeat ...

    kernel ->> mounts [label="mount is visible"];
    mount_osxfusefs <-- kernel [label="mount(2) returns"];
    wait <<-- mount_osxfusefs [diagonal, label="exit", leftnote="on OS X, successful exit\nhere means we finally know\nthe mount has happened\n(can't trust InitRequest,\nkernel might have timed out\nwaiting for InitResponse)"];

    app <<-- wait [diagonal, label="mount is ready,\nclose Conn.Ready", rightnote="InitRequest and StatfsRequest\nmay or may not be seen\nbefore Conn.Ready,\ndepending on platform"];

    ... shutting down ...
    app -> fuse [label="Unmount"];
    fuse -> kernel [label="umount(2)"];
    kernel <<-- mounts;
    fuse <-- kernel;
    app <-- fuse [label="Unmount returns"];

    // actually triggers before above
    fuse <<-- kernel [diagonal, label="/dev/osxfuseN EOF"];
    app <-- fuse [label="fs.Serve returns"];

    app -> fuse [label="conn.Close"];
    fuse -> kernel [label="close /dev/osxfuseN"];
    fuse <-- kernel;
    app <-- fuse;
}

vendor/bazil.org/fuse/doc/mount-osx.seq.png (generated, vendored) · deleted binary, 50 KiB

vendor/bazil.org/fuse/doc/mount-sequence.md (generated, vendored) · 30 lines deleted

@@ -1,30 +0,0 @@
# The mount sequence

FUSE mounting is a little bit tricky. There's a userspace helper tool
that performs the handshake with the kernel, and then steps out of the
way. This helper behaves differently on different platforms, forcing a
more complex API on us.

## Successful runs

On Linux, the mount is immediate and file system accesses wait until
the requests are served.

![mount sequence on Linux](mount-linux.seq.png)

On OS X, the mount becomes visible only after `InitRequest` (and maybe
more) have been served.

![mount sequence on OS X](mount-osx.seq.png)

## Errors

Let's see what happens if `InitRequest` gets an error response. On
Linux, the mountpoint is there but all operations will fail:

![mount error on Linux](mount-linux-error-init.seq.png)

On OS X, the mount never happened:

![mount error on OS X](mount-osx-error-init.seq.png)

vendor/bazil.org/fuse/doc/writing-docs.md (generated, vendored) · 16 lines deleted

@@ -1,16 +0,0 @@
# Writing documentation

## Sequence diagrams

The sequence diagrams are generated with `seqdiag`:
http://blockdiag.com/en/seqdiag/index.html

An easy way to work on them is to automatically update the generated
files with https://github.com/cespare/reflex :

    reflex -g 'doc/[^.]*.seq' -- seqdiag -T svg -o '{}.svg' '{}' &

    reflex -g 'doc/[^.]*.seq' -- seqdiag -T png -o '{}.png' '{}' &

The markdown files refer to PNG images because of Github limitations,
but the SVG is generally more pleasant to view.

vendor/bazil.org/fuse/examples/clockfs/clockfs.go (generated, vendored) · 184 lines deleted

@@ -1,184 +0,0 @@
// Clockfs implements a file system with the current time in a file.
// It was written to demonstrate kernel cache invalidation.
package main

import (
    "flag"
    "fmt"
    "log"
    "os"
    "sync/atomic"
    "syscall"
    "time"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    _ "bazil.org/fuse/fs/fstestutil"
    "bazil.org/fuse/fuseutil"
    "golang.org/x/net/context"
)

func usage() {
    fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
    fmt.Fprintf(os.Stderr, "  %s MOUNTPOINT\n", os.Args[0])
    flag.PrintDefaults()
}

func run(mountpoint string) error {
    c, err := fuse.Mount(
        mountpoint,
        fuse.FSName("clock"),
        fuse.Subtype("clockfsfs"),
        fuse.LocalVolume(),
        fuse.VolumeName("Clock filesystem"),
    )
    if err != nil {
        return err
    }
    defer c.Close()

    if p := c.Protocol(); !p.HasInvalidate() {
        return fmt.Errorf("kernel FUSE support is too old to have invalidations: version %v", p)
    }

    srv := fs.New(c, nil)
    filesys := &FS{
        // We pre-create the clock node so that it's always the same
        // object returned from all the Lookups. You could carefully
        // track its lifetime between Lookup&Forget, and have the
        // ticking & invalidation happen only when active, but let's
        // keep this example simple.
        clockFile: &File{
            fuse: srv,
        },
    }
    filesys.clockFile.tick()
    // This goroutine never exits. That's fine for this example.
    go filesys.clockFile.update()
    if err := srv.Serve(filesys); err != nil {
        return err
    }

    // Check if the mount process has an error to report.
    <-c.Ready
    if err := c.MountError; err != nil {
        return err
    }
    return nil
}

func main() {
    flag.Usage = usage
    flag.Parse()

    if flag.NArg() != 1 {
        usage()
        os.Exit(2)
    }
    mountpoint := flag.Arg(0)

    if err := run(mountpoint); err != nil {
        log.Fatal(err)
    }
}

type FS struct {
    clockFile *File
}

var _ fs.FS = (*FS)(nil)

func (f *FS) Root() (fs.Node, error) {
    return &Dir{fs: f}, nil
}

// Dir implements both Node and Handle for the root directory.
type Dir struct {
    fs *FS
}

var _ fs.Node = (*Dir)(nil)

func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = 1
    a.Mode = os.ModeDir | 0555
    return nil
}

var _ fs.NodeStringLookuper = (*Dir)(nil)

func (d *Dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
    if name == "clock" {
        return d.fs.clockFile, nil
    }
    return nil, fuse.ENOENT
}

var dirDirs = []fuse.Dirent{
    {Inode: 2, Name: "clock", Type: fuse.DT_File},
}

var _ fs.HandleReadDirAller = (*Dir)(nil)

func (d *Dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
    return dirDirs, nil
}

type File struct {
    fs.NodeRef
    fuse    *fs.Server
    content atomic.Value
    count   uint64
}

var _ fs.Node = (*File)(nil)

func (f *File) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = 2
    a.Mode = 0444
    t := f.content.Load().(string)
    a.Size = uint64(len(t))
    return nil
}

var _ fs.NodeOpener = (*File)(nil)

func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
    if !req.Flags.IsReadOnly() {
        return nil, fuse.Errno(syscall.EACCES)
    }
    resp.Flags |= fuse.OpenKeepCache
    return f, nil
}

var _ fs.Handle = (*File)(nil)

var _ fs.HandleReader = (*File)(nil)

func (f *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
    t := f.content.Load().(string)
    fuseutil.HandleRead(req, resp, []byte(t))
    return nil
}

func (f *File) tick() {
    // Intentionally a variable-length format, to demonstrate size changes.
    f.count++
    s := fmt.Sprintf("%d\t%s\n", f.count, time.Now())
    f.content.Store(s)

    // For simplicity, this example tries to send invalidate
    // notifications even when the kernel does not hold a reference to
    // the node, so be extra sure to ignore ErrNotCached.
    if err := f.fuse.InvalidateNodeData(f); err != nil && err != fuse.ErrNotCached {
        log.Printf("invalidate error: %v", err)
    }
}

func (f *File) update() {
    tick := time.NewTicker(1 * time.Second)
    defer tick.Stop()
    for range tick.C {
        f.tick()
    }
}

vendor/bazil.org/fuse/examples/hellofs/hello.go (generated, vendored) · 101 lines deleted

@@ -1,101 +0,0 @@
// Hellofs implements a simple "hello world" file system.
package main

import (
    "flag"
    "fmt"
    "log"
    "os"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    _ "bazil.org/fuse/fs/fstestutil"
    "golang.org/x/net/context"
)

func usage() {
    fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
    fmt.Fprintf(os.Stderr, "  %s MOUNTPOINT\n", os.Args[0])
    flag.PrintDefaults()
}

func main() {
    flag.Usage = usage
    flag.Parse()

    if flag.NArg() != 1 {
        usage()
        os.Exit(2)
    }
    mountpoint := flag.Arg(0)

    c, err := fuse.Mount(
        mountpoint,
        fuse.FSName("helloworld"),
        fuse.Subtype("hellofs"),
        fuse.LocalVolume(),
        fuse.VolumeName("Hello world!"),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    err = fs.Serve(c, FS{})
    if err != nil {
        log.Fatal(err)
    }

    // check if the mount process has an error to report
    <-c.Ready
    if err := c.MountError; err != nil {
        log.Fatal(err)
    }
}

// FS implements the hello world file system.
type FS struct{}

func (FS) Root() (fs.Node, error) {
    return Dir{}, nil
}

// Dir implements both Node and Handle for the root directory.
type Dir struct{}

func (Dir) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = 1
    a.Mode = os.ModeDir | 0555
    return nil
}

func (Dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
    if name == "hello" {
        return File{}, nil
    }
    return nil, fuse.ENOENT
}

var dirDirs = []fuse.Dirent{
    {Inode: 2, Name: "hello", Type: fuse.DT_File},
}

func (Dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
    return dirDirs, nil
}

// File implements both Node and Handle for the hello file.
type File struct{}

const greeting = "hello, world\n"

func (File) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = 2
    a.Mode = 0444
    a.Size = uint64(len(greeting))
    return nil
}

func (File) ReadAll(ctx context.Context) ([]byte, error) {
    return []byte(greeting), nil
}

vendor/bazil.org/fuse/fs/bench/bench_create_test.go (generated, vendored) · 54 lines deleted

@@ -1,54 +0,0 @@
package bench_test

import (
    "fmt"
    "os"
    "testing"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "bazil.org/fuse/fs/fstestutil"
    "golang.org/x/net/context"
)

type dummyFile struct {
    fstestutil.File
}

type benchCreateDir struct {
    fstestutil.Dir
}

var _ fs.NodeCreater = (*benchCreateDir)(nil)

func (f *benchCreateDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
    child := &dummyFile{}
    return child, child, nil
}

func BenchmarkCreate(b *testing.B) {
    f := &benchCreateDir{}
    mnt, err := fstestutil.MountedT(b, fstestutil.SimpleFS{f}, nil)
    if err != nil {
        b.Fatal(err)
    }
    defer mnt.Close()

    // prepare file names to decrease test overhead
    names := make([]string, 0, b.N)
    for i := 0; i < b.N; i++ {
        // zero-padded so cost stays the same on every iteration
        names = append(names, mnt.Dir+"/"+fmt.Sprintf("%08x", i))
    }
    b.ResetTimer()

    for i := 0; i < b.N; i++ {
        f, err := os.Create(names[i])
        if err != nil {
            b.Fatalf("WriteFile: %v", err)
        }
        f.Close()
    }

    b.StopTimer()
}

vendor/bazil.org/fuse/fs/bench/bench_lookup_test.go (generated, vendored) · 42 lines deleted

@@ -1,42 +0,0 @@
package bench_test

import (
    "os"
    "testing"

    "golang.org/x/net/context"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "bazil.org/fuse/fs/fstestutil"
)

type benchLookupDir struct {
    fstestutil.Dir
}

var _ fs.NodeRequestLookuper = (*benchLookupDir)(nil)

func (f *benchLookupDir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) {
    return nil, fuse.ENOENT
}

func BenchmarkLookup(b *testing.B) {
    f := &benchLookupDir{}
    mnt, err := fstestutil.MountedT(b, fstestutil.SimpleFS{f}, nil)
    if err != nil {
        b.Fatal(err)
    }
    defer mnt.Close()

    name := mnt.Dir + "/does-not-exist"
    b.ResetTimer()

    for i := 0; i < b.N; i++ {
        if _, err := os.Stat(name); !os.IsNotExist(err) {
            b.Fatalf("Stat: wrong error: %v", err)
        }
    }

    b.StopTimer()
}

vendor/bazil.org/fuse/fs/bench/bench_readwrite_test.go (generated, vendored) · 268 lines deleted

@@ -1,268 +0,0 @@
package bench_test

import (
    "io"
    "io/ioutil"
    "os"
    "path"
    "testing"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "bazil.org/fuse/fs/fstestutil"
    "golang.org/x/net/context"
)

type benchConfig struct {
    directIO bool
}

type benchFS struct {
    conf *benchConfig
}

var _ = fs.FS(benchFS{})

func (f benchFS) Root() (fs.Node, error) {
    return benchDir{conf: f.conf}, nil
}

type benchDir struct {
    conf *benchConfig
}

var _ = fs.Node(benchDir{})
var _ = fs.NodeStringLookuper(benchDir{})
var _ = fs.Handle(benchDir{})
var _ = fs.HandleReadDirAller(benchDir{})

func (benchDir) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = 1
    a.Mode = os.ModeDir | 0555
    return nil
}

func (d benchDir) Lookup(ctx context.Context, name string) (fs.Node, error) {
    if name == "bench" {
        return benchFile{conf: d.conf}, nil
    }
    return nil, fuse.ENOENT
}

func (benchDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
    l := []fuse.Dirent{
        {Inode: 2, Name: "bench", Type: fuse.DT_File},
    }
    return l, nil
}

type benchFile struct {
    conf *benchConfig
}

var _ = fs.Node(benchFile{})
var _ = fs.NodeOpener(benchFile{})
var _ = fs.NodeFsyncer(benchFile{})
var _ = fs.Handle(benchFile{})
var _ = fs.HandleReader(benchFile{})
var _ = fs.HandleWriter(benchFile{})

func (benchFile) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = 2
    a.Mode = 0644
    a.Size = 9999999999999999
    return nil
}

func (f benchFile) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
    if f.conf.directIO {
        resp.Flags |= fuse.OpenDirectIO
    }
    // TODO configurable?
    resp.Flags |= fuse.OpenKeepCache
    return f, nil
}

func (benchFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
    resp.Data = resp.Data[:cap(resp.Data)]
    return nil
}

func (benchFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
    resp.Size = len(req.Data)
    return nil
}

func (benchFile) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
    return nil
}

func benchmark(b *testing.B, fn func(b *testing.B, mnt string), conf *benchConfig) {
    filesys := benchFS{
        conf: conf,
    }
    mnt, err := fstestutil.Mounted(filesys, nil,
        fuse.MaxReadahead(64*1024*1024),
        fuse.AsyncRead(),
        fuse.WritebackCache(),
    )
    if err != nil {
        b.Fatal(err)
    }
    defer mnt.Close()

    fn(b, mnt.Dir)
}

type zero struct{}

func (zero) Read(p []byte) (n int, err error) {
    return len(p), nil
}

var Zero io.Reader = zero{}

func doWrites(size int64) func(b *testing.B, mnt string) {
    return func(b *testing.B, mnt string) {
        p := path.Join(mnt, "bench")

        f, err := os.Create(p)
        if err != nil {
            b.Fatalf("create: %v", err)
        }
        defer f.Close()

        b.ResetTimer()
        b.SetBytes(size)

        for i := 0; i < b.N; i++ {
            _, err = io.CopyN(f, Zero, size)
            if err != nil {
                b.Fatalf("write: %v", err)
            }
        }
    }
}

func BenchmarkWrite100(b *testing.B) {
    benchmark(b, doWrites(100), &benchConfig{})
}

func BenchmarkWrite10MB(b *testing.B) {
    benchmark(b, doWrites(10*1024*1024), &benchConfig{})
}

func BenchmarkWrite100MB(b *testing.B) {
    benchmark(b, doWrites(100*1024*1024), &benchConfig{})
}

func BenchmarkDirectWrite100(b *testing.B) {
    benchmark(b, doWrites(100), &benchConfig{
        directIO: true,
    })
}

func BenchmarkDirectWrite10MB(b *testing.B) {
    benchmark(b, doWrites(10*1024*1024), &benchConfig{
        directIO: true,
    })
}

func BenchmarkDirectWrite100MB(b *testing.B) {
    benchmark(b, doWrites(100*1024*1024), &benchConfig{
        directIO: true,
    })
}

func doWritesSync(size int64) func(b *testing.B, mnt string) {
    return func(b *testing.B, mnt string) {
        p := path.Join(mnt, "bench")

        f, err := os.Create(p)
        if err != nil {
            b.Fatalf("create: %v", err)
        }
        defer f.Close()

        b.ResetTimer()
        b.SetBytes(size)

        for i := 0; i < b.N; i++ {
            _, err = io.CopyN(f, Zero, size)
            if err != nil {
                b.Fatalf("write: %v", err)
            }

            if err := f.Sync(); err != nil {
                b.Fatalf("sync: %v", err)
            }
        }
    }
}

func BenchmarkWriteSync100(b *testing.B) {
    benchmark(b, doWritesSync(100), &benchConfig{})
}

func BenchmarkWriteSync10MB(b *testing.B) {
    benchmark(b, doWritesSync(10*1024*1024), &benchConfig{})
}

func BenchmarkWriteSync100MB(b *testing.B) {
    benchmark(b, doWritesSync(100*1024*1024), &benchConfig{})
}

func doReads(size int64) func(b *testing.B, mnt string) {
    return func(b *testing.B, mnt string) {
        p := path.Join(mnt, "bench")

        f, err := os.Open(p)
        if err != nil {
            b.Fatalf("close: %v", err)
        }
        defer f.Close()

        b.ResetTimer()
        b.SetBytes(size)

        for i := 0; i < b.N; i++ {
            n, err := io.CopyN(ioutil.Discard, f, size)
            if err != nil {
                b.Fatalf("read: %v", err)
            }
            if n != size {
                b.Errorf("unexpected size: %d != %d", n, size)
            }
        }
    }
}

func BenchmarkRead100(b *testing.B) {
    benchmark(b, doReads(100), &benchConfig{})
}

func BenchmarkRead10MB(b *testing.B) {
    benchmark(b, doReads(10*1024*1024), &benchConfig{})
}

func BenchmarkRead100MB(b *testing.B) {
    benchmark(b, doReads(100*1024*1024), &benchConfig{})
}

func BenchmarkDirectRead100(b *testing.B) {
    benchmark(b, doReads(100), &benchConfig{
        directIO: true,
    })
}

func BenchmarkDirectRead10MB(b *testing.B) {
    benchmark(b, doReads(10*1024*1024), &benchConfig{
        directIO: true,
    })
}

func BenchmarkDirectRead100MB(b *testing.B) {
    benchmark(b, doReads(100*1024*1024), &benchConfig{
        directIO: true,
    })
}

vendor/bazil.org/fuse/fs/bench/doc.go (generated, vendored) · 5 lines deleted

@@ -1,5 +0,0 @@
// Package bench contains benchmarks.
//
// It is kept in a separate package to avoid conflicting with the
// debug-heavy defaults for the actual tests.
package bench

vendor/bazil.org/fuse/fs/fstestutil/checkdir.go (generated, vendored) · 70 lines deleted

@@ -1,70 +0,0 @@
package fstestutil

import (
    "fmt"
    "io/ioutil"
    "os"
)

// FileInfoCheck is a function that validates an os.FileInfo according
// to some criteria.
type FileInfoCheck func(fi os.FileInfo) error

type checkDirError struct {
    missing map[string]struct{}
    extra   map[string]os.FileMode
}

func (e *checkDirError) Error() string {
    return fmt.Sprintf("wrong directory contents: missing %v, extra %v", e.missing, e.extra)
}

// CheckDir checks the contents of the directory at path, making sure
// every directory entry listed in want is present. If the check is
// not nil, it must also pass.
//
// If want contains the impossible filename "", unexpected files are
// checked with that. If the key is not in want, unexpected files are
// an error.
//
// Missing entries, that are listed in want but not seen, are an
// error.
func CheckDir(path string, want map[string]FileInfoCheck) error {
    problems := &checkDirError{
        missing: make(map[string]struct{}, len(want)),
        extra:   make(map[string]os.FileMode),
    }
    for k := range want {
        if k == "" {
            continue
        }
        problems.missing[k] = struct{}{}
    }

    fis, err := ioutil.ReadDir(path)
    if err != nil {
        return fmt.Errorf("cannot read directory: %v", err)
    }

    for _, fi := range fis {
        check, ok := want[fi.Name()]
        if !ok {
            check, ok = want[""]
        }
        if !ok {
            problems.extra[fi.Name()] = fi.Mode()
            continue
        }
        delete(problems.missing, fi.Name())
        if check != nil {
            if err := check(fi); err != nil {
                return fmt.Errorf("check failed: %v: %v", fi.Name(), err)
            }
        }
    }

    if len(problems.missing) > 0 || len(problems.extra) > 0 {
        return problems
    }
    return nil
}

vendor/bazil.org/fuse/fs/fstestutil/debug.go (generated, vendored) · 65 lines deleted

@@ -1,65 +0,0 @@
package fstestutil

import (
    "flag"
    "log"
    "strconv"

    "bazil.org/fuse"
)

type flagDebug bool

var debug flagDebug

var _ = flag.Value(&debug)

func (f *flagDebug) IsBoolFlag() bool {
    return true
}

func nop(msg interface{}) {}

func (f *flagDebug) Set(s string) error {
    v, err := strconv.ParseBool(s)
    if err != nil {
        return err
    }
    *f = flagDebug(v)
    if v {
        fuse.Debug = logMsg
    } else {
        fuse.Debug = nop
    }
    return nil
}

func (f *flagDebug) String() string {
    return strconv.FormatBool(bool(*f))
}

func logMsg(msg interface{}) {
    log.Printf("FUSE: %s\n", msg)
}

func init() {
    flag.Var(&debug, "fuse.debug", "log FUSE processing details")
}

// DebugByDefault changes the default of the `-fuse.debug` flag to
// true.
//
// This package registers a command line flag `-fuse.debug` and when
// run with that flag (and activated inside the tests), logs FUSE
// debug messages.
//
// This is disabled by default, as most callers probably won't care
// about FUSE details. Use DebugByDefault for tests where you'd
// normally be passing `-fuse.debug` all the time anyway.
//
// Call from an init function.
func DebugByDefault() {
    f := flag.Lookup("fuse.debug")
    f.DefValue = "true"
    f.Value.Set(f.DefValue)
}

vendor/bazil.org/fuse/fs/fstestutil/doc.go (generated, vendored) · 1 line deleted

@@ -1 +0,0 @@
package fstestutil // import "bazil.org/fuse/fs/fstestutil"

vendor/bazil.org/fuse/fs/fstestutil/mounted.go (generated, vendored) · 141 lines deleted

@@ -1,141 +0,0 @@
package fstestutil

import (
    "errors"
    "io/ioutil"
    "log"
    "os"
    "testing"
    "time"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
)

// Mount contains information about the mount for the test to use.
type Mount struct {
    // Dir is the temporary directory where the filesystem is mounted.
    Dir string

    Conn   *fuse.Conn
    Server *fs.Server

    // Error will receive the return value of Serve.
    Error <-chan error

    done   <-chan struct{}
    closed bool
}

// Close unmounts the filesystem and waits for fs.Serve to return. Any
// returned error will be stored in Err. It is safe to call Close
// multiple times.
func (mnt *Mount) Close() {
    if mnt.closed {
        return
    }
    mnt.closed = true
    for tries := 0; tries < 1000; tries++ {
        err := fuse.Unmount(mnt.Dir)
        if err != nil {
            // TODO do more than log?
            log.Printf("unmount error: %v", err)
            time.Sleep(10 * time.Millisecond)
            continue
        }
        break
    }
    <-mnt.done
    mnt.Conn.Close()
    os.Remove(mnt.Dir)
}

// MountedFunc mounts a filesystem at a temporary directory. The
// filesystem used is constructed by calling a function, to allow
// storing fuse.Conn and fs.Server in the FS.
//
// It also waits until the filesystem is known to be visible (OS X
// workaround).
//
// After successful return, caller must clean up by calling Close.
func MountedFunc(fn func(*Mount) fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) {
    dir, err := ioutil.TempDir("", "fusetest")
    if err != nil {
        return nil, err
    }
    c, err := fuse.Mount(dir, options...)
    if err != nil {
        return nil, err
    }
    server := fs.New(c, conf)
    done := make(chan struct{})
    serveErr := make(chan error, 1)
    mnt := &Mount{
        Dir:    dir,
        Conn:   c,
        Server: server,
        Error:  serveErr,
        done:   done,
    }
    filesys := fn(mnt)
    go func() {
        defer close(done)
        serveErr <- server.Serve(filesys)
    }()

    select {
    case <-mnt.Conn.Ready:
        if err := mnt.Conn.MountError; err != nil {
            return nil, err
        }
        return mnt, nil
    case err = <-mnt.Error:
        // Serve quit early
        if err != nil {
            return nil, err
        }
        return nil, errors.New("Serve exited early")
    }
}

// Mounted mounts the fuse.Server at a temporary directory.
//
// It also waits until the filesystem is known to be visible (OS X
// workaround).
//
// After successful return, caller must clean up by calling Close.
func Mounted(filesys fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) {
    fn := func(*Mount) fs.FS { return filesys }
    return MountedFunc(fn, conf, options...)
}

// MountedFuncT mounts a filesystem at a temporary directory,
// directing it's debug log to the testing logger.
//
// See MountedFunc for usage.
//
// The debug log is not enabled by default. Use `-fuse.debug` or call
// DebugByDefault to enable.
func MountedFuncT(t testing.TB, fn func(*Mount) fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) {
    if conf == nil {
        conf = &fs.Config{}
    }
    if debug && conf.Debug == nil {
        conf.Debug = func(msg interface{}) {
            t.Logf("FUSE: %s", msg)
        }
    }
    return MountedFunc(fn, conf, options...)
}

// MountedT mounts the filesystem at a temporary directory,
// directing it's debug log to the testing logger.
//
// See Mounted for usage.
//
// The debug log is not enabled by default. Use `-fuse.debug` or call
// DebugByDefault to enable.
func MountedT(t testing.TB, filesys fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) {
    fn := func(*Mount) fs.FS { return filesys }
    return MountedFuncT(t, fn, conf, options...)
}

vendor/bazil.org/fuse/fs/fstestutil/mountinfo.go (generated, vendored) · 26 lines deleted

@@ -1,26 +0,0 @@
package fstestutil

// MountInfo describes a mounted file system.
type MountInfo struct {
    FSName string
    Type   string
}

// GetMountInfo finds information about the mount at mnt. It is
// intended for use by tests only, and only fetches information
// relevant to the current tests.
func GetMountInfo(mnt string) (*MountInfo, error) {
    return getMountInfo(mnt)
}

// cstr converts a nil-terminated C string into a Go string
func cstr(ca []int8) string {
    s := make([]byte, 0, len(ca))
    for _, c := range ca {
        if c == 0x00 {
            break
        }
        s = append(s, byte(c))
    }
    return string(s)
}

29 vendor/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go generated vendored
@@ -1,29 +0,0 @@
package fstestutil

import (
    "regexp"
    "syscall"
)

var re = regexp.MustCompile(`\\(.)`)

// unescape removes backslash-escaping. The escaped characters are not
// mapped in any way; that is, unescape(`\n`) == `n`.
func unescape(s string) string {
    return re.ReplaceAllString(s, `$1`)
}

func getMountInfo(mnt string) (*MountInfo, error) {
    var st syscall.Statfs_t
    err := syscall.Statfs(mnt, &st)
    if err != nil {
        return nil, err
    }
    i := &MountInfo{
        // OS X getmntent(3) fails to un-escape the data, so we do it.
        // This might lead to double-unescaping in the future. Fun.
        // TestMountOptionFSNameEvilBackslashDouble checks for that.
        FSName: unescape(cstr(st.Mntfromname[:])),
    }
    return i, nil
}
7 vendor/bazil.org/fuse/fs/fstestutil/mountinfo_freebsd.go generated vendored
@@ -1,7 +0,0 @@
package fstestutil

import "errors"

func getMountInfo(mnt string) (*MountInfo, error) {
    return nil, errors.New("FreeBSD has no useful mount information")
}
51 vendor/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go generated vendored
@@ -1,51 +0,0 @@
package fstestutil

import (
    "errors"
    "io/ioutil"
    "strings"
)

// Linux /proc/mounts shows current mounts.
// Same format as /etc/fstab. Quoting getmntent(3):
//
//    Since fields in the mtab and fstab files are separated by whitespace,
//    octal escapes are used to represent the four characters space (\040),
//    tab (\011), newline (\012) and backslash (\134) in those files when
//    they occur in one of the four strings in a mntent structure.
//
// http://linux.die.net/man/3/getmntent

var fstabUnescape = strings.NewReplacer(
    `\040`, "\040",
    `\011`, "\011",
    `\012`, "\012",
    `\134`, "\134",
)

var errNotFound = errors.New("mount not found")

func getMountInfo(mnt string) (*MountInfo, error) {
    data, err := ioutil.ReadFile("/proc/mounts")
    if err != nil {
        return nil, err
    }
    for _, line := range strings.Split(string(data), "\n") {
        fields := strings.Fields(line)
        if len(fields) < 3 {
            continue
        }
        // Fields are: fsname dir type opts freq passno
        fsname := fstabUnescape.Replace(fields[0])
        dir := fstabUnescape.Replace(fields[1])
        fstype := fstabUnescape.Replace(fields[2])
        if mnt == dir {
            info := &MountInfo{
                FSName: fsname,
                Type:   fstype,
            }
            return info, nil
        }
    }
    return nil, errNotFound
}
28 vendor/bazil.org/fuse/fs/fstestutil/record/buffer.go generated vendored
@@ -1,28 +0,0 @@
package record

import (
    "bytes"
    "io"
    "sync"
)

// Buffer is like bytes.Buffer but safe to access from multiple
// goroutines.
type Buffer struct {
    mu  sync.Mutex
    buf bytes.Buffer
}

var _ = io.Writer(&Buffer{})

func (b *Buffer) Write(p []byte) (n int, err error) {
    b.mu.Lock()
    defer b.mu.Unlock()
    return b.buf.Write(p)
}

func (b *Buffer) Bytes() []byte {
    b.mu.Lock()
    defer b.mu.Unlock()
    return b.buf.Bytes()
}
409 vendor/bazil.org/fuse/fs/fstestutil/record/record.go generated vendored
@@ -1,409 +0,0 @@
package record // import "bazil.org/fuse/fs/fstestutil/record"

import (
    "sync"
    "sync/atomic"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "golang.org/x/net/context"
)

// Writes gathers data from FUSE Write calls.
type Writes struct {
    buf Buffer
}

var _ = fs.HandleWriter(&Writes{})

func (w *Writes) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
    n, err := w.buf.Write(req.Data)
    resp.Size = n
    if err != nil {
        return err
    }
    return nil
}

func (w *Writes) RecordedWriteData() []byte {
    return w.buf.Bytes()
}

// Counter records the number of times a thing has occurred.
type Counter struct {
    count uint32
}

func (r *Counter) Inc() {
    atomic.AddUint32(&r.count, 1)
}

func (r *Counter) Count() uint32 {
    return atomic.LoadUint32(&r.count)
}

// MarkRecorder records whether a thing has occurred.
type MarkRecorder struct {
    count Counter
}

func (r *MarkRecorder) Mark() {
    r.count.Inc()
}

func (r *MarkRecorder) Recorded() bool {
    return r.count.Count() > 0
}

// Flushes notes whether a FUSE Flush call has been seen.
type Flushes struct {
    rec MarkRecorder
}

var _ = fs.HandleFlusher(&Flushes{})

func (r *Flushes) Flush(ctx context.Context, req *fuse.FlushRequest) error {
    r.rec.Mark()
    return nil
}

func (r *Flushes) RecordedFlush() bool {
    return r.rec.Recorded()
}

type Recorder struct {
    mu  sync.Mutex
    val interface{}
}

// Record that we've seen value. A nil value is indistinguishable from
// no value recorded.
func (r *Recorder) Record(value interface{}) {
    r.mu.Lock()
    r.val = value
    r.mu.Unlock()
}

func (r *Recorder) Recorded() interface{} {
    r.mu.Lock()
    val := r.val
    r.mu.Unlock()
    return val
}

type RequestRecorder struct {
    rec Recorder
}

// Record a fuse.Request, after zeroing header fields that are hard to
// reproduce.
//
// Make sure to record a copy, not the original request.
func (r *RequestRecorder) RecordRequest(req fuse.Request) {
    hdr := req.Hdr()
    *hdr = fuse.Header{}
    r.rec.Record(req)
}

func (r *RequestRecorder) Recorded() fuse.Request {
    val := r.rec.Recorded()
    if val == nil {
        return nil
    }
    return val.(fuse.Request)
}

// Setattrs records a Setattr request and its fields.
type Setattrs struct {
    rec RequestRecorder
}

var _ = fs.NodeSetattrer(&Setattrs{})

func (r *Setattrs) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return nil
}

func (r *Setattrs) RecordedSetattr() fuse.SetattrRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.SetattrRequest{}
    }
    return *(val.(*fuse.SetattrRequest))
}

// Fsyncs records an Fsync request and its fields.
type Fsyncs struct {
    rec RequestRecorder
}

var _ = fs.NodeFsyncer(&Fsyncs{})

func (r *Fsyncs) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return nil
}

func (r *Fsyncs) RecordedFsync() fuse.FsyncRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.FsyncRequest{}
    }
    return *(val.(*fuse.FsyncRequest))
}

// Mkdirs records a Mkdir request and its fields.
type Mkdirs struct {
    rec RequestRecorder
}

var _ = fs.NodeMkdirer(&Mkdirs{})

// Mkdir records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Mkdirs) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return nil, fuse.EIO
}

// RecordedMkdir returns information about the Mkdir request.
// If no request was seen, returns a zero value.
func (r *Mkdirs) RecordedMkdir() fuse.MkdirRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.MkdirRequest{}
    }
    return *(val.(*fuse.MkdirRequest))
}

// Symlinks records a Symlink request and its fields.
type Symlinks struct {
    rec RequestRecorder
}

var _ = fs.NodeSymlinker(&Symlinks{})

// Symlink records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Symlinks) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return nil, fuse.EIO
}

// RecordedSymlink returns information about the Symlink request.
// If no request was seen, returns a zero value.
func (r *Symlinks) RecordedSymlink() fuse.SymlinkRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.SymlinkRequest{}
    }
    return *(val.(*fuse.SymlinkRequest))
}

// Links records a Link request and its fields.
type Links struct {
    rec RequestRecorder
}

var _ = fs.NodeLinker(&Links{})

// Link records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Links) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (fs.Node, error) {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return nil, fuse.EIO
}

// RecordedLink returns information about the Link request.
// If no request was seen, returns a zero value.
func (r *Links) RecordedLink() fuse.LinkRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.LinkRequest{}
    }
    return *(val.(*fuse.LinkRequest))
}

// Mknods records a Mknod request and its fields.
type Mknods struct {
    rec RequestRecorder
}

var _ = fs.NodeMknoder(&Mknods{})

// Mknod records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Mknods) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return nil, fuse.EIO
}

// RecordedMknod returns information about the Mknod request.
// If no request was seen, returns a zero value.
func (r *Mknods) RecordedMknod() fuse.MknodRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.MknodRequest{}
    }
    return *(val.(*fuse.MknodRequest))
}

// Opens records an Open request and its fields.
type Opens struct {
    rec RequestRecorder
}

var _ = fs.NodeOpener(&Opens{})

// Open records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Opens) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return nil, fuse.EIO
}

// RecordedOpen returns information about the Open request.
// If no request was seen, returns a zero value.
func (r *Opens) RecordedOpen() fuse.OpenRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.OpenRequest{}
    }
    return *(val.(*fuse.OpenRequest))
}

// Getxattrs records a Getxattr request and its fields.
type Getxattrs struct {
    rec RequestRecorder
}

var _ = fs.NodeGetxattrer(&Getxattrs{})

// Getxattr records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Getxattrs) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return fuse.ErrNoXattr
}

// RecordedGetxattr returns information about the Getxattr request.
// If no request was seen, returns a zero value.
func (r *Getxattrs) RecordedGetxattr() fuse.GetxattrRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.GetxattrRequest{}
    }
    return *(val.(*fuse.GetxattrRequest))
}

// Listxattrs records a Listxattr request and its fields.
type Listxattrs struct {
    rec RequestRecorder
}

var _ = fs.NodeListxattrer(&Listxattrs{})

// Listxattr records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Listxattrs) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return fuse.ErrNoXattr
}

// RecordedListxattr returns information about the Listxattr request.
// If no request was seen, returns a zero value.
func (r *Listxattrs) RecordedListxattr() fuse.ListxattrRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.ListxattrRequest{}
    }
    return *(val.(*fuse.ListxattrRequest))
}

// Setxattrs records a Setxattr request and its fields.
type Setxattrs struct {
    rec RequestRecorder
}

var _ = fs.NodeSetxattrer(&Setxattrs{})

// Setxattr records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Setxattrs) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
    tmp := *req
    // The byte slice points to memory that will be reused, so make a
    // deep copy.
    tmp.Xattr = append([]byte(nil), req.Xattr...)
    r.rec.RecordRequest(&tmp)
    return nil
}

// RecordedSetxattr returns information about the Setxattr request.
// If no request was seen, returns a zero value.
func (r *Setxattrs) RecordedSetxattr() fuse.SetxattrRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.SetxattrRequest{}
    }
    return *(val.(*fuse.SetxattrRequest))
}

// Removexattrs records a Removexattr request and its fields.
type Removexattrs struct {
    rec RequestRecorder
}

var _ = fs.NodeRemovexattrer(&Removexattrs{})

// Removexattr records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Removexattrs) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return nil
}

// RecordedRemovexattr returns information about the Removexattr request.
// If no request was seen, returns a zero value.
func (r *Removexattrs) RecordedRemovexattr() fuse.RemovexattrRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.RemovexattrRequest{}
    }
    return *(val.(*fuse.RemovexattrRequest))
}

// Creates records a Create request and its fields.
type Creates struct {
    rec RequestRecorder
}

var _ = fs.NodeCreater(&Creates{})

// Create records the request and returns an error. Most callers should
// wrap this call in a function that returns a more useful result.
func (r *Creates) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
    tmp := *req
    r.rec.RecordRequest(&tmp)
    return nil, nil, fuse.EIO
}

// RecordedCreate returns information about the Create request.
// If no request was seen, returns a zero value.
func (r *Creates) RecordedCreate() fuse.CreateRequest {
    val := r.rec.Recorded()
    if val == nil {
        return fuse.CreateRequest{}
    }
    return *(val.(*fuse.CreateRequest))
}
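For context, a sketch of how these recorders are meant to be composed: embed them in a node next to the fstestutil helpers and assert on the recorded traffic afterwards. The type and test names are illustrative, not part of this commit:

package fs_test

import (
    "os"
    "testing"

    "bazil.org/fuse/fs/fstestutil"
    "bazil.org/fuse/fs/fstestutil/record"
)

// testFile looks like an ordinary file but records Write and Flush traffic.
type testFile struct {
    fstestutil.File
    record.Writes
    record.Flushes
}

func TestRecordedWrite(t *testing.T) {
    f := &testFile{}
    mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{&fstestutil.ChildMap{"child": f}}, nil)
    if err != nil {
        t.Fatal(err)
    }
    defer mnt.Close()

    fh, err := os.OpenFile(mnt.Dir+"/child", os.O_WRONLY, 0)
    if err != nil {
        t.Fatal(err)
    }
    if _, err := fh.Write([]byte("hello")); err != nil {
        t.Fatal(err)
    }
    if err := fh.Close(); err != nil {
        t.Fatal(err)
    }
    if got := string(f.RecordedWriteData()); got != "hello" {
        t.Errorf("recorded %q, want %q", got, "hello")
    }
    if !f.RecordedFlush() {
        t.Error("no Flush seen on close")
    }
}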
55 vendor/bazil.org/fuse/fs/fstestutil/record/wait.go generated vendored
@@ -1,55 +0,0 @@
package record

import (
    "sync"
    "time"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "golang.org/x/net/context"
)

type nothing struct{}

// ReleaseWaiter notes whether a FUSE Release call has been seen.
//
// Releases are not guaranteed to happen synchronously with any client
// call, so they must be waited for.
type ReleaseWaiter struct {
    once sync.Once
    seen chan nothing
}

var _ = fs.HandleReleaser(&ReleaseWaiter{})

func (r *ReleaseWaiter) init() {
    r.once.Do(func() {
        r.seen = make(chan nothing, 1)
    })
}

func (r *ReleaseWaiter) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
    r.init()
    close(r.seen)
    return nil
}

// WaitForRelease waits for Release to be called.
//
// With zero duration, wait forever. Otherwise, time out early
// in a more controlled way than `-test.timeout`.
//
// Returns whether a Release was seen. Always true if dur==0.
func (r *ReleaseWaiter) WaitForRelease(dur time.Duration) bool {
    r.init()
    var timeout <-chan time.Time
    if dur > 0 {
        timeout = time.After(dur)
    }
    select {
    case <-r.seen:
        return true
    case <-timeout:
        return false
    }
}
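A companion sketch for the waiter (again illustrative): because Release arrives asynchronously, the test closes the handle and then blocks on WaitForRelease with a bounded timeout:

package fs_test

import (
    "os"
    "testing"
    "time"

    "bazil.org/fuse/fs/fstestutil"
    "bazil.org/fuse/fs/fstestutil/record"
)

// releaseFile reports when the kernel releases its handle.
type releaseFile struct {
    fstestutil.File
    record.ReleaseWaiter
}

func TestRelease(t *testing.T) {
    f := &releaseFile{}
    mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{&fstestutil.ChildMap{"child": f}}, nil)
    if err != nil {
        t.Fatal(err)
    }
    defer mnt.Close()

    fh, err := os.Open(mnt.Dir + "/child")
    if err != nil {
        t.Fatal(err)
    }
    fh.Close()
    // Release is not synchronous with close(2); wait for it explicitly.
    if !f.WaitForRelease(1 * time.Second) {
        t.Error("Release was not seen within 1s")
    }
}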
55 vendor/bazil.org/fuse/fs/fstestutil/testfs.go generated vendored
@@ -1,55 +0,0 @@
package fstestutil

import (
    "os"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "golang.org/x/net/context"
)

// SimpleFS is a trivial FS that just implements the Root method.
type SimpleFS struct {
    Node fs.Node
}

var _ = fs.FS(SimpleFS{})

func (f SimpleFS) Root() (fs.Node, error) {
    return f.Node, nil
}

// File can be embedded in a struct to make it look like a file.
type File struct{}

func (f File) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Mode = 0666
    return nil
}

// Dir can be embedded in a struct to make it look like a directory.
type Dir struct{}

func (f Dir) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Mode = os.ModeDir | 0777
    return nil
}

// ChildMap is a directory with child nodes looked up from a map.
type ChildMap map[string]fs.Node

var _ = fs.Node(&ChildMap{})
var _ = fs.NodeStringLookuper(&ChildMap{})

func (f *ChildMap) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Mode = os.ModeDir | 0777
    return nil
}

func (f *ChildMap) Lookup(ctx context.Context, name string) (fs.Node, error) {
    child, ok := (*f)[name]
    if !ok {
        return nil, fuse.ENOENT
    }
    return child, nil
}
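These pieces compose into whole test trees; a sketch of a two-level hierarchy (test and entry names are illustrative):

package fs_test

import (
    "os"
    "testing"

    "bazil.org/fuse/fs/fstestutil"
)

func TestTree(t *testing.T) {
    // Directories nest by putting a ChildMap inside a ChildMap.
    root := &fstestutil.ChildMap{
        "dir": &fstestutil.ChildMap{
            "file": fstestutil.File{},
        },
    }
    mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{root}, nil)
    if err != nil {
        t.Fatal(err)
    }
    defer mnt.Close()

    if _, err := os.Stat(mnt.Dir + "/dir/file"); err != nil {
        t.Fatal(err)
    }
}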
67 vendor/bazil.org/fuse/fs/helpers_test.go generated vendored
@@ -1,67 +0,0 @@
package fs_test

import (
    "errors"
    "flag"
    "os"
    "os/exec"
    "path/filepath"
    "testing"
)

var childHelpers = map[string]func(){}

type childProcess struct {
    name string
    fn   func()
}

var _ flag.Value = (*childProcess)(nil)

func (c *childProcess) String() string {
    return c.name
}

func (c *childProcess) Set(s string) error {
    fn, ok := childHelpers[s]
    if !ok {
        return errors.New("helper not found")
    }
    c.name = s
    c.fn = fn
    return nil
}

var childMode childProcess

func init() {
    flag.Var(&childMode, "fuse.internal.child", "internal use only")
}

// childCmd prepares a test function to be run in a subprocess, with
// childMode set to true. Caller must still call Run or Start.
//
// Re-using the test executable as the subprocess is useful because
// now test executables can e.g. be cross-compiled, transferred
// between hosts, and run in settings where the whole Go development
// environment is not installed.
func childCmd(childName string) (*exec.Cmd, error) {
    // caller may set cwd, so we can't rely on relative paths
    executable, err := filepath.Abs(os.Args[0])
    if err != nil {
        return nil, err
    }
    cmd := exec.Command(executable, "-fuse.internal.child="+childName)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    return cmd, nil
}

func TestMain(m *testing.M) {
    flag.Parse()
    if childMode.fn != nil {
        childMode.fn()
        os.Exit(0)
    }
    os.Exit(m.Run())
}
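A sketch of how a test in this same package might use that machinery; the helper name and body are illustrative, and the example assumes an added "fmt" import:

func init() {
    // Register the helper; when the test binary is re-executed with
    // -fuse.internal.child=sayHello, TestMain runs this function and exits.
    childHelpers["sayHello"] = func() { fmt.Println("hello from the child") }
}

func TestChildProcess(t *testing.T) {
    cmd, err := childCmd("sayHello")
    if err != nil {
        t.Fatal(err)
    }
    if err := cmd.Run(); err != nil {
        t.Fatal(err)
    }
}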
30 vendor/bazil.org/fuse/fs/serve_darwin_test.go generated vendored
@@ -1,30 +0,0 @@
package fs_test

import (
    "testing"

    "bazil.org/fuse/fs/fstestutil"
    "golang.org/x/sys/unix"
)

type exchangeData struct {
    fstestutil.File
    // this struct cannot be zero size or multiple instances may look identical
    _ int
}

func TestExchangeDataNotSupported(t *testing.T) {
    t.Parallel()
    mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{&fstestutil.ChildMap{
        "one": &exchangeData{},
        "two": &exchangeData{},
    }}, nil)
    if err != nil {
        t.Fatal(err)
    }
    defer mnt.Close()

    if err := unix.Exchangedata(mnt.Dir+"/one", mnt.Dir+"/two", 0); err != unix.ENOTSUP {
        t.Fatalf("expected ENOTSUP from exchangedata: %v", err)
    }
}
2843 vendor/bazil.org/fuse/fs/serve_test.go generated vendored
File diff suppressed because it is too large
1 vendor/bazil.org/fuse/fuse.go generated vendored
@@ -1262,6 +1262,7 @@ func (r *StatfsRequest) Respond(resp *StatfsResponse) {
 		Bfree:   resp.Bfree,
 		Bavail:  resp.Bavail,
 		Files:   resp.Files,
 		Ffree:   resp.Ffree,
 		Bsize:   resp.Bsize,
 		Namelen: resp.Namelen,
+		Frsize:  resp.Frsize,
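The newly forwarded Frsize reaches the kernel through statfs responses; a sketch of a filesystem that fills it in, assuming the fs package's FSStatfser interface (the type name and numbers are illustrative):

// statFS serves fixed statfs numbers, including the fragment size that
// this change passes through to the kernel.
type statFS struct{ fstestutil.Dir }

var _ fs.FS = statFS{}
var _ fs.FSStatfser = statFS{}

func (f statFS) Root() (fs.Node, error) { return f, nil }

func (statFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {
    resp.Blocks = 1 << 20 // total data blocks
    resp.Bfree = 1 << 19
    resp.Bavail = 1 << 19
    resp.Bsize = 4096
    resp.Frsize = 4096 // fundamental (fragment) block size
    resp.Namelen = 255
    return nil
}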
63 vendor/bazil.org/fuse/fuse_kernel_test.go generated vendored
@@ -1,63 +0,0 @@
package fuse_test

import (
    "os"
    "testing"

    "bazil.org/fuse"
)

func TestOpenFlagsAccmodeMaskReadWrite(t *testing.T) {
    var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC)
    if g, e := f&fuse.OpenAccessModeMask, fuse.OpenReadWrite; g != e {
        t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e)
    }
    if f.IsReadOnly() {
        t.Fatalf("IsReadOnly is wrong: %v", f)
    }
    if f.IsWriteOnly() {
        t.Fatalf("IsWriteOnly is wrong: %v", f)
    }
    if !f.IsReadWrite() {
        t.Fatalf("IsReadWrite is wrong: %v", f)
    }
}

func TestOpenFlagsAccmodeMaskReadOnly(t *testing.T) {
    var f = fuse.OpenFlags(os.O_RDONLY | os.O_SYNC)
    if g, e := f&fuse.OpenAccessModeMask, fuse.OpenReadOnly; g != e {
        t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e)
    }
    if !f.IsReadOnly() {
        t.Fatalf("IsReadOnly is wrong: %v", f)
    }
    if f.IsWriteOnly() {
        t.Fatalf("IsWriteOnly is wrong: %v", f)
    }
    if f.IsReadWrite() {
        t.Fatalf("IsReadWrite is wrong: %v", f)
    }
}

func TestOpenFlagsAccmodeMaskWriteOnly(t *testing.T) {
    var f = fuse.OpenFlags(os.O_WRONLY | os.O_SYNC)
    if g, e := f&fuse.OpenAccessModeMask, fuse.OpenWriteOnly; g != e {
        t.Fatalf("OpenAccessModeMask behaves wrong: %v: %o != %o", f, g, e)
    }
    if f.IsReadOnly() {
        t.Fatalf("IsReadOnly is wrong: %v", f)
    }
    if !f.IsWriteOnly() {
        t.Fatalf("IsWriteOnly is wrong: %v", f)
    }
    if f.IsReadWrite() {
        t.Fatalf("IsReadWrite is wrong: %v", f)
    }
}

func TestOpenFlagsString(t *testing.T) {
    var f = fuse.OpenFlags(os.O_RDWR | os.O_SYNC | os.O_APPEND)
    if g, e := f.String(), "OpenReadWrite+OpenAppend+OpenSync"; g != e {
        t.Fatalf("OpenFlags.String: %q != %q", g, e)
    }
}
64 vendor/bazil.org/fuse/options_daemon_timeout_test.go generated vendored
@@ -1,64 +0,0 @@
// Test for adjustable timeout between a FUSE request and the daemon's response.
//
// +build darwin freebsd

package fuse_test

import (
    "os"
    "runtime"
    "syscall"
    "testing"
    "time"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "bazil.org/fuse/fs/fstestutil"
    "golang.org/x/net/context"
)

type slowCreaterDir struct {
    fstestutil.Dir
}

var _ fs.NodeCreater = slowCreaterDir{}

func (c slowCreaterDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
    time.Sleep(10 * time.Second)
    // pick a really distinct error, to identify it later
    return nil, nil, fuse.Errno(syscall.ENAMETOOLONG)
}

func TestMountOptionDaemonTimeout(t *testing.T) {
    if runtime.GOOS != "darwin" && runtime.GOOS != "freebsd" {
        return
    }
    if testing.Short() {
        t.Skip("skipping time-based test in short mode")
    }
    t.Parallel()

    mnt, err := fstestutil.MountedT(t,
        fstestutil.SimpleFS{slowCreaterDir{}},
        nil,
        fuse.DaemonTimeout("2"),
    )
    if err != nil {
        t.Fatal(err)
    }
    defer mnt.Close()

    // This should fail by the kernel timing out the request.
    f, err := os.Create(mnt.Dir + "/child")
    if err == nil {
        f.Close()
        t.Fatal("expected an error")
    }
    perr, ok := err.(*os.PathError)
    if !ok {
        t.Fatalf("expected PathError, got %T: %v", err, err)
    }
    if perr.Err == syscall.ENAMETOOLONG {
        t.Fatalf("expected other than ENAMETOOLONG, got %T: %v", err, err)
    }
}
10 vendor/bazil.org/fuse/options_helper_test.go generated vendored
@@ -1,10 +0,0 @@
package fuse

// for TestMountOptionCommaError
func ForTestSetMountOption(k, v string) MountOption {
    fn := func(conf *mountConfig) error {
        conf.options[k] = v
        return nil
    }
    return fn
}
31 vendor/bazil.org/fuse/options_nocomma_test.go generated vendored
@@ -1,31 +0,0 @@
// This file contains tests for platforms that have no escape
// mechanism for including commas in mount options.
//
// +build darwin

package fuse_test

import (
    "runtime"
    "testing"

    "bazil.org/fuse"
    "bazil.org/fuse/fs/fstestutil"
)

func TestMountOptionCommaError(t *testing.T) {
    t.Parallel()
    // this test is not tied to any specific option, it just needs
    // some string content
    var evil = "FuseTest,Marker"
    mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
        fuse.ForTestSetMountOption("fusetest", evil),
    )
    if err == nil {
        mnt.Close()
        t.Fatal("expected an error about commas")
    }
    if g, e := err.Error(), `mount options cannot contain commas on `+runtime.GOOS+`: "fusetest"="FuseTest,Marker"`; g != e {
        t.Fatalf("wrong error: %q != %q", g, e)
    }
}
231 vendor/bazil.org/fuse/options_test.go generated vendored
@@ -1,231 +0,0 @@
package fuse_test

import (
    "os"
    "runtime"
    "syscall"
    "testing"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "bazil.org/fuse/fs/fstestutil"
    "golang.org/x/net/context"
)

func init() {
    fstestutil.DebugByDefault()
}

func TestMountOptionFSName(t *testing.T) {
    if runtime.GOOS == "freebsd" {
        t.Skip("FreeBSD does not support FSName")
    }
    t.Parallel()
    const name = "FuseTestMarker"
    mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
        fuse.FSName(name),
    )
    if err != nil {
        t.Fatal(err)
    }
    defer mnt.Close()

    info, err := fstestutil.GetMountInfo(mnt.Dir)
    if err != nil {
        t.Fatal(err)
    }
    if g, e := info.FSName, name; g != e {
        t.Errorf("wrong FSName: %q != %q", g, e)
    }
}

func testMountOptionFSNameEvil(t *testing.T, evil string) {
    if runtime.GOOS == "freebsd" {
        t.Skip("FreeBSD does not support FSName")
    }
    t.Parallel()
    var name = "FuseTest" + evil + "Marker"
    mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
        fuse.FSName(name),
    )
    if err != nil {
        t.Fatal(err)
    }
    defer mnt.Close()

    info, err := fstestutil.GetMountInfo(mnt.Dir)
    if err != nil {
        t.Fatal(err)
    }
    if g, e := info.FSName, name; g != e {
        t.Errorf("wrong FSName: %q != %q", g, e)
    }
}

func TestMountOptionFSNameEvilComma(t *testing.T) {
    if runtime.GOOS == "darwin" {
        // see TestMountOptionCommaError for a test that enforces we
        // at least give a nice error, instead of corrupting the mount
        // options
        t.Skip("TODO: OS X gets this wrong, commas in mount options cannot be escaped at all")
    }
    testMountOptionFSNameEvil(t, ",")
}

func TestMountOptionFSNameEvilSpace(t *testing.T) {
    testMountOptionFSNameEvil(t, " ")
}

func TestMountOptionFSNameEvilTab(t *testing.T) {
    testMountOptionFSNameEvil(t, "\t")
}

func TestMountOptionFSNameEvilNewline(t *testing.T) {
    testMountOptionFSNameEvil(t, "\n")
}

func TestMountOptionFSNameEvilBackslash(t *testing.T) {
    testMountOptionFSNameEvil(t, `\`)
}

func TestMountOptionFSNameEvilBackslashDouble(t *testing.T) {
    // catch double-unescaping, if it were to happen
    testMountOptionFSNameEvil(t, `\\`)
}

func TestMountOptionSubtype(t *testing.T) {
    if runtime.GOOS == "darwin" {
        t.Skip("OS X does not support Subtype")
    }
    if runtime.GOOS == "freebsd" {
        t.Skip("FreeBSD does not support Subtype")
    }
    t.Parallel()
    const name = "FuseTestMarker"
    mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
        fuse.Subtype(name),
    )
    if err != nil {
        t.Fatal(err)
    }
    defer mnt.Close()

    info, err := fstestutil.GetMountInfo(mnt.Dir)
    if err != nil {
        t.Fatal(err)
    }
    if g, e := info.Type, "fuse."+name; g != e {
        t.Errorf("wrong Subtype: %q != %q", g, e)
    }
}

// TODO test LocalVolume

// TODO test AllowOther; hard because needs system-level authorization

func TestMountOptionAllowOtherThenAllowRoot(t *testing.T) {
    t.Parallel()
    mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
        fuse.AllowOther(),
        fuse.AllowRoot(),
    )
    if err == nil {
        mnt.Close()
    }
    if g, e := err, fuse.ErrCannotCombineAllowOtherAndAllowRoot; g != e {
        t.Fatalf("wrong error: %v != %v", g, e)
    }
}

// TODO test AllowRoot; hard because needs system-level authorization

func TestMountOptionAllowRootThenAllowOther(t *testing.T) {
    t.Parallel()
    mnt, err := fstestutil.MountedT(t, fstestutil.SimpleFS{fstestutil.Dir{}}, nil,
        fuse.AllowRoot(),
        fuse.AllowOther(),
    )
    if err == nil {
        mnt.Close()
    }
    if g, e := err, fuse.ErrCannotCombineAllowOtherAndAllowRoot; g != e {
        t.Fatalf("wrong error: %v != %v", g, e)
    }
}

type unwritableFile struct{}

func (f unwritableFile) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Mode = 0000
    return nil
}

func TestMountOptionDefaultPermissions(t *testing.T) {
    if runtime.GOOS == "freebsd" {
        t.Skip("FreeBSD does not support DefaultPermissions")
    }
    t.Parallel()

    mnt, err := fstestutil.MountedT(t,
        fstestutil.SimpleFS{
            &fstestutil.ChildMap{"child": unwritableFile{}},
        },
        nil,
        fuse.DefaultPermissions(),
    )
    if err != nil {
        t.Fatal(err)
    }
    defer mnt.Close()

    // This will be prevented by kernel-level access checking when
    // DefaultPermissions is used.
    f, err := os.OpenFile(mnt.Dir+"/child", os.O_WRONLY, 0000)
    if err == nil {
        f.Close()
        t.Fatal("expected an error")
    }
    if !os.IsPermission(err) {
        t.Fatalf("expected a permission error, got %T: %v", err, err)
    }
}

type createrDir struct {
    fstestutil.Dir
}

var _ fs.NodeCreater = createrDir{}

func (createrDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
    // pick a really distinct error, to identify it later
    return nil, nil, fuse.Errno(syscall.ENAMETOOLONG)
}

func TestMountOptionReadOnly(t *testing.T) {
    t.Parallel()

    mnt, err := fstestutil.MountedT(t,
        fstestutil.SimpleFS{createrDir{}},
        nil,
        fuse.ReadOnly(),
    )
    if err != nil {
        t.Fatal(err)
    }
    defer mnt.Close()

    // This will be prevented by kernel-level access checking when
    // ReadOnly is used.
    f, err := os.Create(mnt.Dir + "/child")
    if err == nil {
        f.Close()
        t.Fatal("expected an error")
    }
    perr, ok := err.(*os.PathError)
    if !ok {
        t.Fatalf("expected PathError, got %T: %v", err, err)
    }
    if perr.Err != syscall.EROFS {
        t.Fatalf("expected EROFS, got %T: %v", err, err)
    }
}
13 vendor/bazil.org/fuse/syscallx/doc.go generated vendored
@@ -1,13 +0,0 @@
// Package syscallx provides wrappers that make syscalls on various
// platforms more interoperable.
//
// The API intentionally omits the OS X-specific position and option
// arguments for extended attribute calls.
//
// Not having position means it might not be useful for accessing the
// resource fork. If that's needed by code inside fuse, a function
// with a different name may be added on the side.
//
// Options can be implemented with separate wrappers, in the style of
// Linux getxattr/lgetxattr/fgetxattr.
package syscallx // import "bazil.org/fuse/syscallx"
34 vendor/bazil.org/fuse/syscallx/generate generated vendored
@@ -1,34 +0,0 @@
#!/bin/sh
set -e

mksys="$(go env GOROOT)/src/pkg/syscall/mksyscall.pl"

fix() {
    sed 's,^package syscall$,&x\nimport "syscall",' \
        | gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \
        | gofmt -r='Syscall6 -> syscall.Syscall6' \
        | gofmt -r='Syscall -> syscall.Syscall' \
        | gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \
        | gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \
        | gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \
        | gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \
        | gofmt -r='SYS_MSYNC -> syscall.SYS_MSYNC'
}

cd "$(dirname "$0")"

$mksys xattr_darwin.go \
    | fix \
    >xattr_darwin_amd64.go

$mksys -l32 xattr_darwin.go \
    | fix \
    >xattr_darwin_386.go

$mksys msync.go \
    | fix \
    >msync_amd64.go

$mksys -l32 msync.go \
    | fix \
    >msync_386.go
9 vendor/bazil.org/fuse/syscallx/msync.go generated vendored
@@ -1,9 +0,0 @@
package syscallx

/* This is the source file for msync_*.go, to regenerate run

./generate

*/

//sys Msync(b []byte, flags int) (err error)
24 vendor/bazil.org/fuse/syscallx/msync_386.go generated vendored
@@ -1,24 +0,0 @@
// mksyscall.pl -l32 msync.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

package syscallx

import "syscall"

import "unsafe"

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func Msync(b []byte, flags int) (err error) {
    var _p0 unsafe.Pointer
    if len(b) > 0 {
        _p0 = unsafe.Pointer(&b[0])
    } else {
        _p0 = unsafe.Pointer(&_zero)
    }
    _, _, e1 := syscall.Syscall(syscall.SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
    if e1 != 0 {
        err = e1
    }
    return
}
24 vendor/bazil.org/fuse/syscallx/msync_amd64.go generated vendored
@@ -1,24 +0,0 @@
// mksyscall.pl msync.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

package syscallx

import "syscall"

import "unsafe"

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func Msync(b []byte, flags int) (err error) {
    var _p0 unsafe.Pointer
    if len(b) > 0 {
        _p0 = unsafe.Pointer(&b[0])
    } else {
        _p0 = unsafe.Pointer(&_zero)
    }
    _, _, e1 := syscall.Syscall(syscall.SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
    if e1 != 0 {
        err = e1
    }
    return
}
4 vendor/bazil.org/fuse/syscallx/syscallx.go generated vendored
@@ -1,4 +0,0 @@
package syscallx

// make us look more like package syscall, so mksyscall.pl output works
var _zero uintptr
26 vendor/bazil.org/fuse/syscallx/syscallx_std.go generated vendored
@@ -1,26 +0,0 @@
// +build !darwin

package syscallx

// This file just contains wrappers for platforms that already have
// the right stuff in golang.org/x/sys/unix.

import (
    "golang.org/x/sys/unix"
)

func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
    return unix.Getxattr(path, attr, dest)
}

func Listxattr(path string, dest []byte) (sz int, err error) {
    return unix.Listxattr(path, dest)
}

func Setxattr(path string, attr string, data []byte, flags int) (err error) {
    return unix.Setxattr(path, attr, data, flags)
}

func Removexattr(path string, attr string) (err error) {
    return unix.Removexattr(path, attr)
}
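A sketch of the usual two-step pattern these wrappers support uniformly on every platform (the function name is illustrative): probe with a nil buffer for the size, then fetch into a right-sized buffer:

package xattrutil

import "bazil.org/fuse/syscallx"

// readXattr reads an extended attribute of unknown size. A nil dest
// triggers size probing on both the darwin and the x/sys/unix-backed
// paths. The attribute may grow between the two calls; real code
// would retry on a short buffer.
func readXattr(path, attr string) ([]byte, error) {
    sz, err := syscallx.Getxattr(path, attr, nil)
    if err != nil {
        return nil, err
    }
    buf := make([]byte, sz)
    sz, err = syscallx.Getxattr(path, attr, buf)
    if err != nil {
        return nil, err
    }
    return buf[:sz], nil
}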
38 vendor/bazil.org/fuse/syscallx/xattr_darwin.go generated vendored
@@ -1,38 +0,0 @@
package syscallx

/* This is the source file for syscallx_darwin_*.go, to regenerate run

./generate

*/

// cannot use dest []byte here because OS X getxattr really wants a
// NULL to trigger size probing, size==0 is not enough
//
//sys getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error)

func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
    var destp *byte
    if len(dest) > 0 {
        destp = &dest[0]
    }
    return getxattr(path, attr, destp, len(dest), 0, 0)
}

//sys listxattr(path string, dest []byte, options int) (sz int, err error)

func Listxattr(path string, dest []byte) (sz int, err error) {
    return listxattr(path, dest, 0)
}

//sys setxattr(path string, attr string, data []byte, position uint32, flags int) (err error)

func Setxattr(path string, attr string, data []byte, flags int) (err error) {
    return setxattr(path, attr, data, 0, flags)
}

//sys removexattr(path string, attr string, options int) (err error)

func Removexattr(path string, attr string) (err error) {
    return removexattr(path, attr, 0)
}
97 vendor/bazil.org/fuse/syscallx/xattr_darwin_386.go generated vendored
@@ -1,97 +0,0 @@
// mksyscall.pl -l32 xattr_darwin.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

package syscallx

import "syscall"

import "unsafe"

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
    var _p0 *byte
    _p0, err = syscall.BytePtrFromString(path)
    if err != nil {
        return
    }
    var _p1 *byte
    _p1, err = syscall.BytePtrFromString(attr)
    if err != nil {
        return
    }
    r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
    sz = int(r0)
    if e1 != 0 {
        err = e1
    }
    return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func listxattr(path string, dest []byte, options int) (sz int, err error) {
    var _p0 *byte
    _p0, err = syscall.BytePtrFromString(path)
    if err != nil {
        return
    }
    var _p1 unsafe.Pointer
    if len(dest) > 0 {
        _p1 = unsafe.Pointer(&dest[0])
    } else {
        _p1 = unsafe.Pointer(&_zero)
    }
    r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0)
    sz = int(r0)
    if e1 != 0 {
        err = e1
    }
    return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func setxattr(path string, attr string, data []byte, position uint32, flags int) (err error) {
    var _p0 *byte
    _p0, err = syscall.BytePtrFromString(path)
    if err != nil {
        return
    }
    var _p1 *byte
    _p1, err = syscall.BytePtrFromString(attr)
    if err != nil {
        return
    }
    var _p2 unsafe.Pointer
    if len(data) > 0 {
        _p2 = unsafe.Pointer(&data[0])
    } else {
        _p2 = unsafe.Pointer(&_zero)
    }
    _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(position), uintptr(flags))
    if e1 != 0 {
        err = e1
    }
    return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func removexattr(path string, attr string, options int) (err error) {
    var _p0 *byte
    _p0, err = syscall.BytePtrFromString(path)
    if err != nil {
        return
    }
    var _p1 *byte
    _p1, err = syscall.BytePtrFromString(attr)
    if err != nil {
        return
    }
    _, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
    if e1 != 0 {
        err = e1
    }
    return
}
97 vendor/bazil.org/fuse/syscallx/xattr_darwin_amd64.go generated vendored
@@ -1,97 +0,0 @@
// mksyscall.pl xattr_darwin.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

package syscallx

import "syscall"

import "unsafe"

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) {
    var _p0 *byte
    _p0, err = syscall.BytePtrFromString(path)
    if err != nil {
        return
    }
    var _p1 *byte
    _p1, err = syscall.BytePtrFromString(attr)
    if err != nil {
        return
    }
    r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options))
    sz = int(r0)
    if e1 != 0 {
        err = e1
    }
    return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func listxattr(path string, dest []byte, options int) (sz int, err error) {
    var _p0 *byte
    _p0, err = syscall.BytePtrFromString(path)
    if err != nil {
        return
    }
    var _p1 unsafe.Pointer
    if len(dest) > 0 {
        _p1 = unsafe.Pointer(&dest[0])
    } else {
        _p1 = unsafe.Pointer(&_zero)
    }
    r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0)
    sz = int(r0)
    if e1 != 0 {
        err = e1
    }
    return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func setxattr(path string, attr string, data []byte, position uint32, flags int) (err error) {
    var _p0 *byte
    _p0, err = syscall.BytePtrFromString(path)
    if err != nil {
        return
    }
    var _p1 *byte
    _p1, err = syscall.BytePtrFromString(attr)
    if err != nil {
        return
    }
    var _p2 unsafe.Pointer
    if len(data) > 0 {
        _p2 = unsafe.Pointer(&data[0])
    } else {
        _p2 = unsafe.Pointer(&_zero)
    }
    _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(position), uintptr(flags))
    if e1 != 0 {
        err = e1
    }
    return
}

// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

func removexattr(path string, attr string, options int) (err error) {
    var _p0 *byte
    _p0, err = syscall.BytePtrFromString(path)
    if err != nil {
        return
    }
    var _p1 *byte
    _p1, err = syscall.BytePtrFromString(attr)
    if err != nil {
        return
    }
    _, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
    if e1 != 0 {
        err = e1
    }
    return
}
21 vendor/cloud.google.com/go/.travis.yml generated vendored
@@ -1,21 +0,0 @@
sudo: false
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
install:
- go get -v cloud.google.com/go/...
script:
- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d
- tar xvf keys.tar
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762"
  GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json"
  GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests"
  GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json"
  ./run-tests.sh $TRAVIS_COMMIT
env:
  matrix:
    # The GCLOUD_TESTS_API_KEY environment variable.
    secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI=
152 vendor/cloud.google.com/go/CONTRIBUTING.md generated vendored
@@ -1,152 +0,0 @@
# Contributing

1. Sign one of the contributor license agreements below.
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
1. You will need to ensure that your `GOBIN` directory (by default
   `$GOPATH/bin`) is in your `PATH` so that git can find the command.
1. If you would like, you may want to set up aliases for git-codereview,
   such that `git codereview change` becomes `git change`. See the
   [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
1. Should you run into issues with the git-codereview tool, please note
   that all error messages will assume that you have set up these aliases.
1. Get the cloud package by running `go get -d cloud.google.com/go`.
1. If you have already checked out the source, make sure that the remote git
   origin is https://code.googlesource.com/gocloud:

       git remote set-url origin https://code.googlesource.com/gocloud

1. Make sure your auth is configured correctly by visiting
   https://code.googlesource.com, clicking "Generate Password", and following
   the directions.
1. Make changes and create a change by running `git codereview change <name>`,
   provide a commit message, and use `git codereview mail` to create a Gerrit CL.
1. Keep amending the change with `git codereview change` and mail as you receive
   feedback. Each new mailed amendment will create a new patch set for your change in Gerrit.

## Integration Tests

In addition to the unit tests, you may run the integration test suite.

To run the integration tests, you must first create and configure a project in
the Google Developers Console.

After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
Ensure the project-level **Owner**
[IAM role](console.cloud.google.com/iam-admin/iam/project) is added to the
service account. Alternatively, the account can be granted all of the following roles:
- **Editor**
- **Logs Configuration Writer**
- **PubSub Admin**

Once you create a project, set the following environment variables to be able to
run against the actual APIs.

- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
- **GCLOUD_TESTS_API_KEY**: Your API key.

Firestore requires a different project and key:

- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: Developers Console project's ID
  supporting Firestore
- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.

Install the [gcloud command-line tool][gcloudcli] to your machine and use it
to create some resources used in integration tests.

From the project's root directory:

``` sh
# Set the default project in your env.
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID

# Authenticate the gcloud tool with your account.
$ gcloud auth login

# Create the indexes used in the datastore integration tests.
$ gcloud preview datastore create-indexes datastore/testdata/index.yaml

# Create a Google Cloud storage bucket with the same name as your test project,
# and with the Stackdriver Logging service account as owner, for the sink
# integration tests in logging.
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID

# Create a PubSub topic for integration tests of storage notifications.
$ gcloud beta pubsub topics create go-storage-notification-test

# Create a Spanner instance for the spanner integration tests.
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test'
# NOTE: Spanner instances are priced by the node-hour, so you may want to delete
# the instance after testing with 'gcloud beta spanner instances delete'.
```

Once you've set the environment variables, you can run the integration tests by
running:

``` sh
$ go test -v cloud.google.com/go/...
```

## Contributor License Agreements

Before we can accept your pull requests you'll need to sign a Contributor
License Agreement (CLA):

- **If you are an individual writing original source code** and **you own the
  intellectual property**, then you'll need to sign an [individual CLA][indvcla].
- **If you work for a company that wants to allow you to contribute your
  work**, then you'll need to sign a [corporate CLA][corpcla].

You can sign these electronically (just scroll to the bottom). After that,
we'll be able to accept your pull requests.

## Contributor Code of Conduct

As contributors and maintainers of this project,
and in the interest of fostering an open and welcoming community,
we pledge to respect all people who contribute through reporting issues,
posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.

We are committed to making participation in this project
a harassment-free experience for everyone,
regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information,
  such as physical or electronic
  addresses, without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct.
By adopting this Code of Conduct,
project maintainers commit themselves to fairly and consistently
applying these principles to every aspect of managing this project.
Project maintainers who do not follow or enforce the Code of Conduct
may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by opening an issue
or contacting one or more of the project maintainers.

This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)

[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
[indvcla]: https://developers.google.com/open-source/cla/individual
[corpcla]: https://developers.google.com/open-source/cla/corporate
1
vendor/cloud.google.com/go/CONTRIBUTORS
generated
vendored

@ -22,6 +22,7 @@ David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
James Hall <james.hall@shopify.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>
54
vendor/cloud.google.com/go/MIGRATION.md
generated
vendored

@ -1,54 +0,0 @@
# Code Changes

## v0.10.0

- pubsub: Replace

    ```
    sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
    ```

  with

    ```
    sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
        PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
    })
    ```

- trace: traceGRPCServerInterceptor will be provided from *trace.Client.
  Given an initialized `*trace.Client` named `tc`, instead of

    ```
    s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
    ```

  write

    ```
    s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
    ```

- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client.
  Instead of

    ```
    conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
    ```

  write

    ```
    conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
    ```

- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
  interceptor as a dial option as shown below when initializing Cloud package
  clients:

    ```
    c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
    if err != nil {
        ...
    }
    ```
570
vendor/cloud.google.com/go/README.md
generated
vendored

@ -1,570 +0,0 @@
# Google Cloud Client Libraries for Go

[GoDoc](https://godoc.org/cloud.google.com/go)

Go packages for [Google Cloud Platform](https://cloud.google.com) services.

``` go
import "cloud.google.com/go"
```

To install the packages on your system,

```
$ go get -u cloud.google.com/go/...
```

**NOTE:** Some of these packages are under development, and may occasionally
make backwards-incompatible changes.

**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).

* [News](#news)
* [Supported APIs](#supported-apis)
* [Go Versions Supported](#go-versions-supported)
* [Authorization](#authorization)
* [Cloud Datastore](#cloud-datastore-)
* [Cloud Storage](#cloud-storage-)
* [Cloud Pub/Sub](#cloud-pub-sub-)
* [Cloud BigQuery](#cloud-bigquery-)
* [Stackdriver Logging](#stackdriver-logging-)
* [Cloud Spanner](#cloud-spanner-)

## News

_March 22, 2018_

*v0.20.0*

- bigquery: Support SchemaUpdateOptions for load jobs.

- bigtable:
  - Add SampleRowKeys.
  - cbt: Support union, intersection GCPolicy.
  - Retry admin RPCS.
  - Add trace spans to retries.

- datastore: Add OpenCensus tracing.

- firestore:
  - Fix queries involving Null and NaN.
  - Allow Timestamp protobuffers for time values.

- logging: Add a WriteTimeout option.

- spanner: Support Batch API.

- storage: Add OpenCensus tracing.

_February 26, 2018_

*v0.19.0*

- bigquery:
  - Support customer-managed encryption keys.

- bigtable:
  - Improved emulator support.
  - Support GetCluster.

- datastore:
  - Add general mutations.
  - Support pointer struct fields.
  - Support transaction options.

- firestore:
  - Add Transaction.GetAll.
  - Support document cursors.

- logging:
  - Support concurrent RPCs to the service.
  - Support per-entry resources.

- profiler:
  - Add config options to disable heap and thread profiling.
  - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set.

- pubsub:
  - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the
    callback returns).
  - Add SubscriptionInProject.
  - Add OpenCensus instrumentation for streaming pull.

- storage:
  - Support CORS.

_January 18, 2018_

*v0.18.0*

- bigquery:
  - Marked stable.
  - Schema inference of nullable fields supported.
  - Added TimePartitioning to QueryConfig.

- firestore: Data provided to DocumentRef.Set with a Merge option can contain
  Delete sentinels.

- logging: Clients can accept parent resources other than projects.

- pubsub:
  - pubsub/pstest: A lightweight fake for pubsub. Experimental; feedback welcome.
  - Support updating more subscription metadata: AckDeadline,
    RetainAckedMessages and RetentionDuration.

- oslogin/apiv1beta: New client for the Cloud OS Login API.

- rpcreplay: A package for recording and replaying gRPC traffic.

- spanner:
  - Add a ReadWithOptions that supports a row limit, as well as an index.
  - Support query plan and execution statistics.
  - Added [OpenCensus](http://opencensus.io) support.

- storage: Clarify checksum validation for gzipped files (it is not validated
  when the file is served uncompressed).

_December 11, 2017_

*v0.17.0*

- firestore BREAKING CHANGES:
  - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update.
    Change
    `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})`
    to
    `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})`

    Change
    `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)`
    to
    `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})`
  - Rename MergePaths to Merge; require args to be FieldPaths
  - A value stored as an integer can be read into a floating-point field, and vice versa.
- bigtable/cmd/cbt:
  - Support deleting a column.
  - Add regex option for row read.
- spanner: Mark stable.
- storage:
  - Add Reader.ContentEncoding method.
  - Fix handling of SignedURL headers.
- bigquery:
  - If Uploader.Put is called with no rows, it returns nil without making a
    call.
  - Schema inference supports the "nullable" option in struct tags for
    non-required fields.
  - TimePartitioning supports "Field".

[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)

## Supported APIs

Google API | Status | Package
---------------------------------|--------------|-----------------------------------------------------------
[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[Container][cloud-container] | alpha | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
[Data Loss Prevention][cloud-dlp]| alpha | [`cloud.google.com/go/dlp/apiv2beta1`][cloud-dlp-ref]
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref]
[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]

> **Alpha status**: the API is still being actively developed. As a
> result, it might change in backward-incompatible ways and is not recommended
> for production use.
>
> **Beta status**: the API is largely complete, but still has outstanding
> features and bugs to be addressed. There may be minor backwards-incompatible
> changes where necessary.
>
> **Stable status**: the API is mature and ready for production use. We will
> continue addressing bugs and feature requests.

Documentation and examples are available at
https://godoc.org/cloud.google.com/go

Visit or join the
[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce)
for updates on these packages.

## Go Versions Supported

We support the two most recent major versions of Go. If Google App Engine uses
an older version, we support that as well. You can see which versions are
currently supported by looking at the lines following `go:` in
[`.travis.yml`](.travis.yml).

## Authorization

By default, each API will use [Google Application Default Credentials][default-creds]
for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration.

[snip]:# (auth)
```go
client, err := storage.NewClient(ctx)
```

To authorize using a
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
pass
[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile)
to the `NewClient` function of the desired package. For example:

[snip]:# (auth-JSON)
```go
client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
```

You can exert more control over authorization by using the
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
create an `oauth2.TokenSource`. Then pass
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
to the `NewClient` function:
[snip]:# (auth-ts)
```go
tokenSource := ...
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
```
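
The `tokenSource := ...` line above is deliberately elided. As a minimal sketch (not part of the vendored README), one common way to obtain an `oauth2.TokenSource` is `google.DefaultTokenSource`, which discovers the same Application Default Credentials the clients use when given no options:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Discover Application Default Credentials and wrap them as a TokenSource.
	ts, err := google.DefaultTokenSource(ctx, storage.ScopeReadOnly)
	if err != nil {
		log.Fatal(err)
	}
	// Hand the token source to the client instead of the default credentials.
	client, err := storage.NewClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		log.Fatal(err)
	}
	_ = client // Use the client.
}
```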

## Cloud Datastore [GoDoc](https://godoc.org/cloud.google.com/go/datastore)

- [About Cloud Datastore][cloud-datastore]
- [Activating the API for your project][cloud-datastore-activation]
- [API documentation][cloud-datastore-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore)
- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks)

### Example Usage

First create a `datastore.Client` to use throughout your application:

[snip]:# (datastore-1)
```go
client, err := datastore.NewClient(ctx, "my-project-id")
if err != nil {
	log.Fatal(err)
}
```

Then use that client to interact with the API:

[snip]:# (datastore-2)
```go
type Post struct {
	Title       string
	Body        string `datastore:",noindex"`
	PublishedAt time.Time
}
keys := []*datastore.Key{
	datastore.NameKey("Post", "post1", nil),
	datastore.NameKey("Post", "post2", nil),
}
posts := []*Post{
	{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
	{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
}
if _, err := client.PutMulti(ctx, keys, posts); err != nil {
	log.Fatal(err)
}
```

## Cloud Storage [GoDoc](https://godoc.org/cloud.google.com/go/storage)

- [About Cloud Storage][cloud-storage]
- [API documentation][cloud-storage-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)

### Example Usage

First create a `storage.Client` to use throughout your application:

[snip]:# (storage-1)
```go
client, err := storage.NewClient(ctx)
if err != nil {
	log.Fatal(err)
}
```

[snip]:# (storage-2)
```go
// Read the object1 from bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
if err != nil {
	log.Fatal(err)
}
defer rc.Close()
body, err := ioutil.ReadAll(rc)
if err != nil {
	log.Fatal(err)
}
```

## Cloud Pub/Sub [GoDoc](https://godoc.org/cloud.google.com/go/pubsub)

- [About Cloud Pubsub][cloud-pubsub]
- [API documentation][cloud-pubsub-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)

### Example Usage

First create a `pubsub.Client` to use throughout your application:

[snip]:# (pubsub-1)
```go
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
	log.Fatal(err)
}
```

Then use the client to publish and subscribe:

[snip]:# (pubsub-2)
```go
// Publish "hello world" on topic1.
topic := client.Topic("topic1")
res := topic.Publish(ctx, &pubsub.Message{
	Data: []byte("hello world"),
})
// The publish happens asynchronously.
// Later, you can get the result from res:
...
msgID, err := res.Get(ctx)
if err != nil {
	log.Fatal(err)
}

// Use a callback to receive messages via subscription1.
sub := client.Subscription("subscription1")
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
	fmt.Println(m.Data)
	m.Ack() // Acknowledge that we've consumed the message.
})
if err != nil {
	log.Println(err)
}
```

## Cloud BigQuery [GoDoc](https://godoc.org/cloud.google.com/go/bigquery)

- [About Cloud BigQuery][cloud-bigquery]
- [API documentation][cloud-bigquery-docs]
- [Go client documentation][cloud-bigquery-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery)

### Example Usage

First create a `bigquery.Client` to use throughout your application:
[snip]:# (bq-1)
```go
c, err := bigquery.NewClient(ctx, "my-project-ID")
if err != nil {
	// TODO: Handle error.
}
```

Then use that client to interact with the API:
[snip]:# (bq-2)
```go
// Construct a query.
q := c.Query(`
    SELECT year, SUM(number)
    FROM [bigquery-public-data:usa_names.usa_1910_2013]
    WHERE name = "William"
    GROUP BY year
    ORDER BY year
`)
// Execute the query.
it, err := q.Read(ctx)
if err != nil {
	// TODO: Handle error.
}
// Iterate through the results.
for {
	var values []bigquery.Value
	err := it.Next(&values)
	if err == iterator.Done {
		break
	}
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(values)
}
```

## Stackdriver Logging [GoDoc](https://godoc.org/cloud.google.com/go/logging)

- [About Stackdriver Logging][cloud-logging]
- [API documentation][cloud-logging-docs]
- [Go client documentation][cloud-logging-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)

### Example Usage

First create a `logging.Client` to use throughout your application:
[snip]:# (logging-1)
```go
ctx := context.Background()
client, err := logging.NewClient(ctx, "my-project")
if err != nil {
	// TODO: Handle error.
}
```

Usually, you'll want to add log entries to a buffer to be periodically flushed
(automatically and asynchronously) to the Stackdriver Logging service.
[snip]:# (logging-2)
```go
logger := client.Logger("my-log")
logger.Log(logging.Entry{Payload: "something happened!"})
```

Close your client before your program exits, to flush any buffered log entries.
[snip]:# (logging-3)
```go
err = client.Close()
if err != nil {
	// TODO: Handle error.
}
```

## Cloud Spanner [GoDoc](https://godoc.org/cloud.google.com/go/spanner)

- [About Cloud Spanner][cloud-spanner]
- [API documentation][cloud-spanner-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner)

### Example Usage

First create a `spanner.Client` to use throughout your application:

[snip]:# (spanner-1)
```go
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
if err != nil {
	log.Fatal(err)
}
```

[snip]:# (spanner-2)
```go
// Simple Reads And Writes
_, err = client.Apply(ctx, []*spanner.Mutation{
	spanner.Insert("Users",
		[]string{"name", "email"},
		[]interface{}{"alice", "a@example.com"})})
if err != nil {
	log.Fatal(err)
}
row, err := client.Single().ReadRow(ctx, "Users",
	spanner.Key{"alice"}, []string{"email"})
if err != nil {
	log.Fatal(err)
}
```

## Contributing

Contributions are welcome. Please see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
document for details. We're using Gerrit for our code reviews. Please don't open pull
requests against this repo; new pull requests will be automatically closed.

Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms.
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.

[cloud-datastore]: https://cloud.google.com/datastore/
[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate

[cloud-firestore]: https://cloud.google.com/firestore/
[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate

[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs

[cloud-storage]: https://cloud.google.com/storage/
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
[cloud-storage-docs]: https://cloud.google.com/storage/docs
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets

[cloud-bigtable]: https://cloud.google.com/bigtable/
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable

[cloud-bigquery]: https://cloud.google.com/bigquery/
[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery

[cloud-logging]: https://cloud.google.com/logging/
[cloud-logging-docs]: https://cloud.google.com/logging/docs
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging

[cloud-monitoring]: https://cloud.google.com/monitoring/
[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3

[cloud-vision]: https://cloud.google.com/vision
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1

[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1

[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
[cloud-oslogin-ref]: https://cloud.google.com/compute/docs/oslogin/rest

[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1

[cloud-spanner]: https://cloud.google.com/spanner/
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
[cloud-spanner-docs]: https://cloud.google.com/spanner/docs

[cloud-translation]: https://cloud.google.com/translation
[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation

[cloud-video]: https://cloud.google.com/video-intelligence/
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1

[cloud-errors]: https://cloud.google.com/error-reporting/
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting

[cloud-container]: https://cloud.google.com/containers/
[cloud-container-ref]: https://godoc.org/cloud.google.com/go/container/apiv1

[cloud-debugger]: https://cloud.google.com/debugger/
[cloud-debugger-ref]: https://godoc.org/cloud.google.com/go/debugger/apiv2

[cloud-dlp]: https://cloud.google.com/dlp/
[cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1

[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
32
vendor/cloud.google.com/go/appveyor.yml
generated
vendored

@ -1,32 +0,0 @@
# This file configures AppVeyor (http://www.appveyor.com),
# a Windows-based CI service similar to Travis.

# Identifier for this run
version: "{build}"

# Clone the repo into this path, which conforms to the standard
# Go workspace structure.
clone_folder: c:\gopath\src\cloud.google.com\go

environment:
  GOPATH: c:\gopath
  GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762
  GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json
  KEYFILE_CONTENTS:
    secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje

install:
  # Info for debugging.
  - echo %PATH%
  - go version
  - go env
  - go get -v -d -t ./...


# Provide a build script, or AppVeyor will call msbuild.
build_script:
  - go install -v ./...
  - echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY%

test_script:
  - go test -v ./...
49
vendor/cloud.google.com/go/authexample_test.go
generated
vendored

@ -1,49 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloud_test

import (
	"cloud.google.com/go/datastore"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
)

func Example_applicationDefaultCredentials() {
	// Google Application Default Credentials is the recommended way to authorize
	// and authenticate clients.
	//
	// See the following link on how to create and obtain Application Default Credentials:
	// https://developers.google.com/identity/protocols/application-default-credentials.
	client, err := datastore.NewClient(context.Background(), "project-id")
	if err != nil {
		// TODO: handle error.
	}
	_ = client // Use the client.
}

func Example_serviceAccountFile() {
	// Use a JSON key file associated with a Google service account to
	// authenticate and authorize. Service Account keys can be created and
	// downloaded from https://console.developers.google.com/permissions/serviceaccounts.
	//
	// Note: This example uses the datastore client, but the same steps apply to
	// the other client libraries underneath this package.
	client, err := datastore.NewClient(context.Background(),
		"project-id", option.WithServiceAccountFile("/path/to/service-account-key.json"))
	if err != nil {
		// TODO: handle error.
	}
	_ = client // Use the client.
}
8
vendor/cloud.google.com/go/bigquery/benchmarks/README.md
generated
vendored

@ -1,8 +0,0 @@
# BigQuery Benchmark
This directory contains benchmarks for the BigQuery client.

## Usage
`go run bench.go -- <your project id> queries.json`

The BigQuery service caches requests, so the benchmark should be run
at least twice, disregarding the first result.
85
vendor/cloud.google.com/go/bigquery/benchmarks/bench.go
generated
vendored

@ -1,85 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//+build ignore

package main

import (
	"encoding/json"
	"flag"
	"io/ioutil"
	"log"
	"time"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func main() {
	flag.Parse()

	ctx := context.Background()
	c, err := bigquery.NewClient(ctx, flag.Arg(0))
	if err != nil {
		log.Fatal(err)
	}

	queriesJSON, err := ioutil.ReadFile(flag.Arg(1))
	if err != nil {
		log.Fatal(err)
	}

	var queries []string
	if err := json.Unmarshal(queriesJSON, &queries); err != nil {
		log.Fatal(err)
	}

	for _, q := range queries {
		doQuery(ctx, c, q)
	}
}

func doQuery(ctx context.Context, c *bigquery.Client, qt string) {
	startTime := time.Now()
	q := c.Query(qt)
	it, err := q.Read(ctx)
	if err != nil {
		log.Fatal(err)
	}

	numRows, numCols := 0, 0
	var firstByte time.Duration

	for {
		var values []bigquery.Value
		err := it.Next(&values)
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if numRows == 0 {
			numCols = len(values)
			firstByte = time.Since(startTime)
		} else if numCols != len(values) {
			log.Fatalf("got %d columns, want %d", len(values), numCols)
		}
		numRows++
	}
	log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec",
		qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds())
}
10
vendor/cloud.google.com/go/bigquery/benchmarks/queries.json
generated
vendored

@ -1,10 +0,0 @@
[
  "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000",
  "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000",
  "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000",
  "SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000",
  "SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id",
  "SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId",
  "SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000",
  "SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000"
]
161
vendor/cloud.google.com/go/bigquery/bigquery.go
generated
vendored

@ -1,161 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"
	"io"
	"net/http"
	"time"

	gax "github.com/googleapis/gax-go"

	"cloud.google.com/go/internal"
	"cloud.google.com/go/internal/version"

	"google.golang.org/api/googleapi"
	"google.golang.org/api/option"
	htransport "google.golang.org/api/transport/http"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

const (
	prodAddr  = "https://www.googleapis.com/bigquery/v2/"
	Scope     = "https://www.googleapis.com/auth/bigquery"
	userAgent = "gcloud-golang-bigquery/20160429"
)

var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)

func setClientHeader(headers http.Header) {
	headers.Set("x-goog-api-client", xGoogHeader)
}

// Client may be used to perform BigQuery operations.
type Client struct {
	// Location, if set, will be used as the default location for all subsequent
	// dataset creation and job operations. A location specified directly in one of
	// those operations will override this value.
	Location string

	projectID string
	bqs       *bq.Service
}

// NewClient constructs a new Client which can perform BigQuery operations.
// Operations performed via the client are billed to the specified GCP project.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	o := []option.ClientOption{
		option.WithEndpoint(prodAddr),
		option.WithScopes(Scope),
		option.WithUserAgent(userAgent),
	}
	o = append(o, opts...)
	httpClient, endpoint, err := htransport.NewClient(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("bigquery: dialing: %v", err)
	}
	bqs, err := bq.New(httpClient)
	if err != nil {
		return nil, fmt.Errorf("bigquery: constructing client: %v", err)
	}
	bqs.BasePath = endpoint
	c := &Client{
		projectID: projectID,
		bqs:       bqs,
	}
	return c, nil
}

// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
	return nil
}

// Calls the Jobs.Insert RPC and returns a Job.
func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {
	call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)
	setClientHeader(call.Header())
	if media != nil {
		call.Media(media)
	}
	var res *bq.Job
	var err error
	invoke := func() error {
		res, err = call.Do()
		return err
	}
	// A job with a client-generated ID can be retried; the presence of the
	// ID makes the insert operation idempotent.
	// We don't retry if there is media, because it is an io.Reader. We'd
	// have to read the contents and keep it in memory, and that could be expensive.
	// TODO(jba): Look into retrying if media != nil.
	if job.JobReference != nil && media == nil {
		err = runWithRetry(ctx, invoke)
	} else {
		err = invoke()
	}
	if err != nil {
		return nil, err
	}
	return bqToJob(res, c)
}

// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
	if m == 0 {
		return time.Time{}
	}
	return time.Unix(0, m*1e6)
}

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
	// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
	backoff := gax.Backoff{
		Initial:    1 * time.Second,
		Max:        32 * time.Second,
		Multiplier: 2,
	}
	return internal.Retry(ctx, backoff, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil
		}
		return !retryableError(err), err
	})
}

// This is the correct definition of retryable according to the BigQuery team.
func retryableError(err error) bool {
	e, ok := err.(*googleapi.Error)
	if !ok {
		return false
	}
	var reason string
	if len(e.Errors) > 0 {
		reason = e.Errors[0].Reason
	}
	return e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
}
106
vendor/cloud.google.com/go/bigquery/copy.go
generated
vendored

@ -1,106 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
	// Srcs are the tables from which data will be copied.
	Srcs []*Table

	// Dst is the table into which the data will be copied.
	Dst *Table

	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition

	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteEmpty.
	WriteDisposition TableWriteDisposition

	// The labels associated with this job.
	Labels map[string]string

	// Custom encryption configuration (e.g., Cloud KMS keys).
	DestinationEncryptionConfig *EncryptionConfig
}

func (c *CopyConfig) toBQ() *bq.JobConfiguration {
	var ts []*bq.TableReference
	for _, t := range c.Srcs {
		ts = append(ts, t.toBQ())
	}
	return &bq.JobConfiguration{
		Labels: c.Labels,
		Copy: &bq.JobConfigurationTableCopy{
			CreateDisposition:                  string(c.CreateDisposition),
			WriteDisposition:                   string(c.WriteDisposition),
			DestinationTable:                   c.Dst.toBQ(),
			DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(),
			SourceTables:                       ts,
		},
	}
}

func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
	cc := &CopyConfig{
		Labels:                      q.Labels,
		CreateDisposition:           TableCreateDisposition(q.Copy.CreateDisposition),
		WriteDisposition:            TableWriteDisposition(q.Copy.WriteDisposition),
		Dst:                         bqToTable(q.Copy.DestinationTable, c),
		DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration),
	}
	for _, t := range q.Copy.SourceTables {
		cc.Srcs = append(cc.Srcs, bqToTable(t, c))
	}
	return cc
}

// A Copier copies data into a BigQuery table from one or more BigQuery tables.
type Copier struct {
	JobIDConfig
	CopyConfig
	c *Client
}

// CopierFrom returns a Copier which can be used to copy data into a
// BigQuery table from one or more BigQuery tables.
// The returned Copier may optionally be further configured before its Run method is called.
func (t *Table) CopierFrom(srcs ...*Table) *Copier {
	return &Copier{
		c: t.c,
		CopyConfig: CopyConfig{
			Srcs: srcs,
			Dst:  t,
		},
	}
}

// Run initiates a copy job.
func (c *Copier) Run(ctx context.Context) (*Job, error) {
	return c.c.insertJob(ctx, c.newJob(), nil)
}

func (c *Copier) newJob() *bq.Job {
	return &bq.Job{
		JobReference:  c.JobIDConfig.createJobRef(c.c),
		Configuration: c.CopyConfig.toBQ(),
	}
}
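
For orientation, a minimal sketch (not part of the vendored file) of how this Copier is driven from client code; `client`, the dataset IDs, and the table IDs are placeholder names, and `job.Wait`/`status.Err` come from job.go, which is outside this hunk:

```go
copier := client.Dataset("dst_dataset").Table("dst_table").CopierFrom(
	client.Dataset("src_dataset").Table("src_table"))
copier.WriteDisposition = bigquery.WriteTruncate // overwrite the destination
job, err := copier.Run(ctx)                      // starts the copy job via insertJob
if err != nil {
	// TODO: Handle error.
}
status, err := job.Wait(ctx) // block until the job finishes
if err != nil {
	// TODO: Handle error.
}
if err := status.Err(); err != nil {
	// TODO: Handle job error.
}
```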
165
vendor/cloud.google.com/go/bigquery/copy_test.go
generated
vendored

@ -1,165 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"testing"

	"github.com/google/go-cmp/cmp/cmpopts"

	"cloud.google.com/go/internal/testutil"

	bq "google.golang.org/api/bigquery/v2"
)

func defaultCopyJob() *bq.Job {
	return &bq.Job{
		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
		Configuration: &bq.JobConfiguration{
			Copy: &bq.JobConfigurationTableCopy{
				DestinationTable: &bq.TableReference{
					ProjectId: "d-project-id",
					DatasetId: "d-dataset-id",
					TableId:   "d-table-id",
				},
				SourceTables: []*bq.TableReference{
					{
						ProjectId: "s-project-id",
						DatasetId: "s-dataset-id",
						TableId:   "s-table-id",
					},
				},
			},
		},
	}
}

func TestCopy(t *testing.T) {
	defer fixRandomID("RANDOM")()
	testCases := []struct {
		dst      *Table
		srcs     []*Table
		jobID    string
		location string
		config   CopyConfig
		want     *bq.Job
	}{
		{
			dst: &Table{
				ProjectID: "d-project-id",
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
					TableID:   "s-table-id",
				},
			},
			want: defaultCopyJob(),
		},
		{
			dst: &Table{
				ProjectID: "d-project-id",
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
					TableID:   "s-table-id",
				},
			},
			config: CopyConfig{
				CreateDisposition:           CreateNever,
				WriteDisposition:            WriteTruncate,
				DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
				Labels:                      map[string]string{"a": "b"},
			},
			want: func() *bq.Job {
				j := defaultCopyJob()
				j.Configuration.Labels = map[string]string{"a": "b"}
				j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
				j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
				j.Configuration.Copy.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
				return j
			}(),
		},
		{
			dst: &Table{
				ProjectID: "d-project-id",
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
					TableID:   "s-table-id",
				},
			},
			jobID: "job-id",
			want: func() *bq.Job {
				j := defaultCopyJob()
				j.JobReference.JobId = "job-id"
				return j
			}(),
		},
		{
			dst: &Table{
				ProjectID: "d-project-id",
				DatasetID: "d-dataset-id",
				TableID:   "d-table-id",
			},
			srcs: []*Table{
				{
					ProjectID: "s-project-id",
					DatasetID: "s-dataset-id",
					TableID:   "s-table-id",
				},
			},
			location: "asia-northeast1",
			want: func() *bq.Job {
				j := defaultCopyJob()
				j.JobReference.Location = "asia-northeast1"
				return j
			}(),
		},
	}
	c := &Client{projectID: "client-project-id"}
	for i, tc := range testCases {
		tc.dst.c = c
		copier := tc.dst.CopierFrom(tc.srcs...)
		copier.JobID = tc.jobID
		copier.Location = tc.location
		tc.config.Srcs = tc.srcs
		tc.config.Dst = tc.dst
		copier.CopyConfig = tc.config
		got := copier.newJob()
		checkJob(t, i, got, tc.want)

		jc, err := bqToJobConfig(got.Configuration, c)
		if err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig,
			cmpopts.IgnoreUnexported(Table{}))
		if diff != "" {
			t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
		}
	}
}
505
vendor/cloud.google.com/go/bigquery/dataset.go
generated
vendored
505
vendor/cloud.google.com/go/bigquery/dataset.go
generated
vendored
|
@ -1,505 +0,0 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/optional"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
// Dataset is a reference to a BigQuery dataset.
|
||||
type Dataset struct {
|
||||
ProjectID string
|
||||
DatasetID string
|
||||
c *Client
|
||||
}
|
||||
|
||||
// DatasetMetadata contains information about a BigQuery dataset.
|
||||
type DatasetMetadata struct {
|
||||
// These fields can be set when creating a dataset.
|
||||
Name string // The user-friendly name for this dataset.
|
||||
Description string // The user-friendly description of this dataset.
|
||||
Location string // The geo location of the dataset.
|
||||
DefaultTableExpiration time.Duration // The default expiration time for new tables.
|
||||
Labels map[string]string // User-provided labels.
|
||||
Access []*AccessEntry // Access permissions.
|
||||
|
||||
// These fields are read-only.
|
||||
CreationTime time.Time
|
||||
LastModifiedTime time.Time // When the dataset or any of its tables were modified.
|
||||
FullID string // The full dataset ID in the form projectID:datasetID.
|
||||
|
||||
// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
|
||||
// ensure that the metadata hasn't changed since it was read.
|
||||
ETag string
|
||||
}
|
||||
|
||||
// DatasetMetadataToUpdate is used when updating a dataset's metadata.
|
||||
// Only non-nil fields will be updated.
|
||||
type DatasetMetadataToUpdate struct {
|
||||
Description optional.String // The user-friendly description of this table.
|
||||
Name optional.String // The user-friendly name for this dataset.
|
||||
|
||||
// DefaultTableExpiration is the the default expiration time for new tables.
|
||||
// If set to time.Duration(0), new tables never expire.
|
||||
DefaultTableExpiration optional.Duration
|
||||
|
||||
// The entire access list. It is not possible to replace individual entries.
|
||||
Access []*AccessEntry
|
||||
|
||||
labelUpdater
|
||||
}
|
||||
|
||||
// Dataset creates a handle to a BigQuery dataset in the client's project.
|
||||
func (c *Client) Dataset(id string) *Dataset {
|
||||
return c.DatasetInProject(c.projectID, id)
|
||||
}
|
||||
|
||||
// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
|
||||
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
|
||||
return &Dataset{
|
||||
ProjectID: projectID,
|
||||
DatasetID: datasetID,
|
||||
c: c,
|
||||
}
|
||||
}
|
||||
|
||||
// Create creates a dataset in the BigQuery service. An error will be returned if the
|
||||
// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
|
||||
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error {
|
||||
ds, err := md.toBQ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
|
||||
// Use Client.Location as a default.
|
||||
if ds.Location == "" {
|
||||
ds.Location = d.c.Location
|
||||
}
|
||||
call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
|
||||
setClientHeader(call.Header())
|
||||
_, err = call.Do()
|
||||
return err
|
||||
}

func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
	ds := &bq.Dataset{}
	if dm == nil {
		return ds, nil
	}
	ds.FriendlyName = dm.Name
	ds.Description = dm.Description
	ds.Location = dm.Location
	ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
	ds.Labels = dm.Labels
	var err error
	ds.Access, err = accessListToBQ(dm.Access)
	if err != nil {
		return nil, err
	}
	if !dm.CreationTime.IsZero() {
		return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
	}
	if !dm.LastModifiedTime.IsZero() {
		return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
	}
	if dm.FullID != "" {
		return nil, errors.New("bigquery: Dataset.FullID is not writable")
	}
	if dm.ETag != "" {
		return nil, errors.New("bigquery: Dataset.ETag is not writable")
	}
	return ds, nil
}

func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
	var q []*bq.DatasetAccess
	for _, e := range a {
		a, err := e.toBQ()
		if err != nil {
			return nil, err
		}
		q = append(q, a)
	}
	return q, nil
}

// Delete deletes the dataset.
func (d *Dataset) Delete(ctx context.Context) error {
	call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx)
	setClientHeader(call.Header())
	return call.Do()
}

// Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
	call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
	setClientHeader(call.Header())
	var ds *bq.Dataset
	if err := runWithRetry(ctx, func() (err error) {
		ds, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return bqToDatasetMetadata(ds)
}

func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
	dm := &DatasetMetadata{
		CreationTime:           unixMillisToTime(d.CreationTime),
		LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
		DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
		Description:            d.Description,
		Name:                   d.FriendlyName,
		FullID:                 d.Id,
		Location:               d.Location,
		Labels:                 d.Labels,
		ETag:                   d.Etag,
	}
	for _, a := range d.Access {
		e, err := bqToAccessEntry(a, nil)
		if err != nil {
			return nil, err
		}
		dm.Access = append(dm.Access, e)
	}
	return dm, nil
}

// Update modifies specific Dataset metadata fields.
// To perform a read-modify-write that protects against intervening reads,
// set the etag argument to the DatasetMetadata.ETag field from the read.
// Pass the empty string for etag for a "blind write" that will always succeed.
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
	ds, err := dm.toBQ()
	if err != nil {
		return nil, err
	}
	call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
	setClientHeader(call.Header())
	if etag != "" {
		call.Header().Set("If-Match", etag)
	}
	var ds2 *bq.Dataset
	if err := runWithRetry(ctx, func() (err error) {
		ds2, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return bqToDatasetMetadata(ds2)
}
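
// Illustrative read-modify-write sketch (editor's note, not part of the
// vendored source), following the ETag contract documented on Update; "ds"
// is a hypothetical *Dataset handle.
//
//	md, err := ds.Metadata(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	// Passing md.ETag makes the patch fail if the metadata changed in between.
//	_, err = ds.Update(ctx, DatasetMetadataToUpdate{Name: "new name"}, md.ETag)
//	if err != nil {
//		// TODO: Handle error.
//	}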

func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
	ds := &bq.Dataset{}
	forceSend := func(field string) {
		ds.ForceSendFields = append(ds.ForceSendFields, field)
	}

	if dm.Description != nil {
		ds.Description = optional.ToString(dm.Description)
		forceSend("Description")
	}
	if dm.Name != nil {
		ds.FriendlyName = optional.ToString(dm.Name)
		forceSend("FriendlyName")
	}
	if dm.DefaultTableExpiration != nil {
		dur := optional.ToDuration(dm.DefaultTableExpiration)
		if dur == 0 {
			// Send a null to delete the field.
			ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
		} else {
			ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
		}
	}
	if dm.Access != nil {
		var err error
		ds.Access, err = accessListToBQ(dm.Access)
		if err != nil {
			return nil, err
		}
		if len(ds.Access) == 0 {
			ds.NullFields = append(ds.NullFields, "Access")
		}
	}
	labels, forces, nulls := dm.update()
	ds.Labels = labels
	ds.ForceSendFields = append(ds.ForceSendFields, forces...)
	ds.NullFields = append(ds.NullFields, nulls...)
	return ds, nil
}

// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
func (d *Dataset) Table(tableID string) *Table {
	return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c}
}

// Tables returns an iterator over the tables in the Dataset.
func (d *Dataset) Tables(ctx context.Context) *TableIterator {
	it := &TableIterator{
		ctx:     ctx,
		dataset: d,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.tables) },
		func() interface{} { b := it.tables; it.tables = nil; return b })
	return it
}

// A TableIterator is an iterator over Tables.
type TableIterator struct {
	ctx      context.Context
	dataset  *Dataset
	tables   []*Table
	pageInfo *iterator.PageInfo
	nextFunc func() error
}

// Next returns the next result. Its second return value is Done if there are
// no more results. Once Next returns Done, all subsequent calls will return
// Done.
func (it *TableIterator) Next() (*Table, error) {
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	t := it.tables[0]
	it.tables = it.tables[1:]
	return t, nil
}
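
// Illustrative iteration sketch (editor's note, not part of the vendored
// source): draining a TableIterator until iterator.Done, per the
// google.golang.org/api/iterator contract; "ds" is a hypothetical *Dataset.
//
//	it := ds.Tables(ctx)
//	for {
//		t, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// TODO: Handle error.
//		}
//		_ = t // TODO: Use the table handle.
//	}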

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// for testing
var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
	call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
		PageToken(pageToken).
		Context(it.ctx)
	setClientHeader(call.Header())
	if pageSize > 0 {
		call.MaxResults(int64(pageSize))
	}
	var res *bq.TableList
	err := runWithRetry(it.ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	return res, err
}

func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
	res, err := listTables(it, pageSize, pageToken)
	if err != nil {
		return "", err
	}
	for _, t := range res.Tables {
		it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c))
	}
	return res.NextPageToken, nil
}

func bqToTable(tr *bq.TableReference, c *Client) *Table {
	return &Table{
		ProjectID: tr.ProjectId,
		DatasetID: tr.DatasetId,
		TableID:   tr.TableId,
		c:         c,
	}
}

// Datasets returns an iterator over the datasets in a project.
// The Client's project is used by default, but that can be
// changed by setting ProjectID on the returned iterator before calling Next.
func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
	return c.DatasetsInProject(ctx, c.projectID)
}

// DatasetsInProject returns an iterator over the datasets in the provided project.
//
// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator.
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
	it := &DatasetIterator{
		ctx:       ctx,
		c:         c,
		ProjectID: projectID,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b })
	return it
}

// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
	// ListHidden causes hidden datasets to be listed when set to true.
	// Set before the first call to Next.
	ListHidden bool

	// Filter restricts the datasets returned by label. The filter syntax is described in
	// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
	// Set before the first call to Next.
	Filter string

	// The project ID of the listed datasets.
	// Set before the first call to Next.
	ProjectID string

	ctx      context.Context
	c        *Client
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items    []*Dataset
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// Next returns the next result. Its second return value is Done if there are
// no more results. Once Next returns Done, all subsequent calls will return
// Done.
func (it *DatasetIterator) Next() (*Dataset, error) {
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}
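
// Illustrative sketch (editor's note, not part of the vendored source):
// listing datasets, including hidden ones, with a label filter. "client" is
// a hypothetical *Client and the filter value is made up.
//
//	it := client.Datasets(ctx)
//	it.ListHidden = true
//	it.Filter = "labels.department:receiving" // hypothetical label filter
//	for {
//		d, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// TODO: Handle error.
//		}
//		_ = d // TODO: Use the dataset handle.
//	}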

// for testing
var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
	call := it.c.bqs.Datasets.List(it.ProjectID).
		Context(it.ctx).
		PageToken(pageToken).
		All(it.ListHidden)
	setClientHeader(call.Header())
	if pageSize > 0 {
		call.MaxResults(int64(pageSize))
	}
	if it.Filter != "" {
		call.Filter(it.Filter)
	}
	var res *bq.DatasetList
	err := runWithRetry(it.ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	return res, err
}

func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
	res, err := listDatasets(it, pageSize, pageToken)
	if err != nil {
		return "", err
	}
	for _, d := range res.Datasets {
		it.items = append(it.items, &Dataset{
			ProjectID: d.DatasetReference.ProjectId,
			DatasetID: d.DatasetReference.DatasetId,
			c:         it.c,
		})
	}
	return res.NextPageToken, nil
}

// An AccessEntry describes the permissions that an entity has on a dataset.
type AccessEntry struct {
	Role       AccessRole // The role of the entity
	EntityType EntityType // The type of entity
	Entity     string     // The entity (individual or group) granted access
	View       *Table     // The view granted access (EntityType must be ViewEntity)
}

// AccessRole is the level of access to grant to a dataset.
type AccessRole string

const (
	OwnerRole  AccessRole = "OWNER"
	ReaderRole AccessRole = "READER"
	WriterRole AccessRole = "WRITER"
)

// EntityType is the type of entity in an AccessEntry.
type EntityType int

const (
	// A domain (e.g. "example.com")
	DomainEntity EntityType = iota + 1

	// Email address of a Google Group
	GroupEmailEntity

	// Email address of an individual user.
	UserEmailEntity

	// A special group: one of projectOwners, projectReaders, projectWriters or allAuthenticatedUsers.
	SpecialGroupEntity

	// A BigQuery view.
	ViewEntity
)
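
// Illustrative sketch (editor's note, not part of the vendored source):
// granting an entity read access by appending an AccessEntry and writing it
// back via Update; "ds" is a hypothetical *Dataset and the address is made up.
//
//	md, err := ds.Metadata(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	access := append(md.Access, &AccessEntry{
//		Role:       ReaderRole,
//		EntityType: UserEmailEntity,
//		Entity:     "joe@example.com",
//	})
//	// Using md.ETag guards against a concurrent ACL change.
//	_, err = ds.Update(ctx, DatasetMetadataToUpdate{Access: access}, md.ETag)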

func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) {
	q := &bq.DatasetAccess{Role: string(e.Role)}
	switch e.EntityType {
	case DomainEntity:
		q.Domain = e.Entity
	case GroupEmailEntity:
		q.GroupByEmail = e.Entity
	case UserEmailEntity:
		q.UserByEmail = e.Entity
	case SpecialGroupEntity:
		q.SpecialGroup = e.Entity
	case ViewEntity:
		q.View = e.View.toBQ()
	default:
		return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType)
	}
	return q, nil
}

func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) {
	e := &AccessEntry{Role: AccessRole(q.Role)}
	switch {
	case q.Domain != "":
		e.Entity = q.Domain
		e.EntityType = DomainEntity
	case q.GroupByEmail != "":
		e.Entity = q.GroupByEmail
		e.EntityType = GroupEmailEntity
	case q.UserByEmail != "":
		e.Entity = q.UserByEmail
		e.EntityType = UserEmailEntity
	case q.SpecialGroup != "":
		e.Entity = q.SpecialGroup
		e.EntityType = SpecialGroupEntity
	case q.View != nil:
		e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId)
		e.EntityType = ViewEntity
	default:
		return nil, errors.New("bigquery: invalid access value")
	}
	return e, nil
}

328
vendor/cloud.google.com/go/bigquery/dataset_test.go
generated
vendored
@ -1,328 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"strconv"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"

	"cloud.google.com/go/internal/testutil"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
	itest "google.golang.org/api/iterator/testing"
)

// listTablesStub services table list requests by returning data from an in-memory list of values.
type listTablesStub struct {
	expectedProject, expectedDataset string
	tables                           []*bq.TableListTables
}

func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
	if it.dataset.ProjectID != s.expectedProject {
		return nil, errors.New("wrong project id")
	}
	if it.dataset.DatasetID != s.expectedDataset {
		return nil, errors.New("wrong dataset id")
	}
	const maxPageSize = 2
	if pageSize <= 0 || pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	start := 0
	if pageToken != "" {
		var err error
		start, err = strconv.Atoi(pageToken)
		if err != nil {
			return nil, err
		}
	}
	end := start + pageSize
	if end > len(s.tables) {
		end = len(s.tables)
	}
	nextPageToken := ""
	if end < len(s.tables) {
		nextPageToken = strconv.Itoa(end)
	}
	return &bq.TableList{
		Tables:        s.tables[start:end],
		NextPageToken: nextPageToken,
	}, nil
}

func TestTables(t *testing.T) {
	c := &Client{projectID: "p1"}
	inTables := []*bq.TableListTables{
		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
	}
	outTables := []*Table{
		{ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
		{ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
		{ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
	}

	lts := &listTablesStub{
		expectedProject: "p1",
		expectedDataset: "d1",
		tables:          inTables,
	}
	old := listTables
	listTables = lts.listTables // cannot use t.Parallel with this test
	defer func() { listTables = old }()

	msg, ok := itest.TestIterator(outTables,
		func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
		func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
	if !ok {
		t.Error(msg)
	}
}

type listDatasetsStub struct {
	expectedProject string
	datasets        []*bq.DatasetListDatasets
	hidden          map[*bq.DatasetListDatasets]bool
}

func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
	const maxPageSize = 2
	if pageSize <= 0 || pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	if it.Filter != "" {
		return nil, errors.New("filter not supported")
	}
	if it.ProjectID != s.expectedProject {
		return nil, errors.New("bad project ID")
	}
	start := 0
	if pageToken != "" {
		var err error
		start, err = strconv.Atoi(pageToken)
		if err != nil {
			return nil, err
		}
	}
	var (
		i             int
		result        []*bq.DatasetListDatasets
		nextPageToken string
	)
	for i = start; len(result) < pageSize && i < len(s.datasets); i++ {
		if s.hidden[s.datasets[i]] && !it.ListHidden {
			continue
		}
		result = append(result, s.datasets[i])
	}
	if i < len(s.datasets) {
		nextPageToken = strconv.Itoa(i)
	}
	return &bq.DatasetList{
		Datasets:      result,
		NextPageToken: nextPageToken,
	}, nil
}

func TestDatasets(t *testing.T) {
	client := &Client{projectID: "p"}
	inDatasets := []*bq.DatasetListDatasets{
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}},
	}
	outDatasets := []*Dataset{
		{"p", "a", client},
		{"p", "b", client},
		{"p", "hidden", client},
		{"p", "c", client},
	}
	lds := &listDatasetsStub{
		expectedProject: "p",
		datasets:        inDatasets,
		hidden:          map[*bq.DatasetListDatasets]bool{inDatasets[2]: true},
	}
	old := listDatasets
	listDatasets = lds.listDatasets // cannot use t.Parallel with this test
	defer func() { listDatasets = old }()

	msg, ok := itest.TestIterator(outDatasets,
		func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it },
		func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
	if !ok {
		t.Fatalf("ListHidden=true: %s", msg)
	}

	msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]},
		func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it },
		func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
	if !ok {
		t.Fatalf("ListHidden=false: %s", msg)
	}
}

func TestDatasetToBQ(t *testing.T) {
	for _, test := range []struct {
		in   *DatasetMetadata
		want *bq.Dataset
	}{
		{nil, &bq.Dataset{}},
		{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
		{&DatasetMetadata{
			Name:                   "name",
			Description:            "desc",
			DefaultTableExpiration: time.Hour,
			Location:               "EU",
			Labels:                 map[string]string{"x": "y"},
			Access:                 []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}},
		}, &bq.Dataset{
			FriendlyName:             "name",
			Description:              "desc",
			DefaultTableExpirationMs: 60 * 60 * 1000,
			Location:                 "EU",
			Labels:                   map[string]string{"x": "y"},
			Access:                   []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}},
		}},
	} {
		got, err := test.in.toBQ()
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
		}
	}

	// Check that non-writeable fields are unset.
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	for _, dm := range []*DatasetMetadata{
		{CreationTime: aTime},
		{LastModifiedTime: aTime},
		{FullID: "x"},
		{ETag: "e"},
	} {
		if _, err := dm.toBQ(); err == nil {
			t.Errorf("%+v: got nil, want error", dm)
		}
	}
}

func TestBQToDatasetMetadata(t *testing.T) {
	cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	cMillis := cTime.UnixNano() / 1e6
	mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local)
	mMillis := mTime.UnixNano() / 1e6
	q := &bq.Dataset{
		CreationTime:             cMillis,
		LastModifiedTime:         mMillis,
		FriendlyName:             "name",
		Description:              "desc",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Location:                 "EU",
		Labels:                   map[string]string{"x": "y"},
		Access: []*bq.DatasetAccess{
			{Role: "READER", UserByEmail: "joe@example.com"},
			{Role: "WRITER", GroupByEmail: "users@example.com"},
		},
		Etag: "etag",
	}
	want := &DatasetMetadata{
		CreationTime:           cTime,
		LastModifiedTime:       mTime,
		Name:                   "name",
		Description:            "desc",
		DefaultTableExpiration: time.Hour,
		Location:               "EU",
		Labels:                 map[string]string{"x": "y"},
		Access: []*AccessEntry{
			{Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity},
			{Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity},
		},
		ETag: "etag",
	}
	got, err := bqToDatasetMetadata(q)
	if err != nil {
		t.Fatal(err)
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}

func TestDatasetMetadataToUpdateToBQ(t *testing.T) {
	dm := DatasetMetadataToUpdate{
		Description:            "desc",
		Name:                   "name",
		DefaultTableExpiration: time.Hour,
	}
	dm.SetLabel("label", "value")
	dm.DeleteLabel("del")

	got, err := dm.toBQ()
	if err != nil {
		t.Fatal(err)
	}
	want := &bq.Dataset{
		Description:              "desc",
		FriendlyName:             "name",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Labels:                   map[string]string{"label": "value"},
		ForceSendFields:          []string{"Description", "FriendlyName"},
		NullFields:               []string{"Labels.del"},
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}

func TestConvertAccessEntry(t *testing.T) {
	c := &Client{projectID: "pid"}
	for _, e := range []*AccessEntry{
		{Role: ReaderRole, Entity: "e", EntityType: DomainEntity},
		{Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity},
		{Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity},
		{Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity},
		{Role: ReaderRole, EntityType: ViewEntity,
			View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}},
	} {
		q, err := e.toBQ()
		if err != nil {
			t.Fatal(err)
		}
		got, err := bqToAccessEntry(q, c)
		if err != nil {
			t.Fatal(err)
		}
		if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" {
			t.Errorf("got=-, want=+:\n%s", diff)
		}
	}

	e := &AccessEntry{Role: ReaderRole, Entity: "e"}
	if _, err := e.toBQ(); err == nil {
		t.Error("got nil, want error")
	}
	if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil {
		t.Error("got nil, want error")
	}
}

67
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go
generated
vendored
@ -1,67 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package datatransfer

import (
	datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)

import (
	"fmt"
	"strconv"
	"testing"
	"time"

	"cloud.google.com/go/internal/testutil"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
)

var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now

func TestDataTransferServiceSmoke(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping smoke test in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}

	projectId := testutil.ProjID()
	_ = projectId

	c, err := NewClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		t.Fatal(err)
	}

	var formattedParent string = fmt.Sprintf("projects/%s", projectId)
	var request = &datatransferpb.ListDataSourcesRequest{
		Parent: formattedParent,
	}

	iter := c.ListDataSources(ctx, request)
	if _, err := iter.Next(); err != nil && err != iterator.Done {
		t.Error(err)
	}
}

601
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go
generated
vendored
@ -1,601 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package datatransfer

import (
	"math"
	"time"

	"cloud.google.com/go/internal/version"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
	"google.golang.org/api/transport"
	datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
	GetDataSource        []gax.CallOption
	ListDataSources      []gax.CallOption
	CreateTransferConfig []gax.CallOption
	UpdateTransferConfig []gax.CallOption
	DeleteTransferConfig []gax.CallOption
	GetTransferConfig    []gax.CallOption
	ListTransferConfigs  []gax.CallOption
	ScheduleTransferRuns []gax.CallOption
	GetTransferRun       []gax.CallOption
	DeleteTransferRun    []gax.CallOption
	ListTransferRuns     []gax.CallOption
	ListTransferLogs     []gax.CallOption
	CheckValidCreds      []gax.CallOption
}

func defaultClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}

func defaultCallOptions() *CallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &CallOptions{
		GetDataSource:        retry[[2]string{"default", "idempotent"}],
		ListDataSources:      retry[[2]string{"default", "idempotent"}],
		CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
		UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
		DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
		GetTransferConfig:    retry[[2]string{"default", "idempotent"}],
		ListTransferConfigs:  retry[[2]string{"default", "idempotent"}],
		ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
		GetTransferRun:       retry[[2]string{"default", "idempotent"}],
		DeleteTransferRun:    retry[[2]string{"default", "idempotent"}],
		ListTransferRuns:     retry[[2]string{"default", "idempotent"}],
		ListTransferLogs:     retry[[2]string{"default", "idempotent"}],
		CheckValidCreds:      retry[[2]string{"default", "idempotent"}],
	}
}
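
// Illustrative sketch (editor's note, not part of the vendored source): the
// defaults above can be overridden per call by passing gax.CallOption values;
// the backoff numbers here are made up.
//
//	resp, err := c.GetDataSource(ctx, req, gax.WithRetry(func() gax.Retryer {
//		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
//			Initial:    200 * time.Millisecond,
//			Max:        10 * time.Second,
//			Multiplier: 2,
//		})
//	}))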

// Client is a client for interacting with BigQuery Data Transfer API.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client datatransferpb.DataTransferServiceClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewClient creates a new data transfer service client.
//
// The Google BigQuery Data Transfer Service API enables BigQuery users to
// configure the transfer of their data from other Google Products into BigQuery.
// This service contains methods that are end user exposed. It backs up the
// frontend.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),

		client: datatransferpb.NewDataTransferServiceClient(conn),
	}
	c.setGoogleClientInfo()
	return c, nil
}

// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
	return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// GetDataSource retrieves a supported data source and returns its settings,
// which can be used for UI rendering.
func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
	var resp *datatransferpb.DataSource
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// ListDataSources lists supported data sources and returns their settings,
// which can be used for UI rendering.
func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
	it := &DataSourceIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
		var resp *datatransferpb.ListDataSourcesResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.DataSources, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// CreateTransferConfig creates a new data transfer configuration.
func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateTransferConfig updates a data transfer configuration.
// All fields must be set, even if they are not updated.
func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteTransferConfig deletes a data transfer configuration,
// including any associated transfer runs and logs.
func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// GetTransferConfig returns information about a data transfer config.
func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// ListTransferConfigs returns information about all data transfers in the project.
func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
	it := &TransferConfigIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
		var resp *datatransferpb.ListTransferConfigsResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferConfigs, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time].
// For each date - or whatever granularity the data source supports - in the
// range, one transfer run is created.
// Note that runs are created per UTC time in the time range.
func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
	var resp *datatransferpb.ScheduleTransferRunsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// GetTransferRun returns information about the particular transfer run.
func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
	var resp *datatransferpb.TransferRun
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteTransferRun deletes the specified transfer run.
func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// ListTransferRuns returns information about running and completed jobs.
func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
	it := &TransferRunIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
		var resp *datatransferpb.ListTransferRunsResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferRuns, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// ListTransferLogs returns user facing log messages for the data transfer run.
func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
	it := &TransferMessageIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
		var resp *datatransferpb.ListTransferLogsResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferMessages, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// CheckValidCreds returns true if valid credentials exist for the given data source and
// requesting user.
// Some data sources don't support service accounts, so we need to talk to
// them on behalf of the end user. This API just checks whether we have an
// OAuth token for the particular user, which is a prerequisite before the
// user can create a transfer config.
func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
	var resp *datatransferpb.CheckValidCredsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DataSourceIterator manages a stream of *datatransferpb.DataSource.
type DataSourceIterator struct {
	items    []*datatransferpb.DataSource
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
	var item *datatransferpb.DataSource
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *DataSourceIterator) bufLen() int {
	return len(it.items)
}

func (it *DataSourceIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
type TransferConfigIterator struct {
	items    []*datatransferpb.TransferConfig
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) {
	var item *datatransferpb.TransferConfig
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *TransferConfigIterator) bufLen() int {
	return len(it.items)
}

func (it *TransferConfigIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage.
type TransferMessageIterator struct {
	items    []*datatransferpb.TransferMessage
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) {
	var item *datatransferpb.TransferMessage
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *TransferMessageIterator) bufLen() int {
	return len(it.items)
}

func (it *TransferMessageIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

// TransferRunIterator manages a stream of *datatransferpb.TransferRun.
type TransferRunIterator struct {
	items    []*datatransferpb.TransferRun
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferRunIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) {
	var item *datatransferpb.TransferRun
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *TransferRunIterator) bufLen() int {
	return len(it.items)
}

func (it *TransferRunIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

288
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go
generated
vendored
@ -1,288 +0,0 @@
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// AUTO-GENERATED CODE. DO NOT EDIT.
|
||||
|
||||
package datatransfer_test
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/bigquery/datatransfer/apiv1"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
|
||||
)
|
||||
|
||||
func ExampleNewClient() {
|
||||
ctx := context.Background()
|
||||
c, err := datatransfer.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use client.
|
||||
_ = c
|
||||
}
|
||||
|
||||
func ExampleClient_GetDataSource() {
|
||||
ctx := context.Background()
|
||||
c, err := datatransfer.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
req := &datatransferpb.GetDataSourceRequest{
|
||||
// TODO: Fill request struct fields.
|
||||
}
|
||||
resp, err := c.GetDataSource(ctx, req)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// TODO: Use resp.
|
||||
_ = resp
|
||||
}
|
||||
|
||||
func ExampleClient_ListDataSources() {
|
||||
ctx := context.Background()
|
||||
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ListDataSourcesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListDataSources(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_CreateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.CreateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_UpdateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.UpdateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_DeleteTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.DeleteTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_GetTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.GetTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_ListTransferConfigs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ListTransferConfigsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferConfigs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_ScheduleTransferRuns() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ScheduleTransferRunsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ScheduleTransferRuns(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_GetTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.GetTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_DeleteTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.DeleteTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_ListTransferRuns() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ListTransferRunsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferRuns(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_ListTransferLogs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ListTransferLogsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferLogs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_CheckValidCreds() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.CheckValidCredsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CheckValidCreds(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
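All of the deleted examples above leave the request fields as TODOs. As a hedged sketch of what filling one in might look like — the Parent field and its "projects/<id>" resource-name format are assumptions about the Data Transfer API, not something this diff shows:

	// Sketch only: assumes ListDataSourcesRequest has a Parent field
	// taking a "projects/<project-id>" resource name.
	req := &datatransferpb.ListDataSourcesRequest{
		Parent: "projects/my-project-id", // hypothetical project ID
	}
	it := c.ListDataSources(ctx, req)
	_ = it // iterate with it.Next as in the examples above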
47 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go generated vendored
@@ -1,47 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package datatransfer is an auto-generated package for the
// BigQuery Data Transfer API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Transfers data from partner SaaS applications to Google BigQuery on a
// scheduled, managed basis.
package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1"

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	return []string{
		"https://www.googleapis.com/auth/cloud-platform",
	}
}
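DefaultAuthScopes above only reports the scope list; nothing in this file consumes it. A minimal sketch of one plausible use, assuming the standard google.golang.org/api/option package (which is not part of this file):

	// Sketch only: pass the default scopes explicitly when building a client.
	// option.WithScopes comes from google.golang.org/api/option; using it
	// here is an assumption, not something this file prescribes.
	c, err := datatransfer.NewClient(ctx,
		option.WithScopes(datatransfer.DefaultAuthScopes()...))
	if err != nil {
		// TODO: Handle error.
	}
	_ = c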
1146 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go generated vendored
File diff suppressed because it is too large
135 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go generated vendored
@@ -1,135 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datatransfer

// ProjectPath returns the path for the project resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s", project)
// instead.
func ProjectPath(project string) string {
	return "" +
		"projects/" +
		project +
		""
}

// LocationPath returns the path for the location resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/locations/%s", project, location)
// instead.
func LocationPath(project, location string) string {
	return "" +
		"projects/" +
		project +
		"/locations/" +
		location +
		""
}

// LocationDataSourcePath returns the path for the location data source resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", project, location, dataSource)
// instead.
func LocationDataSourcePath(project, location, dataSource string) string {
	return "" +
		"projects/" +
		project +
		"/locations/" +
		location +
		"/dataSources/" +
		dataSource +
		""
}

// LocationTransferConfigPath returns the path for the location transfer config resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", project, location, transferConfig)
// instead.
func LocationTransferConfigPath(project, location, transferConfig string) string {
	return "" +
		"projects/" +
		project +
		"/locations/" +
		location +
		"/transferConfigs/" +
		transferConfig +
		""
}

// LocationRunPath returns the path for the location run resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", project, location, transferConfig, run)
// instead.
func LocationRunPath(project, location, transferConfig, run string) string {
	return "" +
		"projects/" +
		project +
		"/locations/" +
		location +
		"/transferConfigs/" +
		transferConfig +
		"/runs/" +
		run +
		""
}

// DataSourcePath returns the path for the data source resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/dataSources/%s", project, dataSource)
// instead.
func DataSourcePath(project, dataSource string) string {
	return "" +
		"projects/" +
		project +
		"/dataSources/" +
		dataSource +
		""
}

// TransferConfigPath returns the path for the transfer config resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/transferConfigs/%s", project, transferConfig)
// instead.
func TransferConfigPath(project, transferConfig string) string {
	return "" +
		"projects/" +
		project +
		"/transferConfigs/" +
		transferConfig +
		""
}

// RunPath returns the path for the run resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", project, transferConfig, run)
// instead.
func RunPath(project, transferConfig, run string) string {
	return "" +
		"projects/" +
		project +
		"/transferConfigs/" +
		transferConfig +
		"/runs/" +
		run +
		""
}
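Each deprecated helper above is plain string concatenation, and the deprecation notes point at the equivalent fmt.Sprintf call; the two spellings yield identical strings. A quick check (a sketch, not part of the vendored file):

	// Sketch only: the deprecated helper and its suggested replacement agree.
	p1 := datatransfer.RunPath("proj", "cfg", "run1")
	p2 := fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "proj", "cfg", "run1")
	fmt.Println(p1 == p2) // true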
303 vendor/cloud.google.com/go/bigquery/doc.go generated vendored
@@ -1,303 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package bigquery provides a client for the BigQuery service.

Note: This package is in beta. Some backwards-incompatible changes may occur.

The following assumes a basic familiarity with BigQuery concepts.
See https://cloud.google.com/bigquery/docs.


Creating a Client

To start working with this package, create a client:

    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, projectID)
    if err != nil {
        // TODO: Handle error.
    }

Querying

To query existing tables, create a Query and call its Read method:

    q := client.Query(`
        SELECT year, SUM(number) as num
        FROM [bigquery-public-data:usa_names.usa_1910_2013]
        WHERE name = "William"
        GROUP BY year
        ORDER BY year
    `)
    it, err := q.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Then iterate through the resulting rows. You can store a row using
anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value.
A slice is simplest:

    for {
        var values []bigquery.Value
        err := it.Next(&values)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(values)
    }

You can also use a struct whose exported fields match the query:

    type Count struct {
        Year int
        Num  int
    }
    for {
        var c Count
        err := it.Next(&c)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(c)
    }

You can also start the query running and get the results later.
Create the query as above, but call Run instead of Read. This returns a Job,
which represents an asynchronous operation.

    job, err := q.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Get the job's ID, a printable string. You can save this string to retrieve
the results at a later time, even in another process.

    jobID := job.ID()
    fmt.Printf("The job ID is %s\n", jobID)

To retrieve the job's results from the ID, first look up the Job:

    job, err = client.JobFromID(ctx, jobID)
    if err != nil {
        // TODO: Handle error.
    }

Use the Job.Read method to obtain an iterator, and loop over the rows.
Query.Read is just a convenience method that combines Query.Run and Job.Read.

    it, err = job.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // Proceed with iteration as above.

Datasets and Tables

You can refer to datasets in the client's project with the Dataset method, and
in other projects with the DatasetInProject method:

    myDataset := client.Dataset("my_dataset")
    yourDataset := client.DatasetInProject("your-project-id", "your_dataset")

These methods create references to datasets, not the datasets themselves. You can have
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
create a dataset from a reference:

    if err := myDataset.Create(ctx, nil); err != nil {
        // TODO: Handle error.
    }

You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference
to an object in BigQuery that may or may not exist.

    table := myDataset.Table("my_table")

You can create, delete and update the metadata of tables with methods on Table.
For instance, you could create a temporary table with:

    err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{
        ExpirationTime: time.Now().Add(1*time.Hour)})
    if err != nil {
        // TODO: Handle error.
    }

We'll see how to create a table with a schema in the next section.

Schemas

There are two ways to construct schemas with this package.
You can build a schema by hand, like so:

    schema1 := bigquery.Schema{
        {Name: "Name", Required: true, Type: bigquery.StringFieldType},
        {Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
        {Name: "Optional", Required: false, Type: bigquery.IntegerFieldType},
    }

Or you can infer the schema from a struct:

    type student struct {
        Name     string
        Grades   []int
        Optional bigquery.NullInt64
    }
    schema2, err := bigquery.InferSchema(student{})
    if err != nil {
        // TODO: Handle error.
    }
    // schema1 and schema2 are identical.

Struct inference supports tags like those of the encoding/json package, so you can
change names, ignore fields, or mark a field as nullable (non-required). Fields
declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool,
NullTimestamp, NullDate, NullTime and NullDateTime) are automatically inferred as
nullable, so the "nullable" tag is only needed for []byte and pointer-to-struct
fields.

    type student2 struct {
        Name     string `bigquery:"full_name"`
        Grades   []int
        Secret   string `bigquery:"-"`
        Optional []byte `bigquery:",nullable"`
    }
    schema3, err := bigquery.InferSchema(student2{})
    if err != nil {
        // TODO: Handle error.
    }
    // schema3 has required fields "full_name" and "Grades", and nullable BYTES field "Optional".

Having constructed a schema, you can create a table with it like so:

    if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil {
        // TODO: Handle error.
    }

Copying

You can copy one or more tables to another table. Begin by constructing a Copier
describing the copy. Then set any desired copy options, and finally call Run to get a Job:

    copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src"))
    copier.WriteDisposition = bigquery.WriteTruncate
    job, err = copier.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

You can chain the call to Run if you don't want to set options:

    job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

You can wait for your job to complete:

    status, err := job.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Job.Wait polls with exponential backoff. You can also poll yourself, if you
wish:

    for {
        status, err := job.Status(ctx)
        if err != nil {
            // TODO: Handle error.
        }
        if status.Done() {
            if status.Err() != nil {
                log.Fatalf("Job failed with error %v", status.Err())
            }
            break
        }
        time.Sleep(pollInterval)
    }

Loading and Uploading

There are two ways to populate a table with this package: load the data from a Google Cloud Storage
object, or upload rows directly from your program.

For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure
it as well, and call its Run method.

    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
    gcsRef.AllowJaggedRows = true
    loader := myDataset.Table("dest").LoaderFrom(gcsRef)
    loader.CreateDisposition = bigquery.CreateNever
    job, err = loader.Run(ctx)
    // Poll the job for completion if desired, as above.

To upload, first define a type that implements the ValueSaver interface, which has a single method named Save.
Then create an Uploader, and call its Put method with a slice of values.

    u := table.Uploader()
    // Item implements the ValueSaver interface.
    items := []*Item{
        {Name: "n1", Size: 32.6, Count: 7},
        {Name: "n2", Size: 4, Count: 2},
        {Name: "n3", Size: 101.5, Count: 1},
    }
    if err := u.Put(ctx, items); err != nil {
        // TODO: Handle error.
    }

You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type
to specify the schema and insert ID by hand, or just supply the struct or struct pointer
directly and the schema will be inferred:

    type Item2 struct {
        Name  string
        Size  float64
        Count int
    }
    // Item implements the ValueSaver interface.
    items2 := []*Item2{
        {Name: "n1", Size: 32.6, Count: 7},
        {Name: "n2", Size: 4, Count: 2},
        {Name: "n3", Size: 101.5, Count: 1},
    }
    if err := u.Put(ctx, items2); err != nil {
        // TODO: Handle error.
    }

Extracting

If you've been following so far, extracting data from a BigQuery table
into a Google Cloud Storage object will feel familiar. First create an
Extractor, then optionally configure it, and lastly call its Run method.

    extractor := table.ExtractorTo(gcsRef)
    extractor.DisableHeader = true
    job, err = extractor.Run(ctx)
    // Poll the job for completion if desired, as above.

Authentication

See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package bigquery // import "cloud.google.com/go/bigquery"
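The package comment above documents Run, Wait and Read separately; here is a compact sketch chaining them. Names like "my-project" are placeholders, and error handling is elided to TODOs in the same style the doc itself uses:

	// Sketch only: run a query asynchronously, wait for it, then read rows.
	client, err := bigquery.NewClient(ctx, "my-project")
	if err != nil {
		// TODO: Handle error.
	}
	job, err := client.Query("SELECT 17").Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil || status.Err() != nil {
		// TODO: Handle error.
	}
	it, err := job.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // iterate as shown in the Querying section above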
82 vendor/cloud.google.com/go/bigquery/error.go generated vendored
@@ -1,82 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"

	bq "google.golang.org/api/bigquery/v2"
)

// An Error contains detailed information about a failed bigquery operation.
type Error struct {
	// Mirrors bq.ErrorProto, but drops DebugInfo
	Location, Message, Reason string
}

func (e Error) Error() string {
	return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
}

func bqToError(ep *bq.ErrorProto) *Error {
	if ep == nil {
		return nil
	}
	return &Error{
		Location: ep.Location,
		Message:  ep.Message,
		Reason:   ep.Reason,
	}
}

// A MultiError contains multiple related errors.
type MultiError []error

func (m MultiError) Error() string {
	switch len(m) {
	case 0:
		return "(0 errors)"
	case 1:
		return m[0].Error()
	case 2:
		return m[0].Error() + " (and 1 other error)"
	}
	return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1)
}

// RowInsertionError contains all errors that occurred when attempting to insert a row.
type RowInsertionError struct {
	InsertID string // The InsertID associated with the affected row.
	RowIndex int    // The 0-based index of the affected row in the batch of rows being inserted.
	Errors   MultiError
}

func (e *RowInsertionError) Error() string {
	errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s"
	return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error())
}

// PutMultiError contains an error for each row which was not successfully inserted
// into a BigQuery table.
type PutMultiError []RowInsertionError

func (pme PutMultiError) Error() string {
	plural := "s"
	if len(pme) == 1 {
		plural = ""
	}

	return fmt.Sprintf("%v row insertion%s failed", len(pme), plural)
}
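Because PutMultiError carries one RowInsertionError per failed row, a caller can type-assert the error from Uploader.Put to report rows individually. The assertion pattern below is a sketch of caller-side code, not part of this file:

	// Sketch only: unpack a PutMultiError returned by Uploader.Put.
	if err := u.Put(ctx, items); err != nil {
		if pme, ok := err.(bigquery.PutMultiError); ok {
			for _, rie := range pme {
				fmt.Printf("row %d (insert ID %q): %v\n", rie.RowIndex, rie.InsertID, rie.Errors)
			}
		}
	}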
110 vendor/cloud.google.com/go/bigquery/error_test.go generated vendored
@@ -1,110 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"strings"
	"testing"

	"cloud.google.com/go/internal/testutil"

	bq "google.golang.org/api/bigquery/v2"
)

func rowInsertionError(msg string) RowInsertionError {
	return RowInsertionError{Errors: []error{errors.New(msg)}}
}

func TestPutMultiErrorString(t *testing.T) {
	testCases := []struct {
		errs PutMultiError
		want string
	}{
		{
			errs: PutMultiError{},
			want: "0 row insertions failed",
		},
		{
			errs: PutMultiError{rowInsertionError("a")},
			want: "1 row insertion failed",
		},
		{
			errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")},
			want: "2 row insertions failed",
		},
	}

	for _, tc := range testCases {
		if tc.errs.Error() != tc.want {
			t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
		}
	}
}

func TestMultiErrorString(t *testing.T) {
	testCases := []struct {
		errs MultiError
		want string
	}{
		{
			errs: MultiError{},
			want: "(0 errors)",
		},
		{
			errs: MultiError{errors.New("a")},
			want: "a",
		},
		{
			errs: MultiError{errors.New("a"), errors.New("b")},
			want: "a (and 1 other error)",
		},
		{
			errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")},
			want: "a (and 2 other errors)",
		},
	}

	for _, tc := range testCases {
		if tc.errs.Error() != tc.want {
			// Note: the original message said "PutMultiError"; fixed to name
			// the type actually under test.
			t.Errorf("MultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
		}
	}
}

func TestErrorFromErrorProto(t *testing.T) {
	for _, test := range []struct {
		in   *bq.ErrorProto
		want *Error
	}{
		{nil, nil},
		{
			in:   &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"},
			want: &Error{Location: "L", Message: "M", Reason: "R"},
		},
	} {
		if got := bqToError(test.in); !testutil.Equal(got, test.want) {
			t.Errorf("%v: got %v, want %v", test.in, got, test.want)
		}
	}
}

func TestErrorString(t *testing.T) {
	e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"}
	got := e.Error()
	if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") {
		t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got)
	}
}
829 vendor/cloud.google.com/go/bigquery/examples_test.go generated vendored
@@ -1,829 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery_test

import (
	"fmt"
	"os"
	"time"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func ExampleNewClient() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	_ = client // TODO: Use client.
}

func ExampleClient_Dataset() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	fmt.Println(ds)
}

func ExampleClient_DatasetInProject() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.DatasetInProject("their-project-id", "their-dataset")
	fmt.Println(ds)
}

func ExampleClient_Datasets() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Datasets(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleClient_DatasetsInProject() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.DatasetsInProject(ctx, "their-project-id")
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func getJobID() string { return "" }

func ExampleClient_JobFromID() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere.
	job, err := client.JobFromID(ctx, jobID)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(job.LastStatus()) // Display the job's status.
}

func ExampleClient_Jobs() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Jobs(ctx)
	it.State = bigquery.Running // list only running jobs.
	_ = it                      // TODO: iterate using Next or iterator.Pager.
}

func ExampleNewGCSReference() {
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	fmt.Println(gcsRef)
}

func ExampleClient_Query() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	q.DefaultProjectID = "project-id"
	// TODO: set other options on the Query.
	// TODO: Call Query.Run or Query.Read.
}

func ExampleClient_Query_parameters() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select num from t1 where name = @user")
	q.Parameters = []bigquery.QueryParameter{
		{Name: "user", Value: "Elizabeth"},
	}
	// TODO: set other options on the Query.
	// TODO: Call Query.Run or Query.Read.
}

// This example demonstrates how to run a query job on a table
// with a customer-managed encryption key. The same
// applies to load and copy jobs as well.
func ExampleClient_Query_encryptionKey() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	// TODO: Replace this key with a key you have created in Cloud KMS.
	keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
	q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{KMSKeyName: keyName}
	// TODO: set other options on the Query.
	// TODO: Call Query.Run or Query.Read.
}

func ExampleQuery_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleRowIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(row)
	}
}

func ExampleRowIterator_Next_struct() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}

	type score struct {
		Name string
		Num  int
	}

	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for {
		var s score
		err := it.Next(&s)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(s)
	}
}

func ExampleJob_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	// Call Query.Run to get a Job, then call Read on the job.
	// Note: Query.Read is a shorthand for this.
	job, err := q.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	it, err := job.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleJob_Wait() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleJob_Config() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	jc, err := job.Config()
	if err != nil {
		// TODO: Handle error.
	}
	copyConfig := jc.(*bigquery.CopyConfig)
	fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition)
}

func ExampleDataset_Create() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "EU"}); err != nil {
		// TODO: Handle error.
	}
}

func ExampleDataset_Delete() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	if err := client.Dataset("my_dataset").Delete(ctx); err != nil {
		// TODO: Handle error.
	}
}

func ExampleDataset_Metadata() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	md, err := client.Dataset("my_dataset").Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md)
}

// This example illustrates how to perform a read-modify-write sequence on dataset
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleDataset_Update_readModifyWrite() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	md, err := ds.Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	md2, err := ds.Update(ctx,
		bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name},
		md.ETag)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md2)
}

// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleDataset_Update_blindWrite() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "")
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md)
}

func ExampleDataset_Table() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	// Table creates a reference to the table. It does not create the actual
	// table in BigQuery; to do so, use Table.Create.
	t := client.Dataset("my_dataset").Table("my_table")
	fmt.Println(t)
}

func ExampleDataset_Tables() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Tables(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleDatasetIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Datasets(ctx)
	for {
		ds, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(ds)
	}
}

func ExampleInferSchema() {
	type Item struct {
		Name  string
		Size  float64
		Count int
	}
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		fmt.Println(err)
		// TODO: Handle error.
	}
	for _, fs := range schema {
		fmt.Println(fs.Name, fs.Type)
	}
	// Output:
	// Name STRING
	// Size FLOAT
	// Count INTEGER
}

func ExampleInferSchema_tags() {
	type Item struct {
		Name     string
		Size     float64
		Count    int    `bigquery:"number"`
		Secret   []byte `bigquery:"-"`
		Optional bigquery.NullBool
		OptBytes []byte `bigquery:",nullable"`
	}
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		fmt.Println(err)
		// TODO: Handle error.
	}
	for _, fs := range schema {
		fmt.Println(fs.Name, fs.Type, fs.Required)
	}
	// Output:
	// Name STRING true
	// Size FLOAT true
	// number INTEGER true
	// Optional BOOLEAN false
	// OptBytes BYTES false
}

func ExampleTable_Create() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("new-table")
	if err := t.Create(ctx, nil); err != nil {
		// TODO: Handle error.
	}
}

// Initialize a new table by passing TableMetadata to Table.Create.
func ExampleTable_Create_initialize() {
	ctx := context.Background()
	// Infer table schema from a Go type.
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		// TODO: Handle error.
	}
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("new-table")
	if err := t.Create(ctx,
		&bigquery.TableMetadata{
			Name:           "My New Table",
			Schema:         schema,
			ExpirationTime: time.Now().Add(24 * time.Hour),
		}); err != nil {
		// TODO: Handle error.
	}
}

// This example demonstrates how to create a table with
// a customer-managed encryption key.
func ExampleTable_Create_encryptionKey() {
	ctx := context.Background()
	// Infer table schema from a Go type.
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		// TODO: Handle error.
	}
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("new-table")

	// TODO: Replace this key with a key you have created in Cloud KMS.
	keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
	if err := t.Create(ctx,
		&bigquery.TableMetadata{
			Name:             "My New Table",
			Schema:           schema,
			EncryptionConfig: &bigquery.EncryptionConfig{KMSKeyName: keyName},
		}); err != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Delete() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Metadata() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md)
}

func ExampleTable_Uploader() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	_ = u // TODO: Use u.
}

func ExampleTable_Uploader_options() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	u.SkipInvalidRows = true
	u.IgnoreUnknownValues = true
	_ = u // TODO: Use u.
}

func ExampleTable_CopierFrom() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2"))
	c.WriteDisposition = bigquery.WriteTruncate
	// TODO: set other options on the Copier.
	job, err := c.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_ExtractorTo() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	gcsRef.FieldDelimiter = ":"
	// TODO: set other options on the GCSReference.
	ds := client.Dataset("my_dataset")
	extractor := ds.Table("my_table").ExtractorTo(gcsRef)
	extractor.DisableHeader = true
	// TODO: set other options on the Extractor.
	job, err := extractor.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_LoaderFrom() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	gcsRef.AllowJaggedRows = true
	gcsRef.MaxBadRecords = 5
	gcsRef.Schema = schema
	// TODO: set other options on the GCSReference.
	ds := client.Dataset("my_dataset")
	loader := ds.Table("my_table").LoaderFrom(gcsRef)
	loader.CreateDisposition = bigquery.CreateNever
	// TODO: set other options on the Loader.
	job, err := loader.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_LoaderFrom_reader() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	f, err := os.Open("data.csv")
	if err != nil {
		// TODO: Handle error.
	}
	rs := bigquery.NewReaderSource(f)
	rs.AllowJaggedRows = true
	rs.MaxBadRecords = 5
	rs.Schema = schema
	// TODO: set other options on the ReaderSource.
	ds := client.Dataset("my_dataset")
	loader := ds.Table("my_table").LoaderFrom(rs)
	loader.CreateDisposition = bigquery.CreateNever
	// TODO: set other options on the Loader.
	job, err := loader.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Table("my_table").Read(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}

// This example illustrates how to perform a read-modify-write sequence on table
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleTable_Update_readModifyWrite() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("my_table")
	md, err := t.Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	md2, err := t.Update(ctx,
		bigquery.TableMetadataToUpdate{Name: "new " + md.Name},
		md.ETag)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md2)
}

// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleTable_Update_blindWrite() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("my_table")
	tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
		Description: "my favorite table",
	}, "")
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(tm)
}

func ExampleTableIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Tables(ctx)
	for {
		t, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(t)
	}
}

type Item struct {
	Name  string
	Size  float64
	Count int
}

// Save implements the ValueSaver interface.
func (i *Item) Save() (map[string]bigquery.Value, string, error) {
	return map[string]bigquery.Value{
		"Name":  i.Name,
		"Size":  i.Size,
		"Count": i.Count,
	}, "", nil
}

func ExampleUploader_Put() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	// Item implements the ValueSaver interface.
	items := []*Item{
		{Name: "n1", Size: 32.6, Count: 7},
		{Name: "n2", Size: 4, Count: 2},
		{Name: "n3", Size: 101.5, Count: 1},
	}
	if err := u.Put(ctx, items); err != nil {
		// TODO: Handle error.
	}
}

var schema bigquery.Schema

func ExampleUploader_Put_structSaver() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()

	type score struct {
		Name string
		Num  int
	}

	// Assume schema holds the table's schema.
	savers := []*bigquery.StructSaver{
		{Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"},
		{Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
		{Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
	}
	if err := u.Put(ctx, savers); err != nil {
		// TODO: Handle error.
	}
}

func ExampleUploader_Put_struct() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()

	type score struct {
		Name string
		Num  int
	}
	scores := []score{
		{Name: "n1", Num: 12},
		{Name: "n2", Num: 31},
		{Name: "n3", Num: 7},
	}
	// Schema is inferred from the score type.
	if err := u.Put(ctx, scores); err != nil {
		// TODO: Handle error.
	}
}

func ExampleUploader_Put_valuesSaver() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}

	u := client.Dataset("my_dataset").Table("my_table").Uploader()

	var vss []*bigquery.ValuesSaver
	for i, name := range []string{"n1", "n2", "n3"} {
		// Assume schema holds the table's schema.
		vss = append(vss, &bigquery.ValuesSaver{
			Schema:   schema,
			InsertID: name,
			Row:      []bigquery.Value{name, int64(i)},
		})
	}

	if err := u.Put(ctx, vss); err != nil {
		// TODO: Handle error.
	}
}
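Several examples above stop at "_ = it // TODO: iterate using Next or iterator.Pager", and only the Next form ever appears. A minimal Pager sketch, assuming the google.golang.org/api/iterator Pager API (page size 50 and the empty start token are arbitrary choices):

	// Sketch only: page-at-a-time iteration over datasets.
	pager := iterator.NewPager(client.Datasets(ctx), 50, "")
	for {
		var page []*bigquery.Dataset
		token, err := pager.NextPage(&page)
		if err != nil {
			// TODO: Handle error.
		}
		for _, ds := range page {
			fmt.Println(ds)
		}
		if token == "" {
			break // no more pages
		}
	}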
398 vendor/cloud.google.com/go/bigquery/external.go generated vendored
@@ -1,398 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"unicode/utf8"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// DataFormat describes the format of BigQuery table data.
|
||||
type DataFormat string
|
||||
|
||||
// Constants describing the format of BigQuery table data.
|
||||
const (
|
||||
CSV DataFormat = "CSV"
|
||||
Avro DataFormat = "AVRO"
|
||||
JSON DataFormat = "NEWLINE_DELIMITED_JSON"
|
||||
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
|
||||
GoogleSheets DataFormat = "GOOGLE_SHEETS"
|
||||
Bigtable DataFormat = "BIGTABLE"
|
||||
)
|
||||
|
||||
// ExternalData is a table which is stored outside of BigQuery. It is implemented by
// *ExternalDataConfig.
// GCSReference also implements it, for backwards compatibility.
type ExternalData interface {
    toBQ() bq.ExternalDataConfiguration
}

// ExternalDataConfig describes data external to BigQuery that can be used
// in queries and to create external tables.
type ExternalDataConfig struct {
    // The format of the data. Required.
    SourceFormat DataFormat

    // The fully-qualified URIs that point to your
    // data in Google Cloud. Required.
    //
    // For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
    // and it must come after the 'bucket' name. Size limits related to load jobs
    // apply to external data sources.
    //
    // For Google Cloud Bigtable URIs, exactly one URI can be specified and it has to be
    // a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
    //
    // For Google Cloud Datastore backups, exactly one URI can be specified. Also,
    // the '*' wildcard character is not allowed.
    SourceURIs []string

    // The schema of the data. Required for CSV and JSON; disallowed for the
    // other formats.
    Schema Schema

    // Try to detect schema and format options automatically.
    // Any option specified explicitly will be honored.
    AutoDetect bool

    // The compression type of the data.
    Compression Compression

    // IgnoreUnknownValues causes values not matching the schema to be
    // tolerated. Unknown values are ignored. For CSV this ignores extra values
    // at the end of a line. For JSON this ignores named values that do not
    // match any column name. If this field is not set, records containing
    // unknown values are treated as bad records. The MaxBadRecords field can
    // be used to customize how bad records are handled.
    IgnoreUnknownValues bool

    // MaxBadRecords is the maximum number of bad records that will be ignored
    // when reading data.
    MaxBadRecords int64

    // Additional options for CSV, GoogleSheets and Bigtable formats.
    Options ExternalDataConfigOptions
}

func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
    q := bq.ExternalDataConfiguration{
        SourceFormat:        string(e.SourceFormat),
        SourceUris:          e.SourceURIs,
        Autodetect:          e.AutoDetect,
        Compression:         string(e.Compression),
        IgnoreUnknownValues: e.IgnoreUnknownValues,
        MaxBadRecords:       e.MaxBadRecords,
    }
    if e.Schema != nil {
        q.Schema = e.Schema.toBQ()
    }
    if e.Options != nil {
        e.Options.populateExternalDataConfig(&q)
    }
    return q
}

func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
    e := &ExternalDataConfig{
        SourceFormat:        DataFormat(q.SourceFormat),
        SourceURIs:          q.SourceUris,
        AutoDetect:          q.Autodetect,
        Compression:         Compression(q.Compression),
        IgnoreUnknownValues: q.IgnoreUnknownValues,
        MaxBadRecords:       q.MaxBadRecords,
        Schema:              bqToSchema(q.Schema),
    }
    switch {
    case q.CsvOptions != nil:
        e.Options = bqToCSVOptions(q.CsvOptions)
    case q.GoogleSheetsOptions != nil:
        e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
    case q.BigtableOptions != nil:
        var err error
        e.Options, err = bqToBigtableOptions(q.BigtableOptions)
        if err != nil {
            return nil, err
        }
    }
    return e, nil
}

// ExternalDataConfigOptions are additional options for external data configurations.
// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
type ExternalDataConfigOptions interface {
    populateExternalDataConfig(*bq.ExternalDataConfiguration)
}
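For reference, a minimal usage sketch of this API as consumed from outside the package (not part of the commit; the bucket name, table alias and query are hypothetical, and it assumes Query's TableDefinitions field, which maps names used in the query to external sources):

package bqexamples

import (
    "golang.org/x/net/context"

    "cloud.google.com/go/bigquery"
)

// queryExternalCSV sketches querying CSV files in GCS through a temporary
// external table named "sales".
func queryExternalCSV(ctx context.Context, client *bigquery.Client) (*bigquery.RowIterator, error) {
    edc := &bigquery.ExternalDataConfig{
        SourceFormat: bigquery.CSV,
        SourceURIs:   []string{"gs://my-bucket/sales-*.csv"},
        AutoDetect:   true, // let BigQuery infer the schema
    }
    q := client.Query("SELECT COUNT(*) FROM sales")
    // TableDefinitions maps table names used in the query to external sources.
    q.TableDefinitions = map[string]bigquery.ExternalData{"sales": edc}
    return q.Read(ctx)
}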
// CSVOptions are additional options for CSV external data sources.
type CSVOptions struct {
    // AllowJaggedRows causes missing trailing optional columns to be tolerated
    // when reading CSV data. Missing values are treated as nulls.
    AllowJaggedRows bool

    // AllowQuotedNewlines sets whether quoted data sections containing
    // newlines are allowed when reading CSV data.
    AllowQuotedNewlines bool

    // Encoding is the character encoding of data to be read.
    Encoding Encoding

    // FieldDelimiter is the separator for fields in a CSV file, used when
    // reading or exporting data. The default is ",".
    FieldDelimiter string

    // Quote is the value used to quote data sections in a CSV file. The
    // default quotation character is the double quote ("), which is used if
    // both Quote and ForceZeroQuote are unset.
    // To specify that no character should be interpreted as a quotation
    // character, set ForceZeroQuote to true.
    // Only used when reading data.
    Quote          string
    ForceZeroQuote bool

    // The number of rows at the top of a CSV file that BigQuery will skip when
    // reading data.
    SkipLeadingRows int64
}

func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
    c.CsvOptions = &bq.CsvOptions{
        AllowJaggedRows:     o.AllowJaggedRows,
        AllowQuotedNewlines: o.AllowQuotedNewlines,
        Encoding:            string(o.Encoding),
        FieldDelimiter:      o.FieldDelimiter,
        Quote:               o.quote(),
        SkipLeadingRows:     o.SkipLeadingRows,
    }
}

// quote returns the CSV quote character, or nil if unset.
func (o *CSVOptions) quote() *string {
    if o.ForceZeroQuote {
        quote := ""
        return &quote
    }
    if o.Quote == "" {
        return nil
    }
    return &o.Quote
}

func (o *CSVOptions) setQuote(ps *string) {
    if ps != nil {
        o.Quote = *ps
        if o.Quote == "" {
            o.ForceZeroQuote = true
        }
    }
}

func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
    o := &CSVOptions{
        AllowJaggedRows:     q.AllowJaggedRows,
        AllowQuotedNewlines: q.AllowQuotedNewlines,
        Encoding:            Encoding(q.Encoding),
        FieldDelimiter:      q.FieldDelimiter,
        SkipLeadingRows:     q.SkipLeadingRows,
    }
    o.setQuote(q.Quote)
    return o
}
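The Quote/ForceZeroQuote pair encodes the three states that the API's nullable quote field distinguishes. A small sketch of each state, matching the mapping implemented by quote/setQuote above (values are illustrative):

package bqexamples

import "cloud.google.com/go/bigquery"

// csvQuoteStates returns one CSVOptions value per quote state.
func csvQuoteStates() []bigquery.CSVOptions {
    return []bigquery.CSVOptions{
        {},                     // both unset: server default, the double quote (")
        {Quote: "'"},           // an explicit quote character
        {ForceZeroQuote: true}, // explicitly no quote character at all
    }
}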
// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
type GoogleSheetsOptions struct {
    // The number of rows at the top of a sheet that BigQuery will skip when
    // reading data.
    SkipLeadingRows int64
}

func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
    c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
        SkipLeadingRows: o.SkipLeadingRows,
    }
}

func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
    return &GoogleSheetsOptions{
        SkipLeadingRows: q.SkipLeadingRows,
    }
}
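A sketch of a Sheets-backed external source, for comparison with the CSV case above (the spreadsheet URL is hypothetical):

package bqexamples

import "cloud.google.com/go/bigquery"

// sheetsConfig sketches a Google Sheets external data source.
func sheetsConfig() *bigquery.ExternalDataConfig {
    return &bigquery.ExternalDataConfig{
        SourceFormat: bigquery.GoogleSheets,
        SourceURIs:   []string{"https://docs.google.com/spreadsheets/d/SPREADSHEET_ID"},
        Options:      &bigquery.GoogleSheetsOptions{SkipLeadingRows: 1}, // skip the header row
    }
}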
// BigtableOptions are additional options for Bigtable external data sources.
type BigtableOptions struct {
    // A list of column families to expose in the table schema along with their
    // types. If omitted, all column families are present in the table schema and
    // their values are read as BYTES.
    ColumnFamilies []*BigtableColumnFamily

    // If true, then the column families that are not specified in columnFamilies
    // list are not exposed in the table schema. Otherwise, they are read with BYTES
    // type values. The default is false.
    IgnoreUnspecifiedColumnFamilies bool

    // If true, then the rowkey column families will be read and converted to string.
    // Otherwise they are read with BYTES type values and users need to manually cast
    // them with CAST if necessary. The default is false.
    ReadRowkeyAsString bool
}

func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
    q := &bq.BigtableOptions{
        IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
        ReadRowkeyAsString:              o.ReadRowkeyAsString,
    }
    for _, f := range o.ColumnFamilies {
        q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
    }
    c.BigtableOptions = q
}

func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
    b := &BigtableOptions{
        IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
        ReadRowkeyAsString:              q.ReadRowkeyAsString,
    }
    for _, f := range q.ColumnFamilies {
        f2, err := bqToBigtableColumnFamily(f)
        if err != nil {
            return nil, err
        }
        b.ColumnFamilies = append(b.ColumnFamilies, f2)
    }
    return b, nil
}
// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
type BigtableColumnFamily struct {
    // Identifier of the column family.
    FamilyID string

    // Lists of columns that should be exposed as individual fields as opposed to a
    // list of (column name, value) pairs. All columns whose qualifier matches a
    // qualifier in this list can be accessed as <family field name>.<column field name>.
    // Other columns can be accessed as a list through the <family field name>.Column field.
    Columns []*BigtableColumn

    // The encoding of the values when the type is not STRING. Acceptable encoding values are:
    // - TEXT - indicates values are alphanumeric text strings.
    // - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
    // This can be overridden for a specific column by listing that column in 'columns' and
    // specifying an encoding for it.
    Encoding string

    // If true, only the latest version of values are exposed for all columns in this
    // column family. This can be overridden for a specific column by listing that
    // column in 'columns' and specifying a different setting for that column.
    OnlyReadLatest bool

    // The type to convert the value in cells of this
    // column family. The values are expected to be encoded using HBase
    // Bytes.toBytes function when using the BINARY encoding value.
    // Following BigQuery types are allowed (case-sensitive):
    // BYTES STRING INTEGER FLOAT BOOLEAN.
    // The default type is BYTES. This can be overridden for a specific column by
    // listing that column in 'columns' and specifying a type for it.
    Type string
}

func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
    q := &bq.BigtableColumnFamily{
        FamilyId:       b.FamilyID,
        Encoding:       b.Encoding,
        OnlyReadLatest: b.OnlyReadLatest,
        Type:           b.Type,
    }
    for _, col := range b.Columns {
        q.Columns = append(q.Columns, col.toBQ())
    }
    return q
}

func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
    b := &BigtableColumnFamily{
        FamilyID:       q.FamilyId,
        Encoding:       q.Encoding,
        OnlyReadLatest: q.OnlyReadLatest,
        Type:           q.Type,
    }
    for _, col := range q.Columns {
        c, err := bqToBigtableColumn(col)
        if err != nil {
            return nil, err
        }
        b.Columns = append(b.Columns, c)
    }
    return b, nil
}
// BigtableColumn describes how BigQuery should access a Bigtable column.
type BigtableColumn struct {
    // Qualifier of the column. Columns in the parent column family that have this
    // exact qualifier are exposed as the <family field name>.<qualifier> field.
    // The column field name is the same as the column qualifier.
    Qualifier string

    // If the qualifier is not a valid BigQuery field identifier i.e. does not match
    // [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
    // name and is used as field name in queries.
    FieldName string

    // If true, only the latest version of values are exposed for this column.
    // See BigtableColumnFamily.OnlyReadLatest.
    OnlyReadLatest bool

    // The encoding of the values when the type is not STRING.
    // See BigtableColumnFamily.Encoding.
    Encoding string

    // The type to convert the value in cells of this column.
    // See BigtableColumnFamily.Type.
    Type string
}

func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
    q := &bq.BigtableColumn{
        FieldName:      b.FieldName,
        OnlyReadLatest: b.OnlyReadLatest,
        Encoding:       b.Encoding,
        Type:           b.Type,
    }
    if utf8.ValidString(b.Qualifier) {
        q.QualifierString = b.Qualifier
    } else {
        q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
    }
    return q
}

func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
    b := &BigtableColumn{
        FieldName:      q.FieldName,
        OnlyReadLatest: q.OnlyReadLatest,
        Encoding:       q.Encoding,
        Type:           q.Type,
    }
    if q.QualifierString != "" {
        b.Qualifier = q.QualifierString
    } else {
        bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
        if err != nil {
            return nil, err
        }
        b.Qualifier = string(bytes)
    }
    return b, nil
}
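A minimal sketch tying the Bigtable pieces together (the instance/table URL and family name are hypothetical); note that non-UTF-8 qualifiers are transparently base64-encoded by toBQ above:

package bqexamples

import "cloud.google.com/go/bigquery"

// bigtableConfig sketches an ExternalDataConfig for a Bigtable table.
func bigtableConfig() *bigquery.ExternalDataConfig {
    return &bigquery.ExternalDataConfig{
        SourceFormat: bigquery.Bigtable,
        // Exactly one fully specified HTTPS URL, per the SourceURIs doc above.
        SourceURIs: []string{"https://googleapis.com/bigtable/projects/p/instances/i/tables/t"},
        Options: &bigquery.BigtableOptions{
            ReadRowkeyAsString: true,
            ColumnFamilies: []*bigquery.BigtableColumnFamily{{
                FamilyID: "cf",     // hypothetical family name
                Encoding: "TEXT",   // values are alphanumeric text
                Type:     "STRING", // convert cell values to STRING
            }},
        },
    }
}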
143 vendor/cloud.google.com/go/bigquery/external_test.go generated vendored
@@ -1,143 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "testing"

    "cloud.google.com/go/internal/pretty"
    "cloud.google.com/go/internal/testutil"
)

func TestExternalDataConfig(t *testing.T) {
    // Round-trip of ExternalDataConfig to underlying representation.
    for i, want := range []*ExternalDataConfig{
        {
            SourceFormat:        CSV,
            SourceURIs:          []string{"uri"},
            Schema:              Schema{{Name: "n", Type: IntegerFieldType}},
            AutoDetect:          true,
            Compression:         Gzip,
            IgnoreUnknownValues: true,
            MaxBadRecords:       17,
            Options: &CSVOptions{
                AllowJaggedRows:     true,
                AllowQuotedNewlines: true,
                Encoding:            UTF_8,
                FieldDelimiter:      "f",
                Quote:               "q",
                SkipLeadingRows:     3,
            },
        },
        {
            SourceFormat: GoogleSheets,
            Options:      &GoogleSheetsOptions{SkipLeadingRows: 4},
        },
        {
            SourceFormat: Bigtable,
            Options: &BigtableOptions{
                IgnoreUnspecifiedColumnFamilies: true,
                ReadRowkeyAsString:              true,
                ColumnFamilies: []*BigtableColumnFamily{
                    {
                        FamilyID:       "f1",
                        Encoding:       "TEXT",
                        OnlyReadLatest: true,
                        Type:           "FLOAT",
                        Columns: []*BigtableColumn{
                            {
                                Qualifier:      "valid-utf-8",
                                FieldName:      "fn",
                                OnlyReadLatest: true,
                                Encoding:       "BINARY",
                                Type:           "STRING",
                            },
                        },
                    },
                },
            },
        },
    } {
        q := want.toBQ()
        got, err := bqToExternalDataConfig(&q)
        if err != nil {
            t.Fatal(err)
        }
        if diff := testutil.Diff(got, want); diff != "" {
            t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
        }
    }
}

func TestQuote(t *testing.T) {
    ptr := func(s string) *string { return &s }

    for _, test := range []struct {
        quote string
        force bool
        want  *string
    }{
        {"", false, nil},
        {"", true, ptr("")},
        {"-", false, ptr("-")},
        {"-", true, ptr("")},
    } {
        o := CSVOptions{
            Quote:          test.quote,
            ForceZeroQuote: test.force,
        }
        got := o.quote()
        if (got == nil) != (test.want == nil) {
            t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
        }
        if got != nil && test.want != nil && *got != *test.want {
            t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
        }
    }
}

func TestQualifier(t *testing.T) {
    b := BigtableColumn{Qualifier: "a"}
    q := b.toBQ()
    if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" {
        t.Errorf("got (%q, %q), want (%q, %q)",
            q.QualifierString, q.QualifierEncoded, b.Qualifier, "")
    }
    b2, err := bqToBigtableColumn(q)
    if err != nil {
        t.Fatal(err)
    }
    if got, want := b2.Qualifier, b.Qualifier; got != want {
        t.Errorf("got %q, want %q", got, want)
    }

    const (
        invalidUTF8    = "\xDF\xFF"
        invalidEncoded = "3/8"
    )
    b = BigtableColumn{Qualifier: invalidUTF8}
    q = b.toBQ()
    if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded {
        t.Errorf("got (%q, %q), want (%q, %q)",
            q.QualifierString, "", b.Qualifier, invalidEncoded)
    }
    b2, err = bqToBigtableColumn(q)
    if err != nil {
        t.Fatal(err)
    }
    if got, want := b2.Qualifier, b.Qualifier; got != want {
        t.Errorf("got %q, want %q", got, want)
    }
}
105 vendor/cloud.google.com/go/bigquery/extract.go generated vendored
@@ -1,105 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
    // Src is the table from which data will be extracted.
    Src *Table

    // Dst is the destination into which the data will be extracted.
    Dst *GCSReference

    // DisableHeader disables the printing of a header row in exported data.
    DisableHeader bool

    // The labels associated with this job.
    Labels map[string]string
}

func (e *ExtractConfig) toBQ() *bq.JobConfiguration {
    var printHeader *bool
    if e.DisableHeader {
        f := false
        printHeader = &f
    }
    return &bq.JobConfiguration{
        Labels: e.Labels,
        Extract: &bq.JobConfigurationExtract{
            DestinationUris:   append([]string{}, e.Dst.URIs...),
            Compression:       string(e.Dst.Compression),
            DestinationFormat: string(e.Dst.DestinationFormat),
            FieldDelimiter:    e.Dst.FieldDelimiter,
            SourceTable:       e.Src.toBQ(),
            PrintHeader:       printHeader,
        },
    }
}

func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig {
    qe := q.Extract
    return &ExtractConfig{
        Labels: q.Labels,
        Dst: &GCSReference{
            URIs:              qe.DestinationUris,
            Compression:       Compression(qe.Compression),
            DestinationFormat: DataFormat(qe.DestinationFormat),
            FileConfig: FileConfig{
                CSVOptions: CSVOptions{
                    FieldDelimiter: qe.FieldDelimiter,
                },
            },
        },
        DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader,
        Src:           bqToTable(qe.SourceTable, c),
    }
}

// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct {
    JobIDConfig
    ExtractConfig
    c *Client
}

// ExtractorTo returns an Extractor which can be used to extract data from a
// BigQuery table into Google Cloud Storage.
// The returned Extractor may optionally be further configured before its Run method is called.
func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
    return &Extractor{
        c: t.c,
        ExtractConfig: ExtractConfig{
            Src: t,
            Dst: dst,
        },
    }
}

// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (*Job, error) {
    return e.c.insertJob(ctx, e.newJob(), nil)
}

func (e *Extractor) newJob() *bq.Job {
    return &bq.Job{
        JobReference:  e.JobIDConfig.createJobRef(e.c),
        Configuration: e.ExtractConfig.toBQ(),
    }
}
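A minimal end-to-end sketch of an extract job driven through this API (the dataset, table and bucket names are hypothetical; it assumes Job.Wait, which blocks until the job finishes):

package bqexamples

import (
    "golang.org/x/net/context"

    "cloud.google.com/go/bigquery"
)

// exportTable sketches exporting a table to GCS as gzipped CSV.
func exportTable(ctx context.Context, client *bigquery.Client) error {
    gcsRef := bigquery.NewGCSReference("gs://my-bucket/export-*.csv")
    gcsRef.Compression = bigquery.Gzip

    extractor := client.Dataset("mydataset").Table("mytable").ExtractorTo(gcsRef)
    extractor.DisableHeader = true // configure before Run, per ExtractorTo's doc

    job, err := extractor.Run(ctx)
    if err != nil {
        return err
    }
    status, err := job.Wait(ctx) // block until the extract job completes
    if err != nil {
        return err
    }
    return status.Err()
}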
118 vendor/cloud.google.com/go/bigquery/extract_test.go generated vendored
@@ -1,118 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "testing"

    "github.com/google/go-cmp/cmp"

    "cloud.google.com/go/internal/testutil"

    bq "google.golang.org/api/bigquery/v2"
)

func defaultExtractJob() *bq.Job {
    return &bq.Job{
        JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
        Configuration: &bq.JobConfiguration{
            Extract: &bq.JobConfigurationExtract{
                SourceTable: &bq.TableReference{
                    ProjectId: "client-project-id",
                    DatasetId: "dataset-id",
                    TableId:   "table-id",
                },
                DestinationUris: []string{"uri"},
            },
        },
    }
}

func defaultGCS() *GCSReference {
    return &GCSReference{
        URIs: []string{"uri"},
    }
}

func TestExtract(t *testing.T) {
    defer fixRandomID("RANDOM")()
    c := &Client{
        projectID: "client-project-id",
    }

    testCases := []struct {
        dst    *GCSReference
        src    *Table
        config ExtractConfig
        want   *bq.Job
    }{
        {
            dst:  defaultGCS(),
            src:  c.Dataset("dataset-id").Table("table-id"),
            want: defaultExtractJob(),
        },
        {
            dst: defaultGCS(),
            src: c.Dataset("dataset-id").Table("table-id"),
            config: ExtractConfig{
                DisableHeader: true,
                Labels:        map[string]string{"a": "b"},
            },
            want: func() *bq.Job {
                j := defaultExtractJob()
                j.Configuration.Labels = map[string]string{"a": "b"}
                f := false
                j.Configuration.Extract.PrintHeader = &f
                return j
            }(),
        },
        {
            dst: func() *GCSReference {
                g := NewGCSReference("uri")
                g.Compression = Gzip
                g.DestinationFormat = JSON
                g.FieldDelimiter = "\t"
                return g
            }(),
            src: c.Dataset("dataset-id").Table("table-id"),
            want: func() *bq.Job {
                j := defaultExtractJob()
                j.Configuration.Extract.Compression = "GZIP"
                j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON"
                j.Configuration.Extract.FieldDelimiter = "\t"
                return j
            }(),
        },
    }

    for i, tc := range testCases {
        ext := tc.src.ExtractorTo(tc.dst)
        tc.config.Src = ext.Src
        tc.config.Dst = ext.Dst
        ext.ExtractConfig = tc.config
        got := ext.newJob()
        checkJob(t, i, got, tc.want)

        jc, err := bqToJobConfig(got.Configuration, c)
        if err != nil {
            t.Fatalf("#%d: %v", i, err)
        }
        diff := testutil.Diff(jc, &ext.ExtractConfig,
            cmp.AllowUnexported(Table{}, Client{}))
        if diff != "" {
            t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
        }
    }
}
135 vendor/cloud.google.com/go/bigquery/file.go generated vendored
@@ -1,135 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "io"

    bq "google.golang.org/api/bigquery/v2"
)

// A ReaderSource is a source for a load operation that gets
// data from an io.Reader.
//
// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
// its internal io.Reader will be nil, so it cannot be used for a
// subsequent load operation.
type ReaderSource struct {
    r io.Reader
    FileConfig
}

// NewReaderSource creates a ReaderSource from an io.Reader. You may
// optionally configure properties on the ReaderSource that describe the
// data being read, before passing it to Table.LoaderFrom.
func NewReaderSource(r io.Reader) *ReaderSource {
    return &ReaderSource{r: r}
}

func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
    r.FileConfig.populateLoadConfig(lc)
    return r.r
}

// FileConfig contains configuration options that pertain to files, typically
// text files that require interpretation to be used as a BigQuery table. A
// file may live in Google Cloud Storage (see GCSReference), or it may be
// loaded into a table via Table.LoaderFromReader.
type FileConfig struct {
    // SourceFormat is the format of the GCS data to be read.
    // Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV.
    SourceFormat DataFormat

    // Indicates if we should automatically infer the options and
    // schema for CSV and JSON sources.
    AutoDetect bool

    // MaxBadRecords is the maximum number of bad records that will be ignored
    // when reading data.
    MaxBadRecords int64

    // IgnoreUnknownValues causes values not matching the schema to be
    // tolerated. Unknown values are ignored. For CSV this ignores extra values
    // at the end of a line. For JSON this ignores named values that do not
    // match any column name. If this field is not set, records containing
    // unknown values are treated as bad records. The MaxBadRecords field can
    // be used to customize how bad records are handled.
    IgnoreUnknownValues bool

    // Schema describes the data. It is required when reading CSV or JSON data,
    // unless the data is being loaded into a table that already exists.
    Schema Schema

    // Additional options for CSV files.
    CSVOptions
}

func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
    conf.SkipLeadingRows = fc.SkipLeadingRows
    conf.SourceFormat = string(fc.SourceFormat)
    conf.Autodetect = fc.AutoDetect
    conf.AllowJaggedRows = fc.AllowJaggedRows
    conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
    conf.Encoding = string(fc.Encoding)
    conf.FieldDelimiter = fc.FieldDelimiter
    conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
    conf.MaxBadRecords = fc.MaxBadRecords
    if fc.Schema != nil {
        conf.Schema = fc.Schema.toBQ()
    }
    conf.Quote = fc.quote()
}

func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
    fc.SourceFormat = DataFormat(conf.SourceFormat)
    fc.AutoDetect = conf.Autodetect
    fc.MaxBadRecords = conf.MaxBadRecords
    fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
    fc.Schema = bqToSchema(conf.Schema)
    fc.SkipLeadingRows = conf.SkipLeadingRows
    fc.AllowJaggedRows = conf.AllowJaggedRows
    fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
    fc.Encoding = Encoding(conf.Encoding)
    fc.FieldDelimiter = conf.FieldDelimiter
    fc.CSVOptions.setQuote(conf.Quote)
}

func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
    format := fc.SourceFormat
    if format == "" {
        // Format must be explicitly set for external data sources.
        format = CSV
    }
    conf.Autodetect = fc.AutoDetect
    conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
    conf.MaxBadRecords = fc.MaxBadRecords
    conf.SourceFormat = string(format)
    if fc.Schema != nil {
        conf.Schema = fc.Schema.toBQ()
    }
    if format == CSV {
        fc.CSVOptions.populateExternalDataConfig(conf)
    }
}

// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string

const (
    UTF_8      Encoding = "UTF-8"
    ISO_8859_1 Encoding = "ISO-8859-1"
)
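A sketch of loading a local file through a ReaderSource (the file path, dataset and table names are hypothetical; SkipLeadingRows and AutoDetect are fields promoted from the embedded FileConfig/CSVOptions):

package bqexamples

import (
    "os"

    "golang.org/x/net/context"

    "cloud.google.com/go/bigquery"
)

// loadFromFile sketches loading a local CSV file into a table.
func loadFromFile(ctx context.Context, client *bigquery.Client) error {
    f, err := os.Open("data.csv")
    if err != nil {
        return err
    }
    defer f.Close()

    src := bigquery.NewReaderSource(f)
    src.SkipLeadingRows = 1 // skip a header row
    src.AutoDetect = true   // infer the schema from the data

    loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(src)
    job, err := loader.Run(ctx)
    if err != nil {
        return err
    }
    status, err := job.Wait(ctx)
    if err != nil {
        return err
    }
    return status.Err()
}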
98 vendor/cloud.google.com/go/bigquery/file_test.go generated vendored
@@ -1,98 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "testing"

    "cloud.google.com/go/internal/pretty"
    "cloud.google.com/go/internal/testutil"
    bq "google.golang.org/api/bigquery/v2"
)

var (
    hyphen = "-"
    fc     = FileConfig{
        SourceFormat:        CSV,
        AutoDetect:          true,
        MaxBadRecords:       7,
        IgnoreUnknownValues: true,
        Schema: Schema{
            stringFieldSchema(),
            nestedFieldSchema(),
        },
        CSVOptions: CSVOptions{
            Quote:               hyphen,
            FieldDelimiter:      "\t",
            SkipLeadingRows:     8,
            AllowJaggedRows:     true,
            AllowQuotedNewlines: true,
            Encoding:            UTF_8,
        },
    }
)

func TestFileConfigPopulateLoadConfig(t *testing.T) {
    want := &bq.JobConfigurationLoad{
        SourceFormat:        "CSV",
        FieldDelimiter:      "\t",
        SkipLeadingRows:     8,
        AllowJaggedRows:     true,
        AllowQuotedNewlines: true,
        Autodetect:          true,
        Encoding:            "UTF-8",
        MaxBadRecords:       7,
        IgnoreUnknownValues: true,
        Schema: &bq.TableSchema{
            Fields: []*bq.TableFieldSchema{
                bqStringFieldSchema(),
                bqNestedFieldSchema(),
            }},
        Quote: &hyphen,
    }
    got := &bq.JobConfigurationLoad{}
    fc.populateLoadConfig(got)
    if !testutil.Equal(got, want) {
        t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
    }
}

func TestFileConfigPopulateExternalDataConfig(t *testing.T) {
    got := &bq.ExternalDataConfiguration{}
    fc.populateExternalDataConfig(got)

    want := &bq.ExternalDataConfiguration{
        SourceFormat:        "CSV",
        Autodetect:          true,
        MaxBadRecords:       7,
        IgnoreUnknownValues: true,
        Schema: &bq.TableSchema{
            Fields: []*bq.TableFieldSchema{
                bqStringFieldSchema(),
                bqNestedFieldSchema(),
            }},
        CsvOptions: &bq.CsvOptions{
            AllowJaggedRows:     true,
            AllowQuotedNewlines: true,
            Encoding:            "UTF-8",
            FieldDelimiter:      "\t",
            Quote:               &hyphen,
            SkipLeadingRows:     8,
        },
    }
    if diff := testutil.Diff(got, want); diff != "" {
        t.Errorf("got=-, want=+:\n%s", diff)
    }
}
73 vendor/cloud.google.com/go/bigquery/gcs.go generated vendored
@@ -1,73 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "io"

    bq "google.golang.org/api/bigquery/v2"
)

// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
    // URIs refer to Google Cloud Storage objects.
    URIs []string

    FileConfig

    // DestinationFormat is the format to use when writing exported files.
    // Allowed values are: CSV, Avro, JSON. The default is CSV.
    // CSV is not supported for tables with nested or repeated fields.
    DestinationFormat DataFormat

    // Compression specifies the type of compression to apply when writing data
    // to Google Cloud Storage, or using this GCSReference as an ExternalData
    // source with CSV or JSON SourceFormat. Default is None.
    Compression Compression
}

// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
// For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func NewGCSReference(uri ...string) *GCSReference {
    return &GCSReference{URIs: uri}
}

// Compression is the type of compression to apply when writing data to Google Cloud Storage.
type Compression string

const (
    None Compression = "NONE"
    Gzip Compression = "GZIP"
)

func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
    lc.SourceUris = gcs.URIs
    gcs.FileConfig.populateLoadConfig(lc)
    return nil
}

func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
    conf := bq.ExternalDataConfiguration{
        Compression: string(gcs.Compression),
        SourceUris:  append([]string{}, gcs.URIs...),
    }
    gcs.FileConfig.populateExternalDataConfig(&conf)
    return conf
}
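A sketch of the load-side use of a GCSReference, the counterpart to the extract example above (bucket, dataset and table names are hypothetical):

package bqexamples

import (
    "golang.org/x/net/context"

    "cloud.google.com/go/bigquery"
)

// loadFromGCS sketches loading sharded CSV files from GCS into a table.
func loadFromGCS(ctx context.Context, client *bigquery.Client) (*bigquery.Job, error) {
    // One '*' wildcard per URI, placed after the bucket name.
    gcsRef := bigquery.NewGCSReference("gs://my-bucket/shard-*.csv")
    gcsRef.SourceFormat = bigquery.CSV
    gcsRef.AutoDetect = true
    loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(gcsRef)
    return loader.Run(ctx)
}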
1858 vendor/cloud.google.com/go/bigquery/integration_test.go generated vendored
File diff suppressed because it is too large

215 vendor/cloud.google.com/go/bigquery/iterator.go generated vendored
@@ -1,215 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "fmt"
    "reflect"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
    "google.golang.org/api/iterator"
)

func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
    it := &RowIterator{
        ctx:   ctx,
        table: t,
        pf:    pf,
    }
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(
        it.fetch,
        func() int { return len(it.rows) },
        func() interface{} { r := it.rows; it.rows = nil; return r })
    return it
}

// A RowIterator provides access to the result of a BigQuery lookup.
type RowIterator struct {
    ctx      context.Context
    table    *Table
    pf       pageFetcher
    pageInfo *iterator.PageInfo
    nextFunc func() error

    // StartIndex can be set before the first call to Next. If PageInfo().Token
    // is also set, StartIndex is ignored.
    StartIndex uint64

    // The schema of the table. Available after the first call to Next.
    Schema Schema

    // The total number of rows in the result. Available after the first call to Next.
    // May be zero just after rows were inserted.
    TotalRows uint64

    rows         [][]Value
    structLoader structLoader // used to populate a pointer to a struct
}

// Next loads the next row into dst. Its return value is iterator.Done if there
// are no more results. Once Next returns iterator.Done, all subsequent calls
// will return iterator.Done.
//
// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
//
// If dst is a *[]Value, it will be set to a new []Value whose i'th element
// will be populated with the i'th column of the row.
//
// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
// for each schema column name, the map key of that name will be set to the column's
// value. STRUCT types (RECORD types or nested schemas) become nested maps.
//
// If dst is a pointer to a struct, each column in the schema will be matched
// with an exported field of the struct that has the same name, ignoring case.
// Unmatched schema columns and struct fields will be ignored.
//
// Each BigQuery column type corresponds to one or more Go types; a matching struct
// field must be of the correct type. The correspondences are:
//
//     STRING     string
//     BOOL       bool
//     INTEGER    int, int8, int16, int32, int64, uint8, uint16, uint32
//     FLOAT      float32, float64
//     BYTES      []byte
//     TIMESTAMP  time.Time
//     DATE       civil.Date
//     TIME       civil.Time
//     DATETIME   civil.DateTime
//
// A repeated field corresponds to a slice or array of the element type. A STRUCT
// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
//
// It is an error to attempt to read a BigQuery NULL value into a struct field,
// unless the field is of type []byte or is one of the special Null types: NullInt64,
// NullFloat64, NullBool, NullString, NullTimestamp, NullDate, NullTime or
// NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
// table with NULLs.
func (it *RowIterator) Next(dst interface{}) error {
    var vl ValueLoader
    switch dst := dst.(type) {
    case ValueLoader:
        vl = dst
    case *[]Value:
        vl = (*valueList)(dst)
    case *map[string]Value:
        vl = (*valueMap)(dst)
    default:
        if !isStructPtr(dst) {
            return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
        }
    }
    if err := it.nextFunc(); err != nil {
        return err
    }
    row := it.rows[0]
    it.rows = it.rows[1:]

    if vl == nil {
        // This can only happen if dst is a pointer to a struct. We couldn't
        // set vl above because we need the schema.
        if err := it.structLoader.set(dst, it.Schema); err != nil {
            return err
        }
        vl = &it.structLoader
    }
    return vl.Load(row, it.Schema)
}

func isStructPtr(x interface{}) bool {
    t := reflect.TypeOf(x)
    return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
    res, err := it.pf(it.ctx, it.table, it.Schema, it.StartIndex, int64(pageSize), pageToken)
    if err != nil {
        return "", err
    }
    it.rows = append(it.rows, res.rows...)
    it.Schema = res.schema
    it.TotalRows = res.totalRows
    return res.pageToken, nil
}

// A pageFetcher returns a page of rows from a destination table.
type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)

type fetchPageResult struct {
    pageToken string
    rows      [][]Value
    totalRows uint64
    schema    Schema
}

// fetchPage gets a page of rows from t.
func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
    // Fetch the table schema in the background, if necessary.
    errc := make(chan error, 1)
    if schema != nil {
        errc <- nil
    } else {
        go func() {
            var bqt *bq.Table
            err := runWithRetry(ctx, func() (err error) {
                bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
                    Fields("schema").
                    Context(ctx).
                    Do()
                return err
            })
            if err == nil && bqt.Schema != nil {
                schema = bqToSchema(bqt.Schema)
            }
            errc <- err
        }()
    }
    call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
    setClientHeader(call.Header())
    if pageToken != "" {
        call.PageToken(pageToken)
    } else {
        call.StartIndex(startIndex)
    }
    if pageSize > 0 {
        call.MaxResults(pageSize)
    }
    var res *bq.TableDataList
    err := runWithRetry(ctx, func() (err error) {
        res, err = call.Context(ctx).Do()
        return err
    })
    if err != nil {
        return nil, err
    }
    err = <-errc
    if err != nil {
        return nil, err
    }
    rows, err := convertRows(res.Rows, schema)
    if err != nil {
        return nil, err
    }
    return &fetchPageResult{
        pageToken: res.PageToken,
        rows:      rows,
        totalRows: uint64(res.TotalRows),
        schema:    schema,
    }, nil
}
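The iterator.Done idiom described in Next's doc comment, as a minimal consumer sketch (dataset and table names are hypothetical):

package bqexamples

import (
    "fmt"

    "golang.org/x/net/context"

    "cloud.google.com/go/bigquery"
    "google.golang.org/api/iterator"
)

// readTable sketches reading every row of a table with a RowIterator.
func readTable(ctx context.Context, client *bigquery.Client) error {
    it := client.Dataset("mydataset").Table("mytable").Read(ctx)
    for {
        var row []bigquery.Value // *[]Value is one of the accepted dst kinds
        err := it.Next(&row)
        if err == iterator.Done {
            return nil // all rows consumed
        }
        if err != nil {
            return err
        }
        fmt.Println(row)
    }
}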
363 vendor/cloud.google.com/go/bigquery/iterator_test.go generated vendored
@@ -1,363 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "fmt"
    "testing"

    "cloud.google.com/go/internal/testutil"

    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
)

type fetchResponse struct {
    result *fetchPageResult // The result to return.
    err    error            // The error to return.
}

// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
    fetchResponses map[string]fetchResponse
    err            error
}

func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) {
    call, ok := pf.fetchResponses[pageToken]
    if !ok {
        pf.err = fmt.Errorf("Unexpected page token: %q", pageToken)
    }
    return call.result, call.err
}

func TestIterator(t *testing.T) {
    var (
        iiSchema = Schema{
            {Type: IntegerFieldType},
            {Type: IntegerFieldType},
        }
        siSchema = Schema{
            {Type: StringFieldType},
            {Type: IntegerFieldType},
        }
    )
    fetchFailure := errors.New("fetch failure")

    testCases := []struct {
        desc           string
        pageToken      string
        fetchResponses map[string]fetchResponse
        want           [][]Value
        wantErr        error
        wantSchema     Schema
        wantTotalRows  uint64
    }{
        {
            desc: "Iteration over single empty page",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &fetchPageResult{
                        pageToken: "",
                        rows:      [][]Value{},
                        schema:    Schema{},
                    },
                },
            },
            want:       [][]Value{},
            wantSchema: Schema{},
        },
        {
            desc: "Iteration over single page",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &fetchPageResult{
                        pageToken: "",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                        totalRows: 4,
                    },
                },
            },
            want:          [][]Value{{1, 2}, {11, 12}},
            wantSchema:    iiSchema,
            wantTotalRows: 4,
        },
        {
            desc: "Iteration over single page with different schema",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &fetchPageResult{
                        pageToken: "",
                        rows:      [][]Value{{"1", 2}, {"11", 12}},
                        schema:    siSchema,
                    },
                },
            },
            want:       [][]Value{{"1", 2}, {"11", 12}},
            wantSchema: siSchema,
        },
        {
            desc: "Iteration over two pages",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &fetchPageResult{
                        pageToken: "a",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                        totalRows: 4,
                    },
                },
                "a": {
                    result: &fetchPageResult{
                        pageToken: "",
                        rows:      [][]Value{{101, 102}, {111, 112}},
                        schema:    iiSchema,
                        totalRows: 4,
                    },
                },
            },
            want:          [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
            wantSchema:    iiSchema,
            wantTotalRows: 4,
        },
        {
            desc: "Server response includes empty page",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &fetchPageResult{
                        pageToken: "a",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                    },
                },
                "a": {
                    result: &fetchPageResult{
                        pageToken: "b",
                        rows:      [][]Value{},
                        schema:    iiSchema,
                    },
                },
                "b": {
                    result: &fetchPageResult{
                        pageToken: "",
                        rows:      [][]Value{{101, 102}, {111, 112}},
                        schema:    iiSchema,
                    },
                },
            },
            want:       [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
            wantSchema: iiSchema,
        },
        {
            desc: "Fetch error",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &fetchPageResult{
                        pageToken: "a",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                    },
                },
                "a": {
                    // We return some data from this fetch, but also an error.
                    // So the end result should include only data from the previous fetch.
                    err: fetchFailure,
                    result: &fetchPageResult{
                        pageToken: "b",
                        rows:      [][]Value{{101, 102}, {111, 112}},
                        schema:    iiSchema,
                    },
                },
            },
            want:       [][]Value{{1, 2}, {11, 12}},
            wantErr:    fetchFailure,
            wantSchema: iiSchema,
        },
        {
            desc:      "Skip over an entire page",
            pageToken: "a",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &fetchPageResult{
                        pageToken: "a",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                    },
                },
                "a": {
                    result: &fetchPageResult{
                        pageToken: "",
                        rows:      [][]Value{{101, 102}, {111, 112}},
                        schema:    iiSchema,
                    },
                },
            },
            want:       [][]Value{{101, 102}, {111, 112}},
            wantSchema: iiSchema,
        },
        {
            desc:      "Skip beyond all data",
            pageToken: "b",
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &fetchPageResult{
                        pageToken: "a",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                        schema:    iiSchema,
                    },
                },
                "a": {
                    result: &fetchPageResult{
                        pageToken: "b",
                        rows:      [][]Value{{101, 102}, {111, 112}},
                        schema:    iiSchema,
                    },
                },
                "b": {
                    result: &fetchPageResult{},
                },
            },
            // In this test case, Next returns iterator.Done on its first call,
            // so we never read any values.
            want:       [][]Value{},
            wantSchema: Schema{},
        },
    }

    for _, tc := range testCases {
        pf := &pageFetcherStub{
            fetchResponses: tc.fetchResponses,
        }
        it := newRowIterator(context.Background(), nil, pf.fetchPage)
        it.PageInfo().Token = tc.pageToken
        values, schema, totalRows, err := consumeRowIterator(it)
        if err != tc.wantErr {
            t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
        }
        if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
            t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
        }
        if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
            t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
        }
        if totalRows != tc.wantTotalRows {
            t.Errorf("%s: totalRows: got %d, want %d", tc.desc, totalRows, tc.wantTotalRows)
        }
    }
}

// consumeRowIterator reads the schema and all values from a RowIterator and returns them.
func consumeRowIterator(it *RowIterator) ([][]Value, Schema, uint64, error) {
    var (
        got       [][]Value
        schema    Schema
        totalRows uint64
    )
    for {
        var vls []Value
        err := it.Next(&vls)
        if err == iterator.Done {
            return got, schema, totalRows, nil
        }
        if err != nil {
            return got, schema, totalRows, err
        }
        got = append(got, vls)
        schema = it.Schema
        totalRows = it.TotalRows
    }
}

func TestNextDuringErrorState(t *testing.T) {
    pf := &pageFetcherStub{
        fetchResponses: map[string]fetchResponse{
            "": {err: errors.New("bang")},
        },
    }
    it := newRowIterator(context.Background(), nil, pf.fetchPage)
    var vals []Value
    if err := it.Next(&vals); err == nil {
        t.Errorf("Expected error after calling Next")
    }
    if err := it.Next(&vals); err == nil {
        t.Errorf("Expected error calling Next again when iterator has a non-nil error.")
    }
}

func TestNextAfterFinished(t *testing.T) {
    testCases := []struct {
        fetchResponses map[string]fetchResponse
        want           [][]Value
    }{
        {
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &fetchPageResult{
                        pageToken: "",
                        rows:      [][]Value{{1, 2}, {11, 12}},
                    },
                },
            },
            want: [][]Value{{1, 2}, {11, 12}},
        },
        {
            fetchResponses: map[string]fetchResponse{
                "": {
                    result: &fetchPageResult{
                        pageToken: "",
                        rows:      [][]Value{},
                    },
                },
            },
            want: [][]Value{},
        },
    }

    for _, tc := range testCases {
        pf := &pageFetcherStub{
            fetchResponses: tc.fetchResponses,
        }
        it := newRowIterator(context.Background(), nil, pf.fetchPage)

        values, _, _, err := consumeRowIterator(it)
        if err != nil {
            t.Fatal(err)
        }
        if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
            t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
        }
        // Try calling Next again.
        var vals []Value
        if err := it.Next(&vals); err != iterator.Done {
            t.Errorf("Expected Done calling Next when there are no more values")
        }
    }
}

func TestIteratorNextTypes(t *testing.T) {
    it := newRowIterator(context.Background(), nil, nil)
    for _, v := range []interface{}{3, "s", []int{}, &[]int{},
        map[string]Value{}, &map[string]interface{}{},
        struct{}{},
    } {
        if err := it.Next(v); err == nil {
            t.Errorf("%v: want error, got nil", v)
        }
    }
}
699 vendor/cloud.google.com/go/bigquery/job.go generated vendored
@@ -1,699 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "fmt"
    "math/rand"
    "os"
    "sync"
    "time"

    "cloud.google.com/go/internal"
    gax "github.com/googleapis/gax-go"
    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
    "google.golang.org/api/googleapi"
    "google.golang.org/api/iterator"
)

// A Job represents an operation which has been submitted to BigQuery for processing.
type Job struct {
    c         *Client
    projectID string
    jobID     string
    location  string

    config     *bq.JobConfiguration
    lastStatus *JobStatus
}

// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
//
// For jobs whose location is other than "US" or "EU", set Client.Location or use
// JobFromIDLocation.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
    return c.JobFromIDLocation(ctx, id, c.Location)
}

// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package (for example, it may have
// been created in the BigQuery console), but it must exist in the specified location.
func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (*Job, error) {
    bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics")
    if err != nil {
        return nil, err
    }
    return bqToJob(bqjob, c)
}

// ID returns the job's ID.
func (j *Job) ID() string {
    return j.jobID
}

// Location returns the job's location.
func (j *Job) Location() string {
    return j.location
}

// State is one of a sequence of states that a Job progresses through as it is processed.
type State int

const (
    StateUnspecified State = iota // used only as a default in JobIterator
    Pending
    Running
    Done
)

// JobStatus contains the current State of a job, and errors encountered while processing that job.
type JobStatus struct {
    State State

    err error

    // All errors encountered during the running of the job.
    // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
    Errors []*Error

    // Statistics about the job.
    Statistics *JobStatistics
}

// JobConfig contains configuration information for a job. It is implemented by
// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
type JobConfig interface {
    isJobConfig()
}

func (*CopyConfig) isJobConfig()    {}
func (*ExtractConfig) isJobConfig() {}
func (*LoadConfig) isJobConfig()    {}
func (*QueryConfig) isJobConfig()   {}

// Config returns the configuration information for j.
func (j *Job) Config() (JobConfig, error) {
    return bqToJobConfig(j.config, j.c)
}

func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) {
    switch {
    case q == nil:
        return nil, nil
    case q.Copy != nil:
        return bqToCopyConfig(q, c), nil
    case q.Extract != nil:
        return bqToExtractConfig(q, c), nil
    case q.Load != nil:
        return bqToLoadConfig(q, c), nil
    case q.Query != nil:
        return bqToQueryConfig(q, c)
    default:
        return nil, nil
    }
}

// JobIDConfig describes how to create an ID for a job.
type JobIDConfig struct {
    // JobID is the ID to use for the job. If empty, a random job ID will be generated.
    JobID string

    // If AddJobIDSuffix is true, then a random string will be appended to JobID.
    AddJobIDSuffix bool

    // Location is the location for the job.
    Location string
}

// createJobRef creates a JobReference.
func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference {
    // We don't check whether projectID is empty; the server will return an
    // error when it encounters the resulting JobReference.
    loc := j.Location
    if loc == "" { // Use Client.Location as a default.
        loc = c.Location
    }
    jr := &bq.JobReference{ProjectId: c.projectID, Location: loc}
    if j.JobID == "" {
        jr.JobId = randomIDFn()
    } else if j.AddJobIDSuffix {
        jr.JobId = j.JobID + "-" + randomIDFn()
    } else {
        jr.JobId = j.JobID
    }
    return jr
}
|
||||
const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
|
||||
|
||||
var (
|
||||
rngMu sync.Mutex
|
||||
rng = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
|
||||
)
|
||||
|
||||
// For testing.
|
||||
var randomIDFn = randomID
|
||||
|
||||
// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
|
||||
// suffixes.
|
||||
const randomIDLen = 27
|
||||
|
||||
func randomID() string {
|
||||
// This is used for both job IDs and insert IDs.
|
||||
var b [randomIDLen]byte
|
||||
rngMu.Lock()
|
||||
for i := 0; i < len(b); i++ {
|
||||
b[i] = alphanum[rng.Intn(len(alphanum))]
|
||||
}
|
||||
rngMu.Unlock()
|
||||
return string(b[:])
|
||||
}
|
||||
|
||||
// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool {
    return s.State == Done
}

// Err returns the error that caused the job to complete unsuccessfully (if any).
func (s *JobStatus) Err() error {
    return s.err
}

// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
    bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics")
    if err != nil {
        return nil, err
    }
    if err := j.setStatus(bqjob.Status); err != nil {
        return nil, err
    }
    j.setStatistics(bqjob.Statistics, j.c)
    return j.lastStatus, nil
}

// LastStatus returns the most recently retrieved status of the job. The status is
// retrieved when a new job is created, or when JobFromID or Job.Status is called.
// Call Job.Status to get the most up-to-date information about a job.
func (j *Job) LastStatus() *JobStatus {
    return j.lastStatus
}

// Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error {
    // Jobs.Cancel returns a job entity, but the only relevant piece of
    // data it may contain (the status of the job) is unreliable. From the
    // docs: "This call will return immediately, and the client will need
    // to poll for the job status to see if the cancel completed
    // successfully". So it would be misleading to return a status.
    call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
        Location(j.location).
        Fields(). // We don't need any of the response data.
        Context(ctx)
    setClientHeader(call.Header())
    return runWithRetry(ctx, func() error {
        _, err := call.Do()
        return err
    })
}

// Wait blocks until the job or the context is done. It returns the final status
// of the job.
// If an error occurs while retrieving the status, Wait returns that error. But
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
    if j.isQuery() {
        // We can avoid polling for query jobs.
        if _, err := j.waitForQuery(ctx, j.projectID); err != nil {
            return nil, err
        }
        // Note: extra RPC even if you just want to wait for the query to finish.
        js, err := j.Status(ctx)
        if err != nil {
            return nil, err
        }
        return js, nil
    }
    // Non-query jobs must poll.
    var js *JobStatus
    err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
        js, err = j.Status(ctx)
        if err != nil {
            return true, err
        }
        if js.Done() {
            return true, nil
        }
        return false, nil
    })
    if err != nil {
        return nil, err
    }
    return js, nil
}
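
// Illustrative sketch, not part of the vendored file: the two-error calling
// pattern that the Wait doc comment above refers to. The job is assumed to
// come from, e.g., a Run call elsewhere in this package.
func exampleWait(ctx context.Context, job *Job) error {
    status, err := job.Wait(ctx)
    if err != nil {
        // The status could not be retrieved (RPC failure, cancelled context, ...).
        return err
    }
    if status.Err() != nil {
        // The job itself completed unsuccessfully.
        return status.Err()
    }
    return nil
}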

// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
    return j.read(ctx, j.waitForQuery, fetchPage)
}
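
// Illustrative sketch, not part of the vendored file: draining a query job's
// results. Value and RowIterator are defined elsewhere in this package;
// iterator.Done comes from the imported iterator package.
func exampleRead(ctx context.Context, job *Job) error {
    it, err := job.Read(ctx)
    if err != nil {
        return err
    }
    for {
        var row []Value
        err := it.Next(&row)
        if err == iterator.Done {
            break
        }
        if err != nil {
            return err
        }
        fmt.Println(row)
    }
    return nil
}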

func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, error), pf pageFetcher) (*RowIterator, error) {
    if !j.isQuery() {
        return nil, errors.New("bigquery: cannot read from a non-query job")
    }
    destTable := j.config.Query.DestinationTable
    // The destination table should only be nil if there was a query error.
    projectID := j.projectID
    if destTable != nil && projectID != destTable.ProjectId {
        return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId)
    }
    schema, err := waitForQuery(ctx, projectID)
    if err != nil {
        return nil, err
    }
    if destTable == nil {
        return nil, errors.New("bigquery: query job missing destination table")
    }
    dt := bqToTable(destTable, j.c)
    it := newRowIterator(ctx, dt, pf)
    it.Schema = schema
    return it, nil
}

// waitForQuery waits for the query job to complete and returns its schema.
func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) {
    // Use GetQueryResults only to wait for completion, not to read results.
    call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0)
    setClientHeader(call.Header())
    backoff := gax.Backoff{
        Initial:    1 * time.Second,
        Multiplier: 2,
        Max:        60 * time.Second,
    }
    var res *bq.GetQueryResultsResponse
    err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
        res, err = call.Do()
        if err != nil {
            return !retryableError(err), err
        }
        if !res.JobComplete { // GetQueryResults may return early without error; retry.
            return false, nil
        }
        return true, nil
    })
    if err != nil {
        return nil, err
    }
    return bqToSchema(res.Schema), nil
}

// JobStatistics contains statistics about a job.
type JobStatistics struct {
    CreationTime        time.Time
    StartTime           time.Time
    EndTime             time.Time
    TotalBytesProcessed int64

    Details Statistics
}

// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics.
type Statistics interface {
    implementsStatistics()
}

// ExtractStatistics contains statistics about an extract job.
type ExtractStatistics struct {
    // The number of files per destination URI or URI pattern specified in the
    // extract configuration. These values will be in the same order as the
    // URIs specified in the 'destinationUris' field.
    DestinationURIFileCounts []int64
}

// LoadStatistics contains statistics about a load job.
type LoadStatistics struct {
    // The number of bytes of source data in a load job.
    InputFileBytes int64

    // The number of source files in a load job.
    InputFiles int64

    // Size of the loaded data in bytes. Note that while a load job is in the
    // running state, this value may change.
    OutputBytes int64

    // The number of rows imported in a load job. Note that while an import job is
    // in the running state, this value may change.
    OutputRows int64
}

// QueryStatistics contains statistics about a query job.
type QueryStatistics struct {
    // Billing tier for the job.
    BillingTier int64

    // Whether the query result was fetched from the query cache.
    CacheHit bool

    // The type of query statement, if valid.
    StatementType string

    // Total bytes billed for the job.
    TotalBytesBilled int64

    // Total bytes processed for the job.
    TotalBytesProcessed int64

    // Describes execution plan for the query.
    QueryPlan []*ExplainQueryStage

    // The number of rows affected by a DML statement. Present only for DML
    // statements INSERT, UPDATE or DELETE.
    NumDMLAffectedRows int64

    // ReferencedTables: [Output-only, Experimental] Referenced tables for
    // the job. Queries that reference more than 50 tables will not have a
    // complete list.
    ReferencedTables []*Table

    // The schema of the results. Present only for successful dry run of
    // non-legacy SQL queries.
    Schema Schema

    // Standard SQL: list of undeclared query parameter names detected during a
    // dry run validation.
    UndeclaredQueryParameterNames []string
}

// ExplainQueryStage describes one stage of a query.
type ExplainQueryStage struct {
    // Relative amount of the total time the average shard spent on CPU-bound tasks.
    ComputeRatioAvg float64

    // Relative amount of the total time the slowest shard spent on CPU-bound tasks.
    ComputeRatioMax float64

    // Unique ID for stage within plan.
    ID int64

    // Human-readable name for stage.
    Name string

    // Relative amount of the total time the average shard spent reading input.
    ReadRatioAvg float64

    // Relative amount of the total time the slowest shard spent reading input.
    ReadRatioMax float64

    // Number of records read into the stage.
    RecordsRead int64

    // Number of records written by the stage.
    RecordsWritten int64

    // Current status for the stage.
    Status string

    // List of operations within the stage in dependency order (approximately
    // chronological).
    Steps []*ExplainQueryStep

    // Relative amount of the total time the average shard spent waiting to be scheduled.
    WaitRatioAvg float64

    // Relative amount of the total time the slowest shard spent waiting to be scheduled.
    WaitRatioMax float64

    // Relative amount of the total time the average shard spent on writing output.
    WriteRatioAvg float64

    // Relative amount of the total time the slowest shard spent on writing output.
    WriteRatioMax float64
}

// ExplainQueryStep describes one step of a query stage.
type ExplainQueryStep struct {
    // Machine-readable operation type.
    Kind string

    // Human-readable stage descriptions.
    Substeps []string
}

func (*ExtractStatistics) implementsStatistics() {}
func (*LoadStatistics) implementsStatistics()    {}
func (*QueryStatistics) implementsStatistics()   {}

// Jobs lists jobs within a project.
func (c *Client) Jobs(ctx context.Context) *JobIterator {
    it := &JobIterator{
        ctx:       ctx,
        c:         c,
        ProjectID: c.projectID,
    }
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(
        it.fetch,
        func() int { return len(it.items) },
        func() interface{} { b := it.items; it.items = nil; return b })
    return it
}

// JobIterator iterates over jobs in a project.
type JobIterator struct {
    ProjectID string // Project ID of the jobs to list. Default is the client's project.
    AllUsers  bool   // Whether to list jobs owned by all users in the project, or just the current caller.
    State     State  // List only jobs in the given state. Defaults to all states.

    ctx      context.Context
    c        *Client
    pageInfo *iterator.PageInfo
    nextFunc func() error
    items    []*Job
}

func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *JobIterator) Next() (*Job, error) {
    if err := it.nextFunc(); err != nil {
        return nil, err
    }
    item := it.items[0]
    it.items = it.items[1:]
    return item, nil
}
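
// Illustrative sketch, not part of the vendored file: listing a project's
// completed jobs with the iterator above.
func exampleJobs(ctx context.Context, c *Client) error {
    it := c.Jobs(ctx)
    it.State = Done // optional filter; the zero value lists all states
    for {
        job, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            return err
        }
        fmt.Println(job.ID())
    }
    return nil
}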

func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
    var st string
    switch it.State {
    case StateUnspecified:
        st = ""
    case Pending:
        st = "pending"
    case Running:
        st = "running"
    case Done:
        st = "done"
    default:
        return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
    }

    req := it.c.bqs.Jobs.List(it.ProjectID).
        Context(it.ctx).
        PageToken(pageToken).
        Projection("full").
        AllUsers(it.AllUsers)
    if st != "" {
        req.StateFilter(st)
    }
    setClientHeader(req.Header())
    if pageSize > 0 {
        req.MaxResults(int64(pageSize))
    }
    res, err := req.Do()
    if err != nil {
        return "", err
    }
    for _, j := range res.Jobs {
        job, err := convertListedJob(j, it.c)
        if err != nil {
            return "", err
        }
        it.items = append(it.items, job)
    }
    return res.NextPageToken, nil
}

func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
    return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c)
}

func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) {
    var job *bq.Job
    call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
    if location != "" {
        call = call.Location(location)
    }
    if len(fields) > 0 {
        call = call.Fields(fields...)
    }
    setClientHeader(call.Header())
    err := runWithRetry(ctx, func() (err error) {
        job, err = call.Do()
        return err
    })
    if err != nil {
        return nil, err
    }
    return job, nil
}

func bqToJob(q *bq.Job, c *Client) (*Job, error) {
    return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, c)
}

func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, c *Client) (*Job, error) {
    j := &Job{
        projectID: qr.ProjectId,
        jobID:     qr.JobId,
        location:  qr.Location,
        c:         c,
    }
    j.setConfig(qc)
    if err := j.setStatus(qs); err != nil {
        return nil, err
    }
    j.setStatistics(qt, c)
    return j, nil
}

func (j *Job) setConfig(config *bq.JobConfiguration) {
    if config == nil {
        return
    }
    j.config = config
}

func (j *Job) isQuery() bool {
    return j.config != nil && j.config.Query != nil
}

var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}

func (j *Job) setStatus(qs *bq.JobStatus) error {
    if qs == nil {
        return nil
    }
    state, ok := stateMap[qs.State]
    if !ok {
        return fmt.Errorf("unexpected job state: %v", qs.State)
    }
    j.lastStatus = &JobStatus{
        State: state,
        err:   nil,
    }
    if err := bqToError(qs.ErrorResult); state == Done && err != nil {
        j.lastStatus.err = err
    }
    for _, ep := range qs.Errors {
        j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep))
    }
    return nil
}

func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
    if s == nil || j.lastStatus == nil {
        return
    }
    js := &JobStatistics{
        CreationTime:        unixMillisToTime(s.CreationTime),
        StartTime:           unixMillisToTime(s.StartTime),
        EndTime:             unixMillisToTime(s.EndTime),
        TotalBytesProcessed: s.TotalBytesProcessed,
    }
    switch {
    case s.Extract != nil:
        js.Details = &ExtractStatistics{
            DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
        }
    case s.Load != nil:
        js.Details = &LoadStatistics{
            InputFileBytes: s.Load.InputFileBytes,
            InputFiles:     s.Load.InputFiles,
            OutputBytes:    s.Load.OutputBytes,
            OutputRows:     s.Load.OutputRows,
        }
    case s.Query != nil:
        var names []string
        for _, qp := range s.Query.UndeclaredQueryParameters {
            names = append(names, qp.Name)
        }
        var tables []*Table
        for _, tr := range s.Query.ReferencedTables {
            tables = append(tables, bqToTable(tr, c))
        }
        js.Details = &QueryStatistics{
            BillingTier:                   s.Query.BillingTier,
            CacheHit:                      s.Query.CacheHit,
            StatementType:                 s.Query.StatementType,
            TotalBytesBilled:              s.Query.TotalBytesBilled,
            TotalBytesProcessed:           s.Query.TotalBytesProcessed,
            NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
            QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
            Schema:                        bqToSchema(s.Query.Schema),
            ReferencedTables:              tables,
            UndeclaredQueryParameterNames: names,
        }
    }
    j.lastStatus.Statistics = js
}

func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
    var res []*ExplainQueryStage
    for _, s := range stages {
        var steps []*ExplainQueryStep
        for _, p := range s.Steps {
            steps = append(steps, &ExplainQueryStep{
                Kind:     p.Kind,
                Substeps: p.Substeps,
            })
        }
        res = append(res, &ExplainQueryStage{
            ComputeRatioAvg: s.ComputeRatioAvg,
            ComputeRatioMax: s.ComputeRatioMax,
            ID:              s.Id,
            Name:            s.Name,
            ReadRatioAvg:    s.ReadRatioAvg,
            ReadRatioMax:    s.ReadRatioMax,
            RecordsRead:     s.RecordsRead,
            RecordsWritten:  s.RecordsWritten,
            Status:          s.Status,
            Steps:           steps,
            WaitRatioAvg:    s.WaitRatioAvg,
            WaitRatioMax:    s.WaitRatioMax,
            WriteRatioAvg:   s.WriteRatioAvg,
            WriteRatioMax:   s.WriteRatioMax,
        })
    }
    return res
}

95
vendor/cloud.google.com/go/bigquery/job_test.go
generated
vendored
@@ -1,95 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "testing"

    "cloud.google.com/go/internal/testutil"
    bq "google.golang.org/api/bigquery/v2"
)

func TestCreateJobRef(t *testing.T) {
    defer fixRandomID("RANDOM")()
    cNoLoc := &Client{projectID: "projectID"}
    cLoc := &Client{projectID: "projectID", Location: "defaultLoc"}
    for _, test := range []struct {
        in     JobIDConfig
        client *Client
        want   *bq.JobReference
    }{
        {
            in:   JobIDConfig{JobID: "foo"},
            want: &bq.JobReference{JobId: "foo"},
        },
        {
            in:   JobIDConfig{},
            want: &bq.JobReference{JobId: "RANDOM"},
        },
        {
            in:   JobIDConfig{AddJobIDSuffix: true},
            want: &bq.JobReference{JobId: "RANDOM"},
        },
        {
            in:   JobIDConfig{JobID: "foo", AddJobIDSuffix: true},
            want: &bq.JobReference{JobId: "foo-RANDOM"},
        },
        {
            in:   JobIDConfig{JobID: "foo", Location: "loc"},
            want: &bq.JobReference{JobId: "foo", Location: "loc"},
        },
        {
            in:     JobIDConfig{JobID: "foo"},
            client: cLoc,
            want:   &bq.JobReference{JobId: "foo", Location: "defaultLoc"},
        },
        {
            in:     JobIDConfig{JobID: "foo", Location: "loc"},
            client: cLoc,
            want:   &bq.JobReference{JobId: "foo", Location: "loc"},
        },
    } {
        client := test.client
        if client == nil {
            client = cNoLoc
        }
        got := test.in.createJobRef(client)
        test.want.ProjectId = "projectID"
        if !testutil.Equal(got, test.want) {
            t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want)
        }
    }
}

func fixRandomID(s string) func() {
    prev := randomIDFn
    randomIDFn = func() string { return s }
    return func() { randomIDFn = prev }
}

func checkJob(t *testing.T, i int, got, want *bq.Job) {
    if got.JobReference == nil {
        t.Errorf("#%d: empty job reference", i)
        return
    }
    if got.JobReference.JobId == "" {
        t.Errorf("#%d: empty job ID", i)
        return
    }
    d := testutil.Diff(got, want)
    if d != "" {
        t.Errorf("#%d: (got=-, want=+) %s", i, d)
    }
}

137
vendor/cloud.google.com/go/bigquery/load.go
generated
vendored
@@ -1,137 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "io"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
    // Src is the source from which data will be loaded.
    Src LoadSource

    // Dst is the table into which the data will be loaded.
    Dst *Table

    // CreateDisposition specifies the circumstances under which the destination table will be created.
    // The default is CreateIfNeeded.
    CreateDisposition TableCreateDisposition

    // WriteDisposition specifies how existing data in the destination table is treated.
    // The default is WriteAppend.
    WriteDisposition TableWriteDisposition

    // The labels associated with this job.
    Labels map[string]string

    // If non-nil, the destination table is partitioned by time.
    TimePartitioning *TimePartitioning

    // Custom encryption configuration (e.g., Cloud KMS keys).
    DestinationEncryptionConfig *EncryptionConfig

    // SchemaUpdateOptions allows the schema of the destination table to be
    // updated as a side effect of the load job.
    SchemaUpdateOptions []string
}

func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
    config := &bq.JobConfiguration{
        Labels: l.Labels,
        Load: &bq.JobConfigurationLoad{
            CreateDisposition:                  string(l.CreateDisposition),
            WriteDisposition:                   string(l.WriteDisposition),
            DestinationTable:                   l.Dst.toBQ(),
            TimePartitioning:                   l.TimePartitioning.toBQ(),
            DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(),
            SchemaUpdateOptions:                l.SchemaUpdateOptions,
        },
    }
    media := l.Src.populateLoadConfig(config.Load)
    return config, media
}

func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
    lc := &LoadConfig{
        Labels:                      q.Labels,
        CreateDisposition:           TableCreateDisposition(q.Load.CreateDisposition),
        WriteDisposition:            TableWriteDisposition(q.Load.WriteDisposition),
        Dst:                         bqToTable(q.Load.DestinationTable, c),
        TimePartitioning:            bqToTimePartitioning(q.Load.TimePartitioning),
        DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration),
        SchemaUpdateOptions:         q.Load.SchemaUpdateOptions,
    }
    var fc *FileConfig
    if len(q.Load.SourceUris) == 0 {
        s := NewReaderSource(nil)
        fc = &s.FileConfig
        lc.Src = s
    } else {
        s := NewGCSReference(q.Load.SourceUris...)
        fc = &s.FileConfig
        lc.Src = s
    }
    bqPopulateFileConfig(q.Load, fc)
    return lc
}

// A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct {
    JobIDConfig
    LoadConfig
    c *Client
}

// A LoadSource represents a source of data that can be loaded into
// a BigQuery table.
//
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface {
    // populates config, returns media
    populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
}

// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
// The returned Loader may optionally be further configured before its Run method is called.
// See GCSReference and ReaderSource for additional configuration options that
// affect loading.
func (t *Table) LoaderFrom(src LoadSource) *Loader {
    return &Loader{
        c: t.c,
        LoadConfig: LoadConfig{
            Src: src,
            Dst: t,
        },
    }
}

// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (*Job, error) {
    job, media := l.newJob()
    return l.c.insertJob(ctx, job, media)
}
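
// Illustrative sketch, not part of the vendored file: loading a CSV object
// from Cloud Storage with the Loader above. The bucket, dataset and table
// names are placeholders; WriteTruncate and SkipLeadingRows are defined
// elsewhere in this package.
func exampleLoad(ctx context.Context, c *Client) error {
    gcsRef := NewGCSReference("gs://my-bucket/data.csv")
    gcsRef.SkipLeadingRows = 1 // skip the header row
    loader := c.Dataset("mydataset").Table("mytable").LoaderFrom(gcsRef)
    loader.WriteDisposition = WriteTruncate
    job, err := loader.Run(ctx)
    if err != nil {
        return err
    }
    status, err := job.Wait(ctx)
    if err != nil {
        return err
    }
    return status.Err() // nil unless the job itself failed
}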

func (l *Loader) newJob() (*bq.Job, io.Reader) {
    config, media := l.LoadConfig.toBQ()
    return &bq.Job{
        JobReference:  l.JobIDConfig.createJobRef(l.c),
        Configuration: config,
    }, media
}

260
vendor/cloud.google.com/go/bigquery/load_test.go
generated
vendored
@@ -1,260 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "strings"
    "testing"
    "time"

    "cloud.google.com/go/internal/testutil"
    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"

    bq "google.golang.org/api/bigquery/v2"
)

func defaultLoadJob() *bq.Job {
    return &bq.Job{
        JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
        Configuration: &bq.JobConfiguration{
            Load: &bq.JobConfigurationLoad{
                DestinationTable: &bq.TableReference{
                    ProjectId: "client-project-id",
                    DatasetId: "dataset-id",
                    TableId:   "table-id",
                },
                SourceUris: []string{"uri"},
            },
        },
    }
}

func stringFieldSchema() *FieldSchema {
    return &FieldSchema{Name: "fieldname", Type: StringFieldType}
}

func nestedFieldSchema() *FieldSchema {
    return &FieldSchema{
        Name:   "nested",
        Type:   RecordFieldType,
        Schema: Schema{stringFieldSchema()},
    }
}

func bqStringFieldSchema() *bq.TableFieldSchema {
    return &bq.TableFieldSchema{
        Name: "fieldname",
        Type: "STRING",
    }
}

func bqNestedFieldSchema() *bq.TableFieldSchema {
    return &bq.TableFieldSchema{
        Name:   "nested",
        Type:   "RECORD",
        Fields: []*bq.TableFieldSchema{bqStringFieldSchema()},
    }
}

func TestLoad(t *testing.T) {
    defer fixRandomID("RANDOM")()
    c := &Client{projectID: "client-project-id"}

    testCases := []struct {
        dst      *Table
        src      LoadSource
        jobID    string
        location string
        config   LoadConfig
        want     *bq.Job
    }{
        {
            dst:  c.Dataset("dataset-id").Table("table-id"),
            src:  NewGCSReference("uri"),
            want: defaultLoadJob(),
        },
        {
            dst:      c.Dataset("dataset-id").Table("table-id"),
            src:      NewGCSReference("uri"),
            location: "loc",
            want: func() *bq.Job {
                j := defaultLoadJob()
                j.JobReference.Location = "loc"
                return j
            }(),
        },
        {
            dst:   c.Dataset("dataset-id").Table("table-id"),
            jobID: "ajob",
            config: LoadConfig{
                CreateDisposition:           CreateNever,
                WriteDisposition:            WriteTruncate,
                Labels:                      map[string]string{"a": "b"},
                TimePartitioning:            &TimePartitioning{Expiration: 1234 * time.Millisecond},
                DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
                SchemaUpdateOptions:         []string{"ALLOW_FIELD_ADDITION"},
            },
            src: NewGCSReference("uri"),
            want: func() *bq.Job {
                j := defaultLoadJob()
                j.Configuration.Labels = map[string]string{"a": "b"}
                j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
                j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
                j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{
                    Type:         "DAY",
                    ExpirationMs: 1234,
                }
                j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
                j.JobReference = &bq.JobReference{
                    JobId:     "ajob",
                    ProjectId: "client-project-id",
                }
                j.Configuration.Load.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
                return j
            }(),
        },
        {
            dst: c.Dataset("dataset-id").Table("table-id"),
            src: func() *GCSReference {
                g := NewGCSReference("uri")
                g.MaxBadRecords = 1
                g.AllowJaggedRows = true
                g.AllowQuotedNewlines = true
                g.IgnoreUnknownValues = true
                return g
            }(),
            want: func() *bq.Job {
                j := defaultLoadJob()
                j.Configuration.Load.MaxBadRecords = 1
                j.Configuration.Load.AllowJaggedRows = true
                j.Configuration.Load.AllowQuotedNewlines = true
                j.Configuration.Load.IgnoreUnknownValues = true
                return j
            }(),
        },
        {
            dst: c.Dataset("dataset-id").Table("table-id"),
            src: func() *GCSReference {
                g := NewGCSReference("uri")
                g.Schema = Schema{
                    stringFieldSchema(),
                    nestedFieldSchema(),
                }
                return g
            }(),
            want: func() *bq.Job {
                j := defaultLoadJob()
                j.Configuration.Load.Schema = &bq.TableSchema{
                    Fields: []*bq.TableFieldSchema{
                        bqStringFieldSchema(),
                        bqNestedFieldSchema(),
                    }}
                return j
            }(),
        },
        {
            dst: c.Dataset("dataset-id").Table("table-id"),
            src: func() *GCSReference {
                g := NewGCSReference("uri")
                g.SkipLeadingRows = 1
                g.SourceFormat = JSON
                g.Encoding = UTF_8
                g.FieldDelimiter = "\t"
                g.Quote = "-"
                return g
            }(),
            want: func() *bq.Job {
                j := defaultLoadJob()
                j.Configuration.Load.SkipLeadingRows = 1
                j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
                j.Configuration.Load.Encoding = "UTF-8"
                j.Configuration.Load.FieldDelimiter = "\t"
                hyphen := "-"
                j.Configuration.Load.Quote = &hyphen
                return j
            }(),
        },
        {
            dst: c.Dataset("dataset-id").Table("table-id"),
            src: NewGCSReference("uri"),
            want: func() *bq.Job {
                j := defaultLoadJob()
                // Quote is left unset in GCSReference, so should be nil here.
                j.Configuration.Load.Quote = nil
                return j
            }(),
        },
        {
            dst: c.Dataset("dataset-id").Table("table-id"),
            src: func() *GCSReference {
                g := NewGCSReference("uri")
                g.ForceZeroQuote = true
                return g
            }(),
            want: func() *bq.Job {
                j := defaultLoadJob()
                empty := ""
                j.Configuration.Load.Quote = &empty
                return j
            }(),
        },
        {
            dst: c.Dataset("dataset-id").Table("table-id"),
            src: func() *ReaderSource {
                r := NewReaderSource(strings.NewReader("foo"))
                r.SkipLeadingRows = 1
                r.SourceFormat = JSON
                r.Encoding = UTF_8
                r.FieldDelimiter = "\t"
                r.Quote = "-"
                return r
            }(),
            want: func() *bq.Job {
                j := defaultLoadJob()
                j.Configuration.Load.SourceUris = nil
                j.Configuration.Load.SkipLeadingRows = 1
                j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
                j.Configuration.Load.Encoding = "UTF-8"
                j.Configuration.Load.FieldDelimiter = "\t"
                hyphen := "-"
                j.Configuration.Load.Quote = &hyphen
                return j
            }(),
        },
    }

    for i, tc := range testCases {
        loader := tc.dst.LoaderFrom(tc.src)
        loader.JobID = tc.jobID
        loader.Location = tc.location
        tc.config.Src = tc.src
        tc.config.Dst = tc.dst
        loader.LoadConfig = tc.config
        got, _ := loader.newJob()
        checkJob(t, i, got, tc.want)

        jc, err := bqToJobConfig(got.Configuration, c)
        if err != nil {
            t.Fatalf("#%d: %v", i, err)
        }
        diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig,
            cmp.AllowUnexported(Table{}, Client{}),
            cmpopts.IgnoreUnexported(ReaderSource{}))
        if diff != "" {
            t.Errorf("#%d: (got=-, want=+):\n%s", i, diff)
        }
    }
}

299
vendor/cloud.google.com/go/bigquery/nulls.go
generated
vendored
@@ -1,299 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "bytes"
    "encoding/json"
    "fmt"
    "reflect"
    "strconv"
    "time"

    "cloud.google.com/go/civil"
)

// NullInt64 represents a BigQuery INT64 that may be NULL.
type NullInt64 struct {
    Int64 int64
    Valid bool // Valid is true if Int64 is not NULL.
}

func (n NullInt64) String() string { return nullstr(n.Valid, n.Int64) }

// NullString represents a BigQuery STRING that may be NULL.
type NullString struct {
    StringVal string
    Valid     bool // Valid is true if StringVal is not NULL.
}

func (n NullString) String() string { return nullstr(n.Valid, n.StringVal) }

// NullFloat64 represents a BigQuery FLOAT64 that may be NULL.
type NullFloat64 struct {
    Float64 float64
    Valid   bool // Valid is true if Float64 is not NULL.
}

func (n NullFloat64) String() string { return nullstr(n.Valid, n.Float64) }

// NullBool represents a BigQuery BOOL that may be NULL.
type NullBool struct {
    Bool  bool
    Valid bool // Valid is true if Bool is not NULL.
}

func (n NullBool) String() string { return nullstr(n.Valid, n.Bool) }

// NullTimestamp represents a BigQuery TIMESTAMP that may be null.
type NullTimestamp struct {
    Timestamp time.Time
    Valid     bool // Valid is true if Timestamp is not NULL.
}

func (n NullTimestamp) String() string { return nullstr(n.Valid, n.Timestamp) }

// NullDate represents a BigQuery DATE that may be null.
type NullDate struct {
    Date  civil.Date
    Valid bool // Valid is true if Date is not NULL.
}

func (n NullDate) String() string { return nullstr(n.Valid, n.Date) }

// NullTime represents a BigQuery TIME that may be null.
type NullTime struct {
    Time  civil.Time
    Valid bool // Valid is true if Time is not NULL.
}

func (n NullTime) String() string {
    if !n.Valid {
        return "<null>"
    }
    return CivilTimeString(n.Time)
}

// NullDateTime represents a BigQuery DATETIME that may be null.
type NullDateTime struct {
    DateTime civil.DateTime
    Valid    bool // Valid is true if DateTime is not NULL.
}

func (n NullDateTime) String() string {
    if !n.Valid {
        return "<null>"
    }
    return CivilDateTimeString(n.DateTime)
}

func (n NullInt64) MarshalJSON() ([]byte, error)     { return nulljson(n.Valid, n.Int64) }
func (n NullFloat64) MarshalJSON() ([]byte, error)   { return nulljson(n.Valid, n.Float64) }
func (n NullBool) MarshalJSON() ([]byte, error)      { return nulljson(n.Valid, n.Bool) }
func (n NullString) MarshalJSON() ([]byte, error)    { return nulljson(n.Valid, n.StringVal) }
func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) }
func (n NullDate) MarshalJSON() ([]byte, error)      { return nulljson(n.Valid, n.Date) }

func (n NullTime) MarshalJSON() ([]byte, error) {
    if !n.Valid {
        return jsonNull, nil
    }
    return []byte(`"` + CivilTimeString(n.Time) + `"`), nil
}

func (n NullDateTime) MarshalJSON() ([]byte, error) {
    if !n.Valid {
        return jsonNull, nil
    }
    return []byte(`"` + CivilDateTimeString(n.DateTime) + `"`), nil
}

func nullstr(valid bool, v interface{}) string {
    if !valid {
        return "NULL"
    }
    return fmt.Sprint(v)
}

var jsonNull = []byte("null")

func nulljson(valid bool, v interface{}) ([]byte, error) {
    if !valid {
        return jsonNull, nil
    }
    return json.Marshal(v)
}
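
// Illustrative sketch, not part of the vendored file: the JSON round-trip
// behavior of the nullable wrappers defined above.
func exampleNullJSON() error {
    b, err := json.Marshal(NullInt64{Int64: 42, Valid: true})
    if err != nil {
        return err
    }
    fmt.Println(string(b)) // 42

    b, err = json.Marshal(NullInt64{}) // invalid, i.e. NULL
    if err != nil {
        return err
    }
    fmt.Println(string(b)) // null

    var n NullInt64
    if err := json.Unmarshal([]byte(`null`), &n); err != nil {
        return err
    }
    fmt.Println(n.Valid) // false
    return nil
}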

func (n *NullInt64) UnmarshalJSON(b []byte) error {
    n.Valid = false
    n.Int64 = 0
    if bytes.Equal(b, jsonNull) {
        return nil
    }

    if err := json.Unmarshal(b, &n.Int64); err != nil {
        return err
    }
    n.Valid = true
    return nil
}

func (n *NullFloat64) UnmarshalJSON(b []byte) error {
    n.Valid = false
    n.Float64 = 0
    if bytes.Equal(b, jsonNull) {
        return nil
    }

    if err := json.Unmarshal(b, &n.Float64); err != nil {
        return err
    }
    n.Valid = true
    return nil
}

func (n *NullBool) UnmarshalJSON(b []byte) error {
    n.Valid = false
    n.Bool = false
    if bytes.Equal(b, jsonNull) {
        return nil
    }

    if err := json.Unmarshal(b, &n.Bool); err != nil {
        return err
    }
    n.Valid = true
    return nil
}

func (n *NullString) UnmarshalJSON(b []byte) error {
    n.Valid = false
    n.StringVal = ""
    if bytes.Equal(b, jsonNull) {
        return nil
    }

    if err := json.Unmarshal(b, &n.StringVal); err != nil {
        return err
    }
    n.Valid = true
    return nil
}

func (n *NullTimestamp) UnmarshalJSON(b []byte) error {
    n.Valid = false
    n.Timestamp = time.Time{}
    if bytes.Equal(b, jsonNull) {
        return nil
    }

    if err := json.Unmarshal(b, &n.Timestamp); err != nil {
        return err
    }
    n.Valid = true
    return nil
}

func (n *NullDate) UnmarshalJSON(b []byte) error {
    n.Valid = false
    n.Date = civil.Date{}
    if bytes.Equal(b, jsonNull) {
        return nil
    }

    if err := json.Unmarshal(b, &n.Date); err != nil {
        return err
    }
    n.Valid = true
    return nil
}

func (n *NullTime) UnmarshalJSON(b []byte) error {
    n.Valid = false
    n.Time = civil.Time{}
    if bytes.Equal(b, jsonNull) {
        return nil
    }

    s, err := strconv.Unquote(string(b))
    if err != nil {
        return err
    }

    t, err := civil.ParseTime(s)
    if err != nil {
        return err
    }
    n.Time = t

    n.Valid = true
    return nil
}

func (n *NullDateTime) UnmarshalJSON(b []byte) error {
    n.Valid = false
    n.DateTime = civil.DateTime{}
    if bytes.Equal(b, jsonNull) {
        return nil
    }

    s, err := strconv.Unquote(string(b))
    if err != nil {
        return err
    }

    dt, err := parseCivilDateTime(s)
    if err != nil {
        return err
    }
    n.DateTime = dt

    n.Valid = true
    return nil
}

var (
    typeOfNullInt64     = reflect.TypeOf(NullInt64{})
    typeOfNullFloat64   = reflect.TypeOf(NullFloat64{})
    typeOfNullBool      = reflect.TypeOf(NullBool{})
    typeOfNullString    = reflect.TypeOf(NullString{})
    typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{})
    typeOfNullDate      = reflect.TypeOf(NullDate{})
    typeOfNullTime      = reflect.TypeOf(NullTime{})
    typeOfNullDateTime  = reflect.TypeOf(NullDateTime{})
)

func nullableFieldType(t reflect.Type) FieldType {
    switch t {
    case typeOfNullInt64:
        return IntegerFieldType
    case typeOfNullFloat64:
        return FloatFieldType
    case typeOfNullBool:
        return BooleanFieldType
    case typeOfNullString:
        return StringFieldType
    case typeOfNullTimestamp:
        return TimestampFieldType
    case typeOfNullDate:
        return DateFieldType
    case typeOfNullTime:
        return TimeFieldType
    case typeOfNullDateTime:
        return DateTimeFieldType
    default:
        return ""
    }
}

73
vendor/cloud.google.com/go/bigquery/nulls_test.go
generated
vendored
@@ -1,73 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "encoding/json"
    "reflect"
    "testing"

    "cloud.google.com/go/civil"
    "cloud.google.com/go/internal/testutil"
)

var (
    nullsTestTime     = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 1000}
    nullsTestDateTime = civil.DateTime{Date: civil.Date{Year: 2016, Month: 11, Day: 5}, Time: nullsTestTime}
)

func TestNullsJSON(t *testing.T) {
    for _, test := range []struct {
        in   interface{}
        want string
    }{
        {&NullInt64{Valid: true, Int64: 3}, `3`},
        {&NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
        {&NullBool{Valid: true, Bool: true}, `true`},
        {&NullString{Valid: true, StringVal: "foo"}, `"foo"`},
        {&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
        {&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
        {&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`},
        {&NullDateTime{Valid: true, DateTime: nullsTestDateTime}, `"2016-11-05 07:50:22.000001"`},

        {&NullInt64{}, `null`},
        {&NullFloat64{}, `null`},
        {&NullBool{}, `null`},
        {&NullString{}, `null`},
        {&NullTimestamp{}, `null`},
        {&NullDate{}, `null`},
        {&NullTime{}, `null`},
        {&NullDateTime{}, `null`},
    } {
        bytes, err := json.Marshal(test.in)
        if err != nil {
            t.Fatal(err)
        }
        if got, want := string(bytes), test.want; got != want {
            t.Errorf("%#v: got %s, want %s", test.in, got, want)
        }

        typ := reflect.Indirect(reflect.ValueOf(test.in)).Type()
        value := reflect.New(typ).Interface()
        err = json.Unmarshal(bytes, value)
        if err != nil {
            t.Fatal(err)
        }

        if !testutil.Equal(value, test.in) {
            t.Errorf("%#v: got %#v, want %#v", test.in, value, test.in)
        }
    }
}

346
vendor/cloud.google.com/go/bigquery/params.go
generated
vendored
@@ -1,346 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "encoding/base64"
    "errors"
    "fmt"
    "reflect"
    "regexp"
    "time"

    "cloud.google.com/go/civil"
    "cloud.google.com/go/internal/fields"

    bq "google.golang.org/api/bigquery/v2"
)

var (
    // See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
    timestampFormat = "2006-01-02 15:04:05.999999-07:00"

    // See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
    validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)

const nullableTagOption = "nullable"

func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
    name, keep, opts, err := fields.ParseStandardTag("bigquery", t)
    if err != nil {
        return "", false, nil, err
    }
    if name != "" && !validFieldName.MatchString(name) {
        return "", false, nil, errInvalidFieldName
    }
    for _, opt := range opts {
        if opt != nullableTagOption {
            return "", false, nil, fmt.Errorf(
                "bigquery: invalid tag option %q. The only valid option is %q",
                opt, nullableTagOption)
        }
    }
    return name, keep, opts, nil
}

var fieldCache = fields.NewCache(bqTagParser, nil, nil)

var (
    int64ParamType     = &bq.QueryParameterType{Type: "INT64"}
    float64ParamType   = &bq.QueryParameterType{Type: "FLOAT64"}
    boolParamType      = &bq.QueryParameterType{Type: "BOOL"}
    stringParamType    = &bq.QueryParameterType{Type: "STRING"}
    bytesParamType     = &bq.QueryParameterType{Type: "BYTES"}
    dateParamType      = &bq.QueryParameterType{Type: "DATE"}
    timeParamType      = &bq.QueryParameterType{Type: "TIME"}
    dateTimeParamType  = &bq.QueryParameterType{Type: "DATETIME"}
    timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
)

var (
    typeOfDate     = reflect.TypeOf(civil.Date{})
    typeOfTime     = reflect.TypeOf(civil.Time{})
    typeOfDateTime = reflect.TypeOf(civil.DateTime{})
    typeOfGoTime   = reflect.TypeOf(time.Time{})
)

// A QueryParameter is a parameter to a query.
type QueryParameter struct {
    // Name is used for named parameter mode.
    // It must match the name in the query case-insensitively.
    Name string

    // Value is the value of the parameter.
    //
    // When you create a QueryParameter to send to BigQuery, the following Go types
    // are supported, with their corresponding Bigquery types:
    //   int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
    //     Note that uint, uint64 and uintptr are not supported, because
    //     they may contain values that cannot fit into a 64-bit signed integer.
    //   float32, float64: FLOAT64
    //   bool: BOOL
    //   string: STRING
    //   []byte: BYTES
    //   time.Time: TIMESTAMP
    //   Arrays and slices of the above.
    //   Structs of the above. Only the exported fields are used.
    //
    // When a QueryParameter is returned inside a QueryConfig from a call to
    // Job.Config:
    //   Integers are of type int64.
    //   Floating-point values are of type float64.
    //   Arrays are of type []interface{}, regardless of the array element type.
    //   Structs are of type map[string]interface{}.
    Value interface{}
}
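
// Illustrative sketch, not part of the vendored file: attaching a named
// parameter to a query. Query and its Parameters field are defined in
// query.go of this package; the SQL and values are placeholders.
func exampleQueryParameter(c *Client) *Query {
    q := c.Query("SELECT word FROM corpus WHERE word_count > @min")
    q.Parameters = []QueryParameter{
        {Name: "min", Value: 100}, // int maps to INT64, per the doc comment above
    }
    return q // call q.Run(ctx) to start the job
}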

func (p QueryParameter) toBQ() (*bq.QueryParameter, error) {
    pv, err := paramValue(reflect.ValueOf(p.Value))
    if err != nil {
        return nil, err
    }
    pt, err := paramType(reflect.TypeOf(p.Value))
    if err != nil {
        return nil, err
    }
    return &bq.QueryParameter{
        Name:           p.Name,
        ParameterValue: &pv,
        ParameterType:  pt,
    }, nil
}

func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
    if t == nil {
        return nil, errors.New("bigquery: nil parameter")
    }
    switch t {
    case typeOfDate:
        return dateParamType, nil
    case typeOfTime:
        return timeParamType, nil
    case typeOfDateTime:
        return dateTimeParamType, nil
    case typeOfGoTime:
        return timestampParamType, nil
    }
    switch t.Kind() {
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
        return int64ParamType, nil

    case reflect.Float32, reflect.Float64:
        return float64ParamType, nil

    case reflect.Bool:
        return boolParamType, nil

    case reflect.String:
        return stringParamType, nil

    case reflect.Slice:
        if t.Elem().Kind() == reflect.Uint8 {
            return bytesParamType, nil
        }
        fallthrough

    case reflect.Array:
        et, err := paramType(t.Elem())
        if err != nil {
            return nil, err
        }
        return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil

    case reflect.Ptr:
        if t.Elem().Kind() != reflect.Struct {
            break
        }
        t = t.Elem()
        fallthrough

    case reflect.Struct:
        var fts []*bq.QueryParameterTypeStructTypes
        fields, err := fieldCache.Fields(t)
        if err != nil {
            return nil, err
        }
        for _, f := range fields {
            pt, err := paramType(f.Type)
            if err != nil {
                return nil, err
            }
            fts = append(fts, &bq.QueryParameterTypeStructTypes{
                Name: f.Name,
                Type: pt,
            })
        }
        return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
    }
    return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
}

func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
    var res bq.QueryParameterValue
    if !v.IsValid() {
        return res, errors.New("bigquery: nil parameter")
    }
    t := v.Type()
    switch t {
    case typeOfDate:
        res.Value = v.Interface().(civil.Date).String()
        return res, nil

    case typeOfTime:
        // civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
        // (If we send nanoseconds, then when we try to read the result we get "query job
        // missing destination table").
        res.Value = CivilTimeString(v.Interface().(civil.Time))
        return res, nil

    case typeOfDateTime:
        res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
        return res, nil

    case typeOfGoTime:
        res.Value = v.Interface().(time.Time).Format(timestampFormat)
        return res, nil
    }
    switch t.Kind() {
    case reflect.Slice:
        if t.Elem().Kind() == reflect.Uint8 {
            res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
            return res, nil
        }
        fallthrough

    case reflect.Array:
        var vals []*bq.QueryParameterValue
        for i := 0; i < v.Len(); i++ {
            val, err := paramValue(v.Index(i))
            if err != nil {
                return bq.QueryParameterValue{}, err
            }
            vals = append(vals, &val)
        }
        return bq.QueryParameterValue{ArrayValues: vals}, nil

    case reflect.Ptr:
        if t.Elem().Kind() != reflect.Struct {
            return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
        }
        t = t.Elem()
        v = v.Elem()
        if !v.IsValid() {
            // nil pointer becomes empty value
            return res, nil
        }
        fallthrough

    case reflect.Struct:
        fields, err := fieldCache.Fields(t)
        if err != nil {
            return bq.QueryParameterValue{}, err
        }
        res.StructValues = map[string]bq.QueryParameterValue{}
        for _, f := range fields {
            fv := v.FieldByIndex(f.Index)
            fp, err := paramValue(fv)
            if err != nil {
                return bq.QueryParameterValue{}, err
            }
            res.StructValues[f.Name] = fp
        }
        return res, nil
    }
    // None of the above: assume a scalar type. (If it's not a valid type,
    // paramType will catch the error.)
    res.Value = fmt.Sprint(v.Interface())
    return res, nil
}

func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) {
    p := QueryParameter{Name: q.Name}
    val, err := convertParamValue(q.ParameterValue, q.ParameterType)
    if err != nil {
        return QueryParameter{}, err
    }
    p.Value = val
    return p, nil
}

var paramTypeToFieldType = map[string]FieldType{
    int64ParamType.Type:   IntegerFieldType,
    float64ParamType.Type: FloatFieldType,
    boolParamType.Type:    BooleanFieldType,
    stringParamType.Type:  StringFieldType,
    bytesParamType.Type:   BytesFieldType,
    dateParamType.Type:    DateFieldType,
    timeParamType.Type:    TimeFieldType,
}

// Convert a parameter value from the service to a Go value. This is similar to, but
// not quite the same as, converting data values.
func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
    switch qtype.Type {
    case "ARRAY":
        if qval == nil {
            return []interface{}(nil), nil
        }
        return convertParamArray(qval.ArrayValues, qtype.ArrayType)
    case "STRUCT":
        if qval == nil {
            return map[string]interface{}(nil), nil
        }
        return convertParamStruct(qval.StructValues, qtype.StructTypes)
    case "TIMESTAMP":
        return time.Parse(timestampFormat, qval.Value)
    case "DATETIME":
        return parseCivilDateTime(qval.Value)
    default:
        return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
    }
}

// convertParamArray converts a query parameter array value to a Go value. It
// always returns a []interface{}.
func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
    var vals []interface{}
    for _, el := range elVals {
        val, err := convertParamValue(el, elType)
        if err != nil {
            return nil, err
        }
        vals = append(vals, val)
    }
    return vals, nil
}
|

// convertParamStruct converts a query parameter struct value into a Go value. It
// always returns a map[string]interface{}.
func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) {
	vals := map[string]interface{}{}
	for _, st := range sTypes {
		if sv, ok := sVals[st.Name]; ok {
			val, err := convertParamValue(&sv, st.Type)
			if err != nil {
				return nil, err
			}
			vals[st.Name] = val
		} else {
			vals[st.Name] = nil
		}
	}
	return vals, nil
}
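
For context, a minimal sketch of how the conversions above are reached through the public API of this package. The project ID and query are placeholders, and default credentials are assumed; this is an illustration, not code from the repository:

	package main

	import (
		"fmt"
		"log"

		"cloud.google.com/go/bigquery"
		"golang.org/x/net/context"
	)

	func main() {
		ctx := context.Background()
		// "my-project" is a placeholder project ID.
		client, err := bigquery.NewClient(ctx, "my-project")
		if err != nil {
			log.Fatal(err)
		}
		// The int value is turned into an INT64 query parameter by
		// paramType/paramValue before the job is submitted.
		q := client.Query("SELECT ? * 2")
		q.Parameters = []bigquery.QueryParameter{{Value: 3}}
		it, err := q.Read(ctx)
		if err != nil {
			log.Fatal(err)
		}
		var row []bigquery.Value
		if err := it.Next(&row); err != nil {
			log.Fatal(err)
		}
		fmt.Println(row[0]) // int64(6)
	}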
361
vendor/cloud.google.com/go/bigquery/params_test.go
generated
vendored
@@ -1,361 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"math"
	"reflect"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"

	"cloud.google.com/go/civil"
	"cloud.google.com/go/internal/testutil"
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

var scalarTests = []struct {
	val      interface{}            // The Go value
	wantVal  string                 // paramValue's desired output
	wantType *bq.QueryParameterType // paramType's desired output
}{
	{int64(0), "0", int64ParamType},
	{3.14, "3.14", float64ParamType},
	{3.14159e-87, "3.14159e-87", float64ParamType},
	{true, "true", boolParamType},
	{"string", "string", stringParamType},
	{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType},
	{math.NaN(), "NaN", float64ParamType},
	{[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo"
	{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
		"2016-03-20 04:22:09.000005-01:02",
		timestampParamType},
	{civil.Date{Year: 2016, Month: 3, Day: 20}, "2016-03-20", dateParamType},
	{civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}, "04:05:06.789000", timeParamType},
	{civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}},
		"2016-03-20 04:05:06.789000",
		dateTimeParamType},
}

type (
	S1 struct {
		A int
		B *S2
		C bool
	}
	S2 struct {
		D string
		e int
	}
)

var (
	s1 = S1{
		A: 1,
		B: &S2{D: "s"},
		C: true,
	}

	s1ParamType = &bq.QueryParameterType{
		Type: "STRUCT",
		StructTypes: []*bq.QueryParameterTypeStructTypes{
			{Name: "A", Type: int64ParamType},
			{Name: "B", Type: &bq.QueryParameterType{
				Type: "STRUCT",
				StructTypes: []*bq.QueryParameterTypeStructTypes{
					{Name: "D", Type: stringParamType},
				},
			}},
			{Name: "C", Type: boolParamType},
		},
	}

	s1ParamValue = bq.QueryParameterValue{
		StructValues: map[string]bq.QueryParameterValue{
			"A": sval("1"),
			"B": bq.QueryParameterValue{
				StructValues: map[string]bq.QueryParameterValue{
					"D": sval("s"),
				},
			},
			"C": sval("true"),
		},
	}

	s1ParamReturnValue = map[string]interface{}{
		"A": int64(1),
		"B": map[string]interface{}{"D": "s"},
		"C": true,
	}
)

func sval(s string) bq.QueryParameterValue {
	return bq.QueryParameterValue{Value: s}
}

func TestParamValueScalar(t *testing.T) {
	for _, test := range scalarTests {
		got, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Errorf("%v: got %v, want nil", test.val, err)
			continue
		}
		want := sval(test.wantVal)
		if !testutil.Equal(got, want) {
			t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
		}
	}
}

func TestParamValueArray(t *testing.T) {
	qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{
		{Value: "1"},
		{Value: "2"},
	},
	}
	for _, test := range []struct {
		val  interface{}
		want bq.QueryParameterValue
	}{
		{[]int(nil), bq.QueryParameterValue{}},
		{[]int{}, bq.QueryParameterValue{}},
		{[]int{1, 2}, qpv},
		{[2]int{1, 2}, qpv},
	} {
		got, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want)
		}
	}
}

func TestParamValueStruct(t *testing.T) {
	got, err := paramValue(reflect.ValueOf(s1))
	if err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(got, s1ParamValue) {
		t.Errorf("got %+v\nwant %+v", got, s1ParamValue)
	}
}

func TestParamValueErrors(t *testing.T) {
	// paramValue lets a few invalid types through, but paramType catches them.
	// Since we never call one without the other that's fine.
	for _, val := range []interface{}{nil, new([]int)} {
		_, err := paramValue(reflect.ValueOf(val))
		if err == nil {
			t.Errorf("%v (%T): got nil, want error", val, val)
		}
	}
}

func TestParamType(t *testing.T) {
	for _, test := range scalarTests {
		got, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.wantType) {
			t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType)
		}
	}
	for _, test := range []struct {
		val  interface{}
		want *bq.QueryParameterType
	}{
		{uint32(32767), int64ParamType},
		{[]byte("foo"), bytesParamType},
		{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
		{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
		{S1{}, s1ParamType},
	} {
		got, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
		}
	}
}

func TestParamTypeErrors(t *testing.T) {
	for _, val := range []interface{}{
		nil, uint(0), new([]int), make(chan int),
	} {
		_, err := paramType(reflect.TypeOf(val))
		if err == nil {
			t.Errorf("%v (%T): got nil, want error", val, val)
		}
	}
}

func TestConvertParamValue(t *testing.T) {
	// Scalars.
	for _, test := range scalarTests {
		pval, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		ptype, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		got, err := convertParamValue(&pval, ptype)
		if err != nil {
			t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err)
		}
		if !testutil.Equal(got, test.val) {
			t.Errorf("%#v: got %#v", test.val, got)
		}
	}
	// Arrays.
	for _, test := range []struct {
		pval *bq.QueryParameterValue
		want []interface{}
	}{
		{
			&bq.QueryParameterValue{},
			nil,
		},
		{
			&bq.QueryParameterValue{
				ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}},
			},
			[]interface{}{int64(1), int64(2)},
		},
	} {
		ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}
		got, err := convertParamValue(test.pval, ptype)
		if err != nil {
			t.Fatalf("%+v: %v", test.pval, err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want)
		}
	}
	// Structs.
	got, err := convertParamValue(&s1ParamValue, s1ParamType)
	if err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(got, s1ParamReturnValue) {
		t.Errorf("got %+v, want %+v", got, s1ParamReturnValue)
	}
}

func TestIntegration_ScalarParam(t *testing.T) {
	roundToMicros := cmp.Transformer("RoundToMicros",
		func(t time.Time) time.Time { return t.Round(time.Microsecond) })
	c := getClient(t)
	for _, test := range scalarTests {
		gotData, gotParam, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(gotData, test.val, roundToMicros) {
			t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val)
		}
		if !testutil.Equal(gotParam, test.val, roundToMicros) {
			t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val)
		}
	}
}

func TestIntegration_OtherParam(t *testing.T) {
	c := getClient(t)
	for _, test := range []struct {
		val       interface{}
		wantData  interface{}
		wantParam interface{}
	}{
		{[]int(nil), []Value(nil), []interface{}(nil)},
		{[]int{}, []Value(nil), []interface{}(nil)},
		{
			[]int{1, 2},
			[]Value{int64(1), int64(2)},
			[]interface{}{int64(1), int64(2)},
		},
		{
			[3]int{1, 2, 3},
			[]Value{int64(1), int64(2), int64(3)},
			[]interface{}{int64(1), int64(2), int64(3)},
		},
		{
			S1{},
			[]Value{int64(0), nil, false},
			map[string]interface{}{
				"A": int64(0),
				"B": nil,
				"C": false,
			},
		},
		{
			s1,
			[]Value{int64(1), []Value{"s"}, true},
			s1ParamReturnValue,
		},
	} {
		gotData, gotParam, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(gotData, test.wantData) {
			t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
				test.val, gotData, gotData, test.wantData, test.wantData)
		}
		if !testutil.Equal(gotParam, test.wantParam) {
			t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
				test.val, gotParam, gotParam, test.wantParam, test.wantParam)
		}
	}
}

// paramRoundTrip passes x as a query parameter to BigQuery. It returns
// the resulting data value from running the query and the parameter value from
// the returned job configuration.
func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) {
	ctx := context.Background()
	q := c.Query("select ?")
	q.Parameters = []QueryParameter{{Value: x}}
	job, err := q.Run(ctx)
	if err != nil {
		return nil, nil, err
	}
	it, err := job.Read(ctx)
	if err != nil {
		return nil, nil, err
	}
	var val []Value
	err = it.Next(&val)
	if err != nil {
		return nil, nil, err
	}
	if len(val) != 1 {
		return nil, nil, errors.New("wrong number of values")
	}
	conf, err := job.Config()
	if err != nil {
		return nil, nil, err
	}
	return val[0], conf.(*QueryConfig).Parameters[0].Value, nil
}
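
The tests above drive positional ("?") parameters end to end; named syntax goes through the same conversion path, with each parameter carrying a Name. A two-line sketch, reusing the placeholder client from the earlier sketch:

	q := client.Query("SELECT @val * 2")
	q.Parameters = []bigquery.QueryParameter{{Name: "val", Value: 3}}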
294
vendor/cloud.google.com/go/bigquery/query.go
generated
vendored
@@ -1,294 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

// QueryConfig holds the configuration for a query job.
type QueryConfig struct {
	// Dst is the table into which the results of the query will be written.
	// If this field is nil, a temporary table will be created.
	Dst *Table

	// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
	Q string

	// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.
	// If DefaultProjectID is set, DefaultDatasetID must also be set.
	DefaultProjectID string
	DefaultDatasetID string

	// TableDefinitions describes data sources outside of BigQuery.
	// The map keys may be used as table names in the query string.
	//
	// When a QueryConfig is returned from Job.Config, the map values
	// are always of type *ExternalDataConfig.
	TableDefinitions map[string]ExternalData

	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition

	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteEmpty.
	WriteDisposition TableWriteDisposition

	// DisableQueryCache prevents results being fetched from the query cache.
	// If this field is false, results are fetched from the cache if they are available.
	// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
	// Cached results are only available when TableID is unspecified in the query's destination Table.
	// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
	DisableQueryCache bool

	// DisableFlattenedResults prevents results being flattened.
	// If this field is false, results from nested and repeated fields are flattened.
	// DisableFlattenedResults implies AllowLargeResults.
	// For more information, see https://cloud.google.com/bigquery/docs/data#nested
	DisableFlattenedResults bool

	// AllowLargeResults allows the query to produce arbitrarily large result tables.
	// The destination must be a table.
	// When using this option, queries will take longer to execute, even if the result set is small.
	// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
	AllowLargeResults bool

	// Priority specifies the priority with which to schedule the query.
	// The default priority is InteractivePriority.
	// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
	Priority QueryPriority

	// MaxBillingTier sets the maximum billing tier for a Query.
	// Queries that have resource usage beyond this tier will fail (without
	// incurring a charge). If this field is zero, the project default will be used.
	MaxBillingTier int

	// MaxBytesBilled limits the number of bytes billed for
	// this job. Queries that would exceed this limit will fail (without incurring
	// a charge).
	// If this field is less than 1, the project default will be
	// used.
	MaxBytesBilled int64

	// UseStandardSQL causes the query to use standard SQL. The default.
	// Deprecated: use UseLegacySQL.
	UseStandardSQL bool

	// UseLegacySQL causes the query to use legacy SQL.
	UseLegacySQL bool

	// Parameters is a list of query parameters. The presence of parameters
	// implies the use of standard SQL.
	// If the query uses positional syntax ("?"), then no parameter may have a name.
	// If the query uses named syntax ("@p"), then all parameters must have names.
	// It is illegal to mix positional and named syntax.
	Parameters []QueryParameter

	// TimePartitioning specifies time-based partitioning
	// for the destination table.
	TimePartitioning *TimePartitioning

	// The labels associated with this job.
	Labels map[string]string

	// If true, don't actually run this job. A valid query will return a mostly
	// empty response with some processing statistics, while an invalid query will
	// return the same error it would if it wasn't a dry run.
	//
	// Query.Read will fail with dry-run queries. Call Query.Run instead, and then
	// call LastStatus on the returned job to get statistics. Calling Status on a
	// dry-run job will fail.
	DryRun bool

	// Custom encryption configuration (e.g., Cloud KMS keys).
	DestinationEncryptionConfig *EncryptionConfig
}

func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
	qconf := &bq.JobConfigurationQuery{
		Query:                              qc.Q,
		CreateDisposition:                  string(qc.CreateDisposition),
		WriteDisposition:                   string(qc.WriteDisposition),
		AllowLargeResults:                  qc.AllowLargeResults,
		Priority:                           string(qc.Priority),
		MaximumBytesBilled:                 qc.MaxBytesBilled,
		TimePartitioning:                   qc.TimePartitioning.toBQ(),
		DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(),
	}
	if len(qc.TableDefinitions) > 0 {
		qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
	}
	for name, data := range qc.TableDefinitions {
		qconf.TableDefinitions[name] = data.toBQ()
	}
	if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" {
		qconf.DefaultDataset = &bq.DatasetReference{
			DatasetId: qc.DefaultDatasetID,
			ProjectId: qc.DefaultProjectID,
		}
	}
	if tier := int64(qc.MaxBillingTier); tier > 0 {
		qconf.MaximumBillingTier = &tier
	}
	f := false
	if qc.DisableQueryCache {
		qconf.UseQueryCache = &f
	}
	if qc.DisableFlattenedResults {
		qconf.FlattenResults = &f
		// DisableFlattenResults implies AllowLargeResults.
		qconf.AllowLargeResults = true
	}
	if qc.UseStandardSQL && qc.UseLegacySQL {
		return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
	}
	if len(qc.Parameters) > 0 && qc.UseLegacySQL {
		return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
	}
	if qc.UseLegacySQL {
		qconf.UseLegacySql = true
	} else {
		qconf.UseLegacySql = false
		qconf.ForceSendFields = append(qconf.ForceSendFields, "UseLegacySql")
	}
	if qc.Dst != nil && !qc.Dst.implicitTable() {
		qconf.DestinationTable = qc.Dst.toBQ()
	}
	for _, p := range qc.Parameters {
		qp, err := p.toBQ()
		if err != nil {
			return nil, err
		}
		qconf.QueryParameters = append(qconf.QueryParameters, qp)
	}
	return &bq.JobConfiguration{
		Labels: qc.Labels,
		DryRun: qc.DryRun,
		Query:  qconf,
	}, nil
}

func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
	qq := q.Query
	qc := &QueryConfig{
		Labels:            q.Labels,
		DryRun:            q.DryRun,
		Q:                 qq.Query,
		CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
		WriteDisposition:  TableWriteDisposition(qq.WriteDisposition),
		AllowLargeResults: qq.AllowLargeResults,
		Priority:          QueryPriority(qq.Priority),
		MaxBytesBilled:    qq.MaximumBytesBilled,
		UseLegacySQL:      qq.UseLegacySql,
		UseStandardSQL:    !qq.UseLegacySql,
		TimePartitioning:  bqToTimePartitioning(qq.TimePartitioning),
	}
	if len(qq.TableDefinitions) > 0 {
		qc.TableDefinitions = make(map[string]ExternalData)
	}
	for name, qedc := range qq.TableDefinitions {
		edc, err := bqToExternalDataConfig(&qedc)
		if err != nil {
			return nil, err
		}
		qc.TableDefinitions[name] = edc
	}
	if qq.DefaultDataset != nil {
		qc.DefaultProjectID = qq.DefaultDataset.ProjectId
		qc.DefaultDatasetID = qq.DefaultDataset.DatasetId
	}
	if qq.MaximumBillingTier != nil {
		qc.MaxBillingTier = int(*qq.MaximumBillingTier)
	}
	if qq.UseQueryCache != nil && !*qq.UseQueryCache {
		qc.DisableQueryCache = true
	}
	if qq.FlattenResults != nil && !*qq.FlattenResults {
		qc.DisableFlattenedResults = true
	}
	if qq.DestinationTable != nil {
		qc.Dst = bqToTable(qq.DestinationTable, c)
	}
	for _, qp := range qq.QueryParameters {
		p, err := bqToQueryParameter(qp)
		if err != nil {
			return nil, err
		}
		qc.Parameters = append(qc.Parameters, p)
	}
	return qc, nil
}

// QueryPriority specifies a priority with which a query is to be executed.
type QueryPriority string

const (
	BatchPriority       QueryPriority = "BATCH"
	InteractivePriority QueryPriority = "INTERACTIVE"
)

// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
	JobIDConfig
	QueryConfig
	client *Client
}

// Query creates a query with string q.
// The returned Query may optionally be further configured before its Run method is called.
func (c *Client) Query(q string) *Query {
	return &Query{
		client:      c,
		QueryConfig: QueryConfig{Q: q},
	}
}

// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (*Job, error) {
	job, err := q.newJob()
	if err != nil {
		return nil, err
	}
	j, err := q.client.insertJob(ctx, job, nil)
	if err != nil {
		return nil, err
	}
	return j, nil
}

func (q *Query) newJob() (*bq.Job, error) {
	config, err := q.QueryConfig.toBQ()
	if err != nil {
		return nil, err
	}
	return &bq.Job{
		JobReference:  q.JobIDConfig.createJobRef(q.client),
		Configuration: config,
	}, nil
}

// Read submits a query for execution and returns the results via a RowIterator.
// It is a shorthand for Query.Run followed by Job.Read.
func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
	job, err := q.Run(ctx)
	if err != nil {
		return nil, err
	}
	return job.Read(ctx)
}
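
A sketch of how several QueryConfig fields combine in practice; client, ctx, and the dataset and table names are placeholders carried over from the earlier sketch, not values from this repository:

	q := client.Query("SELECT name FROM big.table")
	q.Dst = client.Dataset("dataset-id").Table("results") // write results to a table
	q.WriteDisposition = bigquery.WriteTruncate           // replace any existing contents
	q.Priority = bigquery.BatchPriority                   // schedule as a batch query
	job, err := q.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Job.Read waits for the query to complete and returns its rows.
	it, err := job.Read(ctx)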
402
vendor/cloud.google.com/go/bigquery/query_test.go
generated
vendored
@@ -1,402 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"

	"cloud.google.com/go/internal/testutil"

	bq "google.golang.org/api/bigquery/v2"
)

func defaultQueryJob() *bq.Job {
	return &bq.Job{
		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
		Configuration: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				DestinationTable: &bq.TableReference{
					ProjectId: "client-project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				Query: "query string",
				DefaultDataset: &bq.DatasetReference{
					ProjectId: "def-project-id",
					DatasetId: "def-dataset-id",
				},
				UseLegacySql:    false,
				ForceSendFields: []string{"UseLegacySql"},
			},
		},
	}
}

var defaultQuery = &QueryConfig{
	Q:                "query string",
	DefaultProjectID: "def-project-id",
	DefaultDatasetID: "def-dataset-id",
}

func TestQuery(t *testing.T) {
	defer fixRandomID("RANDOM")()
	c := &Client{
		projectID: "client-project-id",
	}
	testCases := []struct {
		dst         *Table
		src         *QueryConfig
		jobIDConfig JobIDConfig
		want        *bq.Job
	}{
		{
			dst:  c.Dataset("dataset-id").Table("table-id"),
			src:  defaultQuery,
			want: defaultQueryJob(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:      "query string",
				Labels: map[string]string{"a": "b"},
				DryRun: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Labels = map[string]string{"a": "b"}
				j.Configuration.DryRun = true
				j.Configuration.Query.DefaultDataset = nil
				return j
			}(),
		},
		{
			dst:         c.Dataset("dataset-id").Table("table-id"),
			jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true},
			src:         &QueryConfig{Q: "query string"},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DefaultDataset = nil
				j.JobReference.JobId = "jobID-RANDOM"
				return j
			}(),
		},
		{
			dst: &Table{},
			src: defaultQuery,
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DestinationTable = nil
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q: "query string",
				TableDefinitions: map[string]ExternalData{
					"atable": func() *GCSReference {
						g := NewGCSReference("uri")
						g.AllowJaggedRows = true
						g.AllowQuotedNewlines = true
						g.Compression = Gzip
						g.Encoding = UTF_8
						g.FieldDelimiter = ";"
						g.IgnoreUnknownValues = true
						g.MaxBadRecords = 1
						g.Quote = "'"
						g.SkipLeadingRows = 2
						g.Schema = Schema{{Name: "name", Type: StringFieldType}}
						return g
					}(),
				},
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DefaultDataset = nil
				td := make(map[string]bq.ExternalDataConfiguration)
				quote := "'"
				td["atable"] = bq.ExternalDataConfiguration{
					Compression:         "GZIP",
					IgnoreUnknownValues: true,
					MaxBadRecords:       1,
					SourceFormat:        "CSV", // must be explicitly set.
					SourceUris:          []string{"uri"},
					CsvOptions: &bq.CsvOptions{
						AllowJaggedRows:     true,
						AllowQuotedNewlines: true,
						Encoding:            "UTF-8",
						FieldDelimiter:      ";",
						SkipLeadingRows:     2,
						Quote:               &quote,
					},
					Schema: &bq.TableSchema{
						Fields: []*bq.TableFieldSchema{
							{Name: "name", Type: "STRING"},
						},
					},
				}
				j.Configuration.Query.TableDefinitions = td
				return j
			}(),
		},
		{
			dst: &Table{
				ProjectID: "project-id",
				DatasetID: "dataset-id",
				TableID:   "table-id",
			},
			src: &QueryConfig{
				Q:                 "query string",
				DefaultProjectID:  "def-project-id",
				DefaultDatasetID:  "def-dataset-id",
				CreateDisposition: CreateNever,
				WriteDisposition:  WriteTruncate,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DestinationTable.ProjectId = "project-id"
				j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
				j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                 "query string",
				DefaultProjectID:  "def-project-id",
				DefaultDatasetID:  "def-dataset-id",
				DisableQueryCache: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				f := false
				j.Configuration.Query.UseQueryCache = &f
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                 "query string",
				DefaultProjectID:  "def-project-id",
				DefaultDatasetID:  "def-dataset-id",
				AllowLargeResults: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.AllowLargeResults = true
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                       "query string",
				DefaultProjectID:        "def-project-id",
				DefaultDatasetID:        "def-dataset-id",
				DisableFlattenedResults: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				f := false
				j.Configuration.Query.FlattenResults = &f
				j.Configuration.Query.AllowLargeResults = true
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				Priority:         QueryPriority("low"),
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.Priority = "low"
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				MaxBillingTier:   3,
				MaxBytesBilled:   5,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				tier := int64(3)
				j.Configuration.Query.MaximumBillingTier = &tier
				j.Configuration.Query.MaximumBytesBilled = 5
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				UseStandardSQL:   true,
			},
			want: defaultQueryJob(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				UseLegacySQL:     true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.UseLegacySql = true
				j.Configuration.Query.ForceSendFields = nil
				return j
			}(),
		},
	}
	for i, tc := range testCases {
		query := c.Query("")
		query.JobIDConfig = tc.jobIDConfig
		query.QueryConfig = *tc.src
		query.Dst = tc.dst
		got, err := query.newJob()
		if err != nil {
			t.Errorf("#%d: err calling query: %v", i, err)
			continue
		}
		checkJob(t, i, got, tc.want)

		// Round-trip.
		jc, err := bqToJobConfig(got.Configuration, c)
		if err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		wantConfig := query.QueryConfig
		// We set AllowLargeResults to true when DisableFlattenedResults is true.
		if wantConfig.DisableFlattenedResults {
			wantConfig.AllowLargeResults = true
		}
		// A QueryConfig with neither UseXXXSQL field set is equivalent
		// to one where UseStandardSQL = true.
		if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL {
			wantConfig.UseStandardSQL = true
		}
		// Treat nil and empty tables the same, and ignore the client.
		tableEqual := func(t1, t2 *Table) bool {
			if t1 == nil {
				t1 = &Table{}
			}
			if t2 == nil {
				t2 = &Table{}
			}
			return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID
		}
		// A table definition that is a GCSReference round-trips as an ExternalDataConfig.
		// TODO(jba): see if there is a way to express this with a transformer.
		gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig {
			q := g.toBQ()
			e, _ := bqToExternalDataConfig(&q)
			return e
		}
		externalDataEqual := func(e1, e2 ExternalData) bool {
			if r, ok := e1.(*GCSReference); ok {
				e1 = gcsRefToEDC(r)
			}
			if r, ok := e2.(*GCSReference); ok {
				e2 = gcsRefToEDC(r)
			}
			return cmp.Equal(e1, e2)
		}
		diff := testutil.Diff(jc.(*QueryConfig), &wantConfig,
			cmp.Comparer(tableEqual),
			cmp.Comparer(externalDataEqual),
		)
		if diff != "" {
			t.Errorf("#%d: (got=-, want=+):\n%s", i, diff)
		}
	}
}

func TestConfiguringQuery(t *testing.T) {
	c := &Client{
		projectID: "project-id",
	}

	query := c.Query("q")
	query.JobID = "ajob"
	query.DefaultProjectID = "def-project-id"
	query.DefaultDatasetID = "def-dataset-id"
	query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
	query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"}
	// Note: Other configuration fields are tested in other tests above.
	// A lot of that can be consolidated once Client.Copy is gone.

	want := &bq.Job{
		Configuration: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				Query: "q",
				DefaultDataset: &bq.DatasetReference{
					ProjectId: "def-project-id",
					DatasetId: "def-dataset-id",
				},
				UseLegacySql:                       false,
				TimePartitioning:                   &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
				DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
				ForceSendFields:                    []string{"UseLegacySql"},
			},
		},
		JobReference: &bq.JobReference{
			JobId:     "ajob",
			ProjectId: "project-id",
		},
	}

	got, err := query.newJob()
	if err != nil {
		t.Fatalf("err calling Query.newJob: %v", err)
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("querying: -got +want:\n%s", diff)
	}
}

func TestQueryLegacySQL(t *testing.T) {
	c := &Client{projectID: "project-id"}
	q := c.Query("q")
	q.UseStandardSQL = true
	q.UseLegacySQL = true
	_, err := q.newJob()
	if err == nil {
		t.Error("UseStandardSQL and UseLegacySQL: got nil, want error")
	}
	q = c.Query("q")
	q.Parameters = []QueryParameter{{Name: "p", Value: 3}}
	q.UseLegacySQL = true
	_, err = q.newJob()
	if err == nil {
		t.Error("Parameters and UseLegacySQL: got nil, want error")
	}
}
235
vendor/cloud.google.com/go/bigquery/read_test.go
generated
vendored
@@ -1,235 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"testing"

	"github.com/google/go-cmp/cmp"

	"cloud.google.com/go/internal/testutil"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/iterator"
)

type pageFetcherArgs struct {
	table      *Table
	schema     Schema
	startIndex uint64
	pageSize   int64
	pageToken  string
}

// pageFetcherReadStub services read requests by returning data from an in-memory list of values.
type pageFetcherReadStub struct {
	// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
	values     [][][]Value       // contains pages / rows / columns.
	pageTokens map[string]string // maps incoming page token to returned page token.

	// arguments are recorded for later inspection.
	calls []pageFetcherArgs
}

func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
	s.calls = append(s.calls,
		pageFetcherArgs{t, schema, startIndex, pageSize, pageToken})
	result := &fetchPageResult{
		pageToken: s.pageTokens[pageToken],
		rows:      s.values[0],
	}
	s.values = s.values[1:]
	return result, nil
}

func waitForQueryStub(context.Context, string) (Schema, error) {
	return nil, nil
}

func TestRead(t *testing.T) {
	// The data for the service stub to return is populated for each test case in the testCases for loop.
	ctx := context.Background()
	c := &Client{projectID: "project-id"}
	pf := &pageFetcherReadStub{}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		c:         c,
		config: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				DestinationTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
			},
		},
	}

	for _, readFunc := range []func() *RowIterator{
		func() *RowIterator {
			return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage)
		},
		func() *RowIterator {
			it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage)
			if err != nil {
				t.Fatal(err)
			}
			return it
		},
	} {
		testCases := []struct {
			data       [][][]Value
			pageTokens map[string]string
			want       [][]Value
		}{
			{
				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
				pageTokens: map[string]string{"": "a", "a": ""},
				want:       [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
			},
			{
				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
				pageTokens: map[string]string{"": ""}, // no more pages after first one.
				want:       [][]Value{{1, 2}, {11, 12}},
			},
		}
		for _, tc := range testCases {
			pf.values = tc.data
			pf.pageTokens = tc.pageTokens
			if got, ok := collectValues(t, readFunc()); ok {
				if !testutil.Equal(got, tc.want) {
					t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
				}
			}
		}
	}
}

func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) {
	var got [][]Value
	for {
		var vals []Value
		err := it.Next(&vals)
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Errorf("err calling Next: %v", err)
			return nil, false
		}
		got = append(got, vals)
	}
	return got, true
}

func TestNoMoreValues(t *testing.T) {
	c := &Client{projectID: "project-id"}
	pf := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}, {11, 12}}},
	}
	it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage)
	var vals []Value
	// We expect to retrieve two values and then fail on the next attempt.
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	if err := it.Next(&vals); err != iterator.Done {
		t.Fatalf("Next: got: %v: want: iterator.Done", err)
	}
}

var errBang = errors.New("bang!")

func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) {
	return nil, errBang
}

func TestReadError(t *testing.T) {
	// test that service read errors are propagated back to the caller.
	c := &Client{projectID: "project-id"}
	it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage)
	var vals []Value
	if err := it.Next(&vals); err != errBang {
		t.Fatalf("Get: got: %v: want: %v", err, errBang)
	}
}

func TestReadTabledataOptions(t *testing.T) {
	// test that read options are propagated.
	s := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}}},
	}
	c := &Client{projectID: "project-id"}
	tr := c.Dataset("dataset-id").Table("table-id")
	it := tr.read(context.Background(), s.fetchPage)
	it.PageInfo().MaxSize = 5
	var vals []Value
	if err := it.Next(&vals); err != nil {
		t.Fatal(err)
	}
	want := []pageFetcherArgs{{
		table:     tr,
		pageSize:  5,
		pageToken: "",
	}}
	if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" {
		t.Errorf("reading (got=-, want=+):\n%s", diff)
	}
}

func TestReadQueryOptions(t *testing.T) {
	// test that read options are propagated.
	c := &Client{projectID: "project-id"}
	pf := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}}},
	}
	tr := &bq.TableReference{
		ProjectId: "project-id",
		DatasetId: "dataset-id",
		TableId:   "table-id",
	}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		c:         c,
		config: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{DestinationTable: tr},
		},
	}
	it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage)
	if err != nil {
		t.Fatalf("err calling Read: %v", err)
	}
	it.PageInfo().MaxSize = 5
	var vals []Value
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}

	want := []pageFetcherArgs{{
		table:     bqToTable(tr, c),
		pageSize:  5,
		pageToken: "",
	}}
	if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want)
	}
}
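
The read path tested above is the one callers see through RowIterator. A typical consumption loop, sketched with placeholder names (it comes from Job.Read or Query.Read; iterator is google.golang.org/api/iterator, imported by the test above), stops on iterator.Done:

	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break // all pages have been fetched
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(row)
	}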
388
vendor/cloud.google.com/go/bigquery/schema.go
generated
vendored
@ -1,388 +0,0 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"cloud.google.com/go/internal/atomiccache"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// Schema describes the fields in a table or query result.
|
||||
type Schema []*FieldSchema
|
||||
|
||||
type FieldSchema struct {
|
||||
// The field name.
|
||||
// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
|
||||
// and must start with a letter or underscore.
|
||||
// The maximum length is 128 characters.
|
||||
Name string
|
||||
|
||||
// A description of the field. The maximum length is 16,384 characters.
|
||||
Description string
|
||||
|
||||
// Whether the field may contain multiple values.
|
||||
Repeated bool
|
||||
// Whether the field is required. Ignored if Repeated is true.
|
||||
Required bool
|
||||
|
||||
// The field data type. If Type is Record, then this field contains a nested schema,
|
||||
// which is described by Schema.
|
||||
Type FieldType
|
||||
// Describes the nested schema if Type is set to Record.
|
||||
Schema Schema
|
||||
}
|
||||
|
||||
func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
|
||||
tfs := &bq.TableFieldSchema{
|
||||
Description: fs.Description,
|
||||
Name: fs.Name,
|
||||
Type: string(fs.Type),
|
||||
}
|
||||
|
||||
if fs.Repeated {
|
||||
tfs.Mode = "REPEATED"
|
||||
} else if fs.Required {
|
||||
tfs.Mode = "REQUIRED"
|
||||
} // else leave as default, which is interpreted as NULLABLE.
|
||||
|
||||
for _, f := range fs.Schema {
|
||||
tfs.Fields = append(tfs.Fields, f.toBQ())
|
||||
}
|
||||
|
||||
return tfs
|
||||
}
|
||||
|
||||
func (s Schema) toBQ() *bq.TableSchema {
|
||||
var fields []*bq.TableFieldSchema
|
||||
for _, f := range s {
|
||||
fields = append(fields, f.toBQ())
|
||||
}
|
||||
return &bq.TableSchema{Fields: fields}
|
||||
}
|
||||
|
||||
func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
|
||||
fs := &FieldSchema{
|
||||
Description: tfs.Description,
|
||||
Name: tfs.Name,
|
||||
Repeated: tfs.Mode == "REPEATED",
|
||||
Required: tfs.Mode == "REQUIRED",
|
||||
Type: FieldType(tfs.Type),
|
||||
}
|
||||
|
||||
for _, f := range tfs.Fields {
|
||||
fs.Schema = append(fs.Schema, bqToFieldSchema(f))
|
||||
}
|
||||
return fs
|
||||
}
|
||||
|
||||
func bqToSchema(ts *bq.TableSchema) Schema {
|
||||
if ts == nil {
|
||||
return nil
|
||||
}
|
||||
var s Schema
|
||||
for _, f := range ts.Fields {
|
||||
s = append(s, bqToFieldSchema(f))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
type FieldType string
|
||||
|
||||
const (
|
||||
StringFieldType FieldType = "STRING"
|
||||
BytesFieldType FieldType = "BYTES"
|
||||
IntegerFieldType FieldType = "INTEGER"
|
||||
FloatFieldType FieldType = "FLOAT"
|
||||
BooleanFieldType FieldType = "BOOLEAN"
|
||||
TimestampFieldType FieldType = "TIMESTAMP"
|
||||
RecordFieldType FieldType = "RECORD"
|
||||
DateFieldType FieldType = "DATE"
|
||||
TimeFieldType FieldType = "TIME"
|
||||
DateTimeFieldType FieldType = "DATETIME"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoStruct = errors.New("bigquery: can only infer schema from struct or pointer to struct")
|
||||
errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
|
||||
errInvalidFieldName = errors.New("bigquery: invalid name of field in struct")
|
||||
errBadNullable = errors.New(`bigquery: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`)
|
||||
)
|
||||
|
||||
var typeOfByteSlice = reflect.TypeOf([]byte{})
|
||||
|
||||
// InferSchema tries to derive a BigQuery schema from the supplied struct value.
|
||||
// Each exported struct field is mapped to a field in the schema.
|
||||
//
|
||||
// The following BigQuery types are inferred from the corresponding Go types.
|
||||
// (This is the same mapping as that used for RowIterator.Next.) Fields inferred
|
||||
// from these types are marked required (non-nullable).
|
||||
//
|
||||
// STRING string
|
||||
// BOOL bool
|
||||
// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32
|
||||
// FLOAT float32, float64
|
||||
// BYTES []byte
|
||||
// TIMESTAMP time.Time
|
||||
// DATE civil.Date
|
||||
// TIME civil.Time
|
||||
// DATETIME civil.DateTime
|
||||
//
|
||||
// A Go slice or array type is inferred to be a BigQuery repeated field of the
|
||||
// element type. The element type must be one of the above listed types.
|
||||
//
|
||||
// Nullable fields are inferred from the NullXXX types, declared in this package:
|
||||
//
|
||||
// STRING NullString
|
||||
// BOOL NullBool
|
||||
// INTEGER NullInt64
|
||||
// FLOAT NullFloat64
|
||||
// TIMESTAMP NullTimestamp
|
||||
// DATE NullDate
|
||||
// TIME NullTime
|
||||
// DATETIME NullDateTime
|
||||
|
||||
// For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below).
|
||||
//
|
||||
// A struct field that is of struct type is inferred to be a required field of type
|
||||
// RECORD with a schema inferred recursively. For backwards compatibility, a field of
|
||||
// type pointer to struct is also inferred to be required. To get a nullable RECORD
|
||||
// field, use the "nullable" tag (see below).
|
||||
//
|
||||
// InferSchema returns an error if any of the examined fields is of type uint,
|
||||
// uint64, uintptr, map, interface, complex64, complex128, func, or chan. Future
|
||||
// versions may handle these cases without error.
|
||||
//
|
||||
// Recursively defined structs are also disallowed.
|
||||
//
|
||||
// Struct fields may be tagged in a way similar to the encoding/json package.
|
||||
// A tag of the form
|
||||
// bigquery:"name"
|
||||
// uses "name" instead of the struct field name as the BigQuery field name.
|
||||
// A tag of the form
|
||||
// bigquery:"-"
|
||||
// omits the field from the inferred schema.
|
||||
// The "nullable" option marks the field as nullable (not required). It is only
|
||||
// needed for []byte and pointer-to-struct fields, and cannot appear on other
|
||||
// fields. In this example, the Go name of the field is retained:
|
||||
// bigquery:",nullable"
|
||||
func InferSchema(st interface{}) (Schema, error) {
|
||||
return inferSchemaReflectCached(reflect.TypeOf(st))
|
||||
}
|
||||
|
||||
// TODO(jba): replace with sync.Map for Go 1.9.
|
||||
var schemaCache atomiccache.Cache
|
||||
|
||||
type cacheVal struct {
|
||||
schema Schema
|
||||
err error
|
||||
}
|
||||
|
||||
func inferSchemaReflectCached(t reflect.Type) (Schema, error) {
|
||||
cv := schemaCache.Get(t, func() interface{} {
|
||||
s, err := inferSchemaReflect(t)
|
||||
return cacheVal{s, err}
|
||||
}).(cacheVal)
|
||||
return cv.schema, cv.err
|
||||
}
|
||||
|
||||
func inferSchemaReflect(t reflect.Type) (Schema, error) {
|
||||
rec, err := hasRecursiveType(t, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rec {
|
||||
return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t)
|
||||
}
|
||||
return inferStruct(t)
|
||||
}
|
||||
|
||||
func inferStruct(t reflect.Type) (Schema, error) {
|
||||
switch t.Kind() {
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() != reflect.Struct {
|
||||
return nil, errNoStruct
|
||||
}
|
||||
t = t.Elem()
|
||||
fallthrough
|
||||
|
||||
case reflect.Struct:
|
||||
return inferFields(t)
|
||||
default:
|
||||
return nil, errNoStruct
|
||||
}
|
||||
}
|
||||
|
||||

// inferFieldSchema infers the FieldSchema for a Go type.
func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
    // Only []byte and struct pointers can be tagged nullable.
    if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
        return nil, errBadNullable
    }
    switch rt {
    case typeOfByteSlice:
        return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
    case typeOfGoTime:
        return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
    case typeOfDate:
        return &FieldSchema{Required: true, Type: DateFieldType}, nil
    case typeOfTime:
        return &FieldSchema{Required: true, Type: TimeFieldType}, nil
    case typeOfDateTime:
        return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
    }
    if ft := nullableFieldType(rt); ft != "" {
        return &FieldSchema{Required: false, Type: ft}, nil
    }
    if isSupportedIntType(rt) || isSupportedUintType(rt) {
        return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
    }
    switch rt.Kind() {
    case reflect.Slice, reflect.Array:
        et := rt.Elem()
        if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
            // Multi-dimensional slices/arrays are not supported by BigQuery.
            return nil, errUnsupportedFieldType
        }
        if nullableFieldType(et) != "" {
            // Repeated nullable types are not supported by BigQuery.
            return nil, errUnsupportedFieldType
        }
        f, err := inferFieldSchema(et, false)
        if err != nil {
            return nil, err
        }
        f.Repeated = true
        f.Required = false
        return f, nil
    case reflect.Ptr:
        if rt.Elem().Kind() != reflect.Struct {
            return nil, errUnsupportedFieldType
        }
        fallthrough
    case reflect.Struct:
        nested, err := inferStruct(rt)
        if err != nil {
            return nil, err
        }
        return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
    case reflect.String:
        return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
    case reflect.Bool:
        return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
    case reflect.Float32, reflect.Float64:
        return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
    default:
        return nil, errUnsupportedFieldType
    }
}

// inferFields extracts all exported field types from a struct type.
func inferFields(rt reflect.Type) (Schema, error) {
    var s Schema
    fields, err := fieldCache.Fields(rt)
    if err != nil {
        return nil, err
    }
    for _, field := range fields {
        var nullable bool
        for _, opt := range field.ParsedTag.([]string) {
            if opt == nullableTagOption {
                nullable = true
                break
            }
        }
        f, err := inferFieldSchema(field.Type, nullable)
        if err != nil {
            return nil, err
        }
        f.Name = field.Name
        s = append(s, f)
    }
    return s, nil
}

// isSupportedIntType reports whether t is an int type that can be properly
// represented by the BigQuery INTEGER/INT64 type.
func isSupportedIntType(t reflect.Type) bool {
    switch t.Kind() {
    case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
        return true
    default:
        return false
    }
}

// isSupportedUintType reports whether t is a uint type that can be properly
// represented by the BigQuery INTEGER/INT64 type.
func isSupportedUintType(t reflect.Type) bool {
    switch t.Kind() {
    case reflect.Uint8, reflect.Uint16, reflect.Uint32:
        return true
    default:
        return false
    }
}
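
// The asymmetry between the two predicates above reflects that BigQuery's
// INTEGER type is a signed 64-bit value: uint8, uint16 and uint32 always fit,
// while uint, uint64 and uintptr may not, so InferSchema rejects those (see
// its documentation). A sketch of the observable behavior:
//
//	_, err := bigquery.InferSchema(struct{ N uint64 }{})
//	// err is expected to be non-nil: uint64 is an unsupported field type.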

// typeList is a linked list of reflect.Types.
type typeList struct {
    t    reflect.Type
    next *typeList
}

func (l *typeList) has(t reflect.Type) bool {
    for l != nil {
        if l.t == t {
            return true
        }
        l = l.next
    }
    return false
}

// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
// via exported fields. (Schema inference ignores unexported fields.)
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
    for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
        t = t.Elem()
    }
    if t.Kind() != reflect.Struct {
        return false, nil
    }
    if seen.has(t) {
        return true, nil
    }
    fields, err := fieldCache.Fields(t)
    if err != nil {
        return false, err
    }
    seen = &typeList{t, seen}
    // Because seen is a linked list, additions to it from one field's
    // recursive call will not affect the value for subsequent fields' calls.
    for _, field := range fields {
        ok, err := hasRecursiveType(field.Type, seen)
        if err != nil {
            return false, err
        }
        if ok {
            return true, nil
        }
    }
    return false, nil
}
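
// Because seen is extended by allocating a new head rather than by mutation,
// one field's recursive check cannot leak the types it visited into a
// sibling's check. A sketch with hypothetical types:
//
//	type leaf struct{ A string }
//	type twice struct{ X, Y leaf } // two fields of one type: not recursive
//	type cycle struct{ Next *cycle }
//
//	// InferSchema(twice{}) should succeed, while InferSchema(cycle{}) should
//	// fail with the "schema inference for recursive type" error above.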
897 vendor/cloud.google.com/go/bigquery/schema_test.go generated vendored
@@ -1,897 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "fmt"
    "reflect"
    "testing"
    "time"

    "cloud.google.com/go/civil"
    "cloud.google.com/go/internal/pretty"
    "cloud.google.com/go/internal/testutil"

    bq "google.golang.org/api/bigquery/v2"
)

func (fs *FieldSchema) GoString() string {
    if fs == nil {
        return "<nil>"
    }

    return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}",
        fs.Name,
        fs.Description,
        fs.Repeated,
        fs.Required,
        fs.Type,
        fmt.Sprintf("%#v", fs.Schema),
    )
}

func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema {
    return &bq.TableFieldSchema{
        Description: desc,
        Name:        name,
        Mode:        mode,
        Type:        typ,
    }
}

func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema {
    return &FieldSchema{
        Description: desc,
        Name:        name,
        Repeated:    repeated,
        Required:    required,
        Type:        FieldType(typ),
    }
}

func TestSchemaConversion(t *testing.T) {
    testCases := []struct {
        schema   Schema
        bqSchema *bq.TableSchema
    }{
        {
            // required
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "STRING", false, true),
            },
        },
        {
            // repeated
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "STRING", "REPEATED"),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "STRING", true, false),
            },
        },
        {
            // nullable, string
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "STRING", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "STRING", false, false),
            },
        },
        {
            // integer
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "INTEGER", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "INTEGER", false, false),
            },
        },
        {
            // float
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "FLOAT", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "FLOAT", false, false),
            },
        },
        {
            // boolean
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "BOOLEAN", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "BOOLEAN", false, false),
            },
        },
        {
            // timestamp
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "name", "TIMESTAMP", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "name", "TIMESTAMP", false, false),
            },
        },
        {
            // civil times
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    bqTableFieldSchema("desc", "f1", "TIME", ""),
                    bqTableFieldSchema("desc", "f2", "DATE", ""),
                    bqTableFieldSchema("desc", "f3", "DATETIME", ""),
                },
            },
            schema: Schema{
                fieldSchema("desc", "f1", "TIME", false, false),
                fieldSchema("desc", "f2", "DATE", false, false),
                fieldSchema("desc", "f3", "DATETIME", false, false),
            },
        },
        {
            // nested
            bqSchema: &bq.TableSchema{
                Fields: []*bq.TableFieldSchema{
                    {
                        Description: "An outer schema wrapping a nested schema",
                        Name:        "outer",
                        Mode:        "REQUIRED",
                        Type:        "RECORD",
                        Fields: []*bq.TableFieldSchema{
                            bqTableFieldSchema("inner field", "inner", "STRING", ""),
                        },
                    },
                },
            },
            schema: Schema{
                &FieldSchema{
                    Description: "An outer schema wrapping a nested schema",
                    Name:        "outer",
                    Required:    true,
                    Type:        "RECORD",
                    Schema: Schema{
                        {
                            Description: "inner field",
                            Name:        "inner",
                            Type:        "STRING",
                        },
                    },
                },
            },
        },
    }

    for _, tc := range testCases {
        bqSchema := tc.schema.toBQ()
        if !testutil.Equal(bqSchema, tc.bqSchema) {
            t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
                pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
        }
        schema := bqToSchema(tc.bqSchema)
        if !testutil.Equal(schema, tc.schema) {
            t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
        }
    }
}
type allStrings struct {
    String    string
    ByteSlice []byte
}

type allSignedIntegers struct {
    Int64 int64
    Int32 int32
    Int16 int16
    Int8  int8
    Int   int
}

type allUnsignedIntegers struct {
    Uint32 uint32
    Uint16 uint16
    Uint8  uint8
}

type allFloat struct {
    Float64 float64
    Float32 float32
    // NOTE: complex64 and complex128 are unsupported by BigQuery
}

type allBoolean struct {
    Bool bool
}

type allTime struct {
    Timestamp time.Time
    Time      civil.Time
    Date      civil.Date
    DateTime  civil.DateTime
}

func reqField(name, typ string) *FieldSchema {
    return &FieldSchema{
        Name:     name,
        Type:     FieldType(typ),
        Required: true,
    }
}

func optField(name, typ string) *FieldSchema {
    return &FieldSchema{
        Name:     name,
        Type:     FieldType(typ),
        Required: false,
    }
}
func TestSimpleInference(t *testing.T) {
    testCases := []struct {
        in   interface{}
        want Schema
    }{
        {
            in: allSignedIntegers{},
            want: Schema{
                reqField("Int64", "INTEGER"),
                reqField("Int32", "INTEGER"),
                reqField("Int16", "INTEGER"),
                reqField("Int8", "INTEGER"),
                reqField("Int", "INTEGER"),
            },
        },
        {
            in: allUnsignedIntegers{},
            want: Schema{
                reqField("Uint32", "INTEGER"),
                reqField("Uint16", "INTEGER"),
                reqField("Uint8", "INTEGER"),
            },
        },
        {
            in: allFloat{},
            want: Schema{
                reqField("Float64", "FLOAT"),
                reqField("Float32", "FLOAT"),
            },
        },
        {
            in: allBoolean{},
            want: Schema{
                reqField("Bool", "BOOLEAN"),
            },
        },
        {
            in: &allBoolean{},
            want: Schema{
                reqField("Bool", "BOOLEAN"),
            },
        },
        {
            in: allTime{},
            want: Schema{
                reqField("Timestamp", "TIMESTAMP"),
                reqField("Time", "TIME"),
                reqField("Date", "DATE"),
                reqField("DateTime", "DATETIME"),
            },
        },
        {
            in: allStrings{},
            want: Schema{
                reqField("String", "STRING"),
                reqField("ByteSlice", "BYTES"),
            },
        },
    }
    for _, tc := range testCases {
        got, err := InferSchema(tc.in)
        if err != nil {
            t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
        }
        if !testutil.Equal(got, tc.want) {
            t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
                pretty.Value(got), pretty.Value(tc.want))
        }
    }
}
type containsNested struct {
    hidden    string
    NotNested int
    Nested    struct {
        Inside int
    }
}

type containsDoubleNested struct {
    NotNested int
    Nested    struct {
        InsideNested struct {
            Inside int
        }
    }
}

type ptrNested struct {
    Ptr *struct{ Inside int }
}

type dup struct { // more than one field of the same struct type
    A, B allBoolean
}

func TestNestedInference(t *testing.T) {
    testCases := []struct {
        in   interface{}
        want Schema
    }{
        {
            in: containsNested{},
            want: Schema{
                reqField("NotNested", "INTEGER"),
                &FieldSchema{
                    Name:     "Nested",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Inside", "INTEGER")},
                },
            },
        },
        {
            in: containsDoubleNested{},
            want: Schema{
                reqField("NotNested", "INTEGER"),
                &FieldSchema{
                    Name:     "Nested",
                    Required: true,
                    Type:     "RECORD",
                    Schema: Schema{
                        {
                            Name:     "InsideNested",
                            Required: true,
                            Type:     "RECORD",
                            Schema:   Schema{reqField("Inside", "INTEGER")},
                        },
                    },
                },
            },
        },
        {
            in: ptrNested{},
            want: Schema{
                &FieldSchema{
                    Name:     "Ptr",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Inside", "INTEGER")},
                },
            },
        },
        {
            in: dup{},
            want: Schema{
                &FieldSchema{
                    Name:     "A",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Bool", "BOOLEAN")},
                },
                &FieldSchema{
                    Name:     "B",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Bool", "BOOLEAN")},
                },
            },
        },
    }

    for _, tc := range testCases {
        got, err := InferSchema(tc.in)
        if err != nil {
            t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
        }
        if !testutil.Equal(got, tc.want) {
            t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
                pretty.Value(got), pretty.Value(tc.want))
        }
    }
}
type repeated struct {
    NotRepeated       []byte
    RepeatedByteSlice [][]byte
    Slice             []int
    Array             [5]bool
}

type nestedRepeated struct {
    NotRepeated int
    Repeated    []struct {
        Inside int
    }
    RepeatedPtr []*struct{ Inside int }
}

func repField(name, typ string) *FieldSchema {
    return &FieldSchema{
        Name:     name,
        Type:     FieldType(typ),
        Repeated: true,
    }
}

func TestRepeatedInference(t *testing.T) {
    testCases := []struct {
        in   interface{}
        want Schema
    }{
        {
            in: repeated{},
            want: Schema{
                reqField("NotRepeated", "BYTES"),
                repField("RepeatedByteSlice", "BYTES"),
                repField("Slice", "INTEGER"),
                repField("Array", "BOOLEAN"),
            },
        },
        {
            in: nestedRepeated{},
            want: Schema{
                reqField("NotRepeated", "INTEGER"),
                {
                    Name:     "Repeated",
                    Repeated: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Inside", "INTEGER")},
                },
                {
                    Name:     "RepeatedPtr",
                    Repeated: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("Inside", "INTEGER")},
                },
            },
        },
    }

    for i, tc := range testCases {
        got, err := InferSchema(tc.in)
        if err != nil {
            t.Fatalf("%d: error inferring TableSchema: %v", i, err)
        }
        if !testutil.Equal(got, tc.want) {
            t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
                pretty.Value(got), pretty.Value(tc.want))
        }
    }
}
type allNulls struct {
    A NullInt64
    B NullFloat64
    C NullBool
    D NullString
    E NullTimestamp
    F NullTime
    G NullDate
    H NullDateTime
}

func TestNullInference(t *testing.T) {
    got, err := InferSchema(allNulls{})
    if err != nil {
        t.Fatal(err)
    }
    want := Schema{
        optField("A", "INTEGER"),
        optField("B", "FLOAT"),
        optField("C", "BOOLEAN"),
        optField("D", "STRING"),
        optField("E", "TIMESTAMP"),
        optField("F", "TIME"),
        optField("G", "DATE"),
        optField("H", "DATETIME"),
    }
    if diff := testutil.Diff(got, want); diff != "" {
        t.Error(diff)
    }
}
type Embedded struct {
    Embedded int
}

type embedded struct {
    Embedded2 int
}

type nestedEmbedded struct {
    Embedded
    embedded
}

func TestEmbeddedInference(t *testing.T) {
    got, err := InferSchema(nestedEmbedded{})
    if err != nil {
        t.Fatal(err)
    }
    want := Schema{
        reqField("Embedded", "INTEGER"),
        reqField("Embedded2", "INTEGER"),
    }
    if !testutil.Equal(got, want) {
        t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want))
    }
}

func TestRecursiveInference(t *testing.T) {
    type List struct {
        Val  int
        Next *List
    }

    _, err := InferSchema(List{})
    if err == nil {
        t.Fatal("got nil, want error")
    }
}
type withTags struct {
    NoTag         int
    ExcludeTag    int    `bigquery:"-"`
    SimpleTag     int    `bigquery:"simple_tag"`
    UnderscoreTag int    `bigquery:"_id"`
    MixedCase     int    `bigquery:"MIXEDcase"`
    Nullable      []byte `bigquery:",nullable"`
}

type withTagsNested struct {
    Nested          withTags `bigquery:"nested"`
    NestedAnonymous struct {
        ExcludeTag int `bigquery:"-"`
        Inside     int `bigquery:"inside"`
    } `bigquery:"anon"`
    PNested         *struct{ X int } // not nullable, for backwards compatibility
    PNestedNullable *struct{ X int } `bigquery:",nullable"`
}

type withTagsRepeated struct {
    Repeated          []withTags `bigquery:"repeated"`
    RepeatedAnonymous []struct {
        ExcludeTag int `bigquery:"-"`
        Inside     int `bigquery:"inside"`
    } `bigquery:"anon"`
}

type withTagsEmbedded struct {
    withTags
}

var withTagsSchema = Schema{
    reqField("NoTag", "INTEGER"),
    reqField("simple_tag", "INTEGER"),
    reqField("_id", "INTEGER"),
    reqField("MIXEDcase", "INTEGER"),
    optField("Nullable", "BYTES"),
}

func TestTagInference(t *testing.T) {
    testCases := []struct {
        in   interface{}
        want Schema
    }{
        {
            in:   withTags{},
            want: withTagsSchema,
        },
        {
            in: withTagsNested{},
            want: Schema{
                &FieldSchema{
                    Name:     "nested",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   withTagsSchema,
                },
                &FieldSchema{
                    Name:     "anon",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("inside", "INTEGER")},
                },
                &FieldSchema{
                    Name:     "PNested",
                    Required: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("X", "INTEGER")},
                },
                &FieldSchema{
                    Name:     "PNestedNullable",
                    Required: false,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("X", "INTEGER")},
                },
            },
        },
        {
            in: withTagsRepeated{},
            want: Schema{
                &FieldSchema{
                    Name:     "repeated",
                    Repeated: true,
                    Type:     "RECORD",
                    Schema:   withTagsSchema,
                },
                &FieldSchema{
                    Name:     "anon",
                    Repeated: true,
                    Type:     "RECORD",
                    Schema:   Schema{reqField("inside", "INTEGER")},
                },
            },
        },
        {
            in:   withTagsEmbedded{},
            want: withTagsSchema,
        },
    }
    for i, tc := range testCases {
        got, err := InferSchema(tc.in)
        if err != nil {
            t.Fatalf("%d: error inferring TableSchema: %v", i, err)
        }
        if !testutil.Equal(got, tc.want) {
            t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
                pretty.Value(got), pretty.Value(tc.want))
        }
    }
}
func TestTagInferenceErrors(t *testing.T) {
    testCases := []struct {
        in  interface{}
        err error
    }{
        {
            in: struct {
                LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                UnsupportedStartChar int `bigquery:"øab"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                UnsupportedEndChar int `bigquery:"abø"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                UnsupportedMiddleChar int `bigquery:"aøb"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                StartInt int `bigquery:"1abc"`
            }{},
            err: errInvalidFieldName,
        },
        {
            in: struct {
                Hyphens int `bigquery:"a-b"`
            }{},
            err: errInvalidFieldName,
        },
    }
    for i, tc := range testCases {
        want := tc.err
        _, got := InferSchema(tc.in)
        if got != want {
            t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
        }
    }

    _, err := InferSchema(struct {
        X int `bigquery:",optional"`
    }{})
    if err == nil {
        t.Error("got nil, want error")
    }
}
func TestSchemaErrors(t *testing.T) {
    testCases := []struct {
        in  interface{}
        err error
    }{
        {
            in:  []byte{},
            err: errNoStruct,
        },
        {
            in:  new(int),
            err: errNoStruct,
        },
        {
            in:  struct{ Uint uint }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Uint64 uint64 }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Uintptr uintptr }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Complex complex64 }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Map map[string]int }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Chan chan bool }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Ptr *int }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ Interface interface{} }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ MultiDimensional [][]int }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ MultiDimensional [][][]byte }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ SliceOfPointer []*int }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ SliceOfNull []NullInt64 }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ ChanSlice []chan bool }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ NestedChan struct{ Chan []chan bool } }{},
            err: errUnsupportedFieldType,
        },
        {
            in: struct {
                X int `bigquery:",nullable"`
            }{},
            err: errBadNullable,
        },
        {
            in: struct {
                X bool `bigquery:",nullable"`
            }{},
            err: errBadNullable,
        },
        {
            in: struct {
                X struct{ N int } `bigquery:",nullable"`
            }{},
            err: errBadNullable,
        },
        {
            in: struct {
                X []int `bigquery:",nullable"`
            }{},
            err: errBadNullable,
        },
        {
            in:  struct{ X *[]byte }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ X *[]int }{},
            err: errUnsupportedFieldType,
        },
        {
            in:  struct{ X *int }{},
            err: errUnsupportedFieldType,
        },
    }
    for _, tc := range testCases {
        want := tc.err
        _, got := InferSchema(tc.in)
        if got != want {
            t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want)
        }
    }
}
func TestHasRecursiveType(t *testing.T) {
    type (
        nonStruct int
        nonRec    struct{ A string }
        dup       struct{ A, B nonRec }
        rec       struct {
            A int
            B *rec
        }
        recUnexported struct {
            A int
            b *rec
        }
        hasRec struct {
            A int
            R *rec
        }
        recSlicePointer struct {
            A []*recSlicePointer
        }
    )
    for _, test := range []struct {
        in   interface{}
        want bool
    }{
        {nonStruct(0), false},
        {nonRec{}, false},
        {dup{}, false},
        {rec{}, true},
        {recUnexported{}, false},
        {hasRec{}, true},
        {&recSlicePointer{}, true},
    } {
        got, err := hasRecursiveType(reflect.TypeOf(test.in), nil)
        if err != nil {
            t.Fatal(err)
        }
        if got != test.want {
            t.Errorf("%T: got %t, want %t", test.in, got, test.want)
        }
    }
}
518 vendor/cloud.google.com/go/bigquery/table.go generated vendored
@@ -1,518 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "fmt"
    "time"

    "golang.org/x/net/context"

    "cloud.google.com/go/internal/optional"
    bq "google.golang.org/api/bigquery/v2"
)

// A Table is a reference to a BigQuery table.
type Table struct {
    // ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
    // In this case the result will be stored in an ephemeral table.
    ProjectID string
    DatasetID string
    // TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
    // The maximum length is 1,024 characters.
    TableID string

    c *Client
}
// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
    // The following fields can be set when creating a table.

    // The user-friendly name for the table.
    Name string

    // The user-friendly description of the table.
    Description string

    // The table schema. If provided on create, ViewQuery must be empty.
    Schema Schema

    // The query to use for a view. If provided on create, Schema must be nil.
    ViewQuery string

    // Use Legacy SQL for the view query.
    // At most one of UseLegacySQL and UseStandardSQL can be true.
    UseLegacySQL bool

    // Use Standard SQL for the view query. The default.
    // At most one of UseLegacySQL and UseStandardSQL can be true.
    // Deprecated: use UseLegacySQL.
    UseStandardSQL bool

    // If non-nil, the table is partitioned by time.
    TimePartitioning *TimePartitioning

    // The time when this table expires. If not set, the table will persist
    // indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExpirationTime time.Time

    // User-provided labels.
    Labels map[string]string

    // Information about a table stored outside of BigQuery.
    ExternalDataConfig *ExternalDataConfig

    // Custom encryption configuration (e.g., Cloud KMS keys).
    EncryptionConfig *EncryptionConfig

    // All the fields below are read-only.

    FullID           string // An opaque ID uniquely identifying the table.
    Type             TableType
    CreationTime     time.Time
    LastModifiedTime time.Time

    // The size of the table in bytes.
    // This does not include data that is being buffered during a streaming insert.
    NumBytes int64

    // The number of rows of data in this table.
    // This does not include data that is being buffered during a streaming insert.
    NumRows uint64

    // Contains information regarding this table's streaming buffer, if one is
    // present. This field will be nil if the table is not being streamed to or if
    // there is no data in the streaming buffer.
    StreamingBuffer *StreamingBuffer

    // ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
    // ensure that the metadata hasn't changed since it was read.
    ETag string
}
// TableCreateDisposition specifies the circumstances under which the destination table will be created.
// Default is CreateIfNeeded.
type TableCreateDisposition string

const (
    // CreateIfNeeded will create the table if it does not already exist.
    // Tables are created atomically on successful completion of a job.
    CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"

    // CreateNever ensures the table must already exist and will not be
    // automatically created.
    CreateNever TableCreateDisposition = "CREATE_NEVER"
)

// TableWriteDisposition specifies how existing data in a destination table is treated.
// Default is WriteAppend.
type TableWriteDisposition string

const (
    // WriteAppend will append to any existing data in the destination table.
    // Data is appended atomically on successful completion of a job.
    WriteAppend TableWriteDisposition = "WRITE_APPEND"

    // WriteTruncate overrides the existing data in the destination table.
    // Data is overwritten atomically on successful completion of a job.
    WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"

    // WriteEmpty fails writes if the destination table already contains data.
    WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
)

// TableType is the type of table.
type TableType string

const (
    RegularTable  TableType = "TABLE"
    ViewTable     TableType = "VIEW"
    ExternalTable TableType = "EXTERNAL"
)
// TimePartitioning describes the time-based date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables.
type TimePartitioning struct {
    // The amount of time to keep the storage for a partition.
    // If the duration is empty (0), the data in the partitions does not expire.
    Expiration time.Duration

    // If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
    // table is partitioned by this field. The field must be a top-level TIMESTAMP or
    // DATE field. Its mode must be NULLABLE or REQUIRED.
    Field string
}

func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
    if p == nil {
        return nil
    }
    return &bq.TimePartitioning{
        Type:         "DAY",
        ExpirationMs: int64(p.Expiration / time.Millisecond),
        Field:        p.Field,
    }
}

func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
    if q == nil {
        return nil
    }
    return &TimePartitioning{
        Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
        Field:      q.Field,
    }
}
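
// A sketch of configuring day-based partitioning at table creation (toBQ above
// always sends Type "DAY"), assuming an existing *Client, context, and schema;
// the dataset, table, and duration are illustrative:
//
//	t := client.Dataset("mydataset").Table("events")
//	err := t.Create(ctx, &TableMetadata{
//		Schema: schema,
//		TimePartitioning: &TimePartitioning{
//			Expiration: 90 * 24 * time.Hour, // partitions expire after ~90 days
//		},
//	})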

// EncryptionConfig configures customer-managed encryption on tables.
type EncryptionConfig struct {
    // Describes the Cloud KMS encryption key that will be used to protect the
    // destination BigQuery table. The BigQuery Service Account associated with your
    // project requires access to this encryption key.
    KMSKeyName string
}

func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration {
    if e == nil {
        return nil
    }
    return &bq.EncryptionConfiguration{
        KmsKeyName: e.KMSKeyName,
    }
}

func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig {
    if q == nil {
        return nil
    }
    return &EncryptionConfig{
        KMSKeyName: q.KmsKeyName,
    }
}

// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
    // A lower-bound estimate of the number of bytes currently in the streaming
    // buffer.
    EstimatedBytes uint64

    // A lower-bound estimate of the number of rows currently in the streaming
    // buffer.
    EstimatedRows uint64

    // The time of the oldest entry in the streaming buffer.
    OldestEntryTime time.Time
}
func (t *Table) toBQ() *bq.TableReference {
    return &bq.TableReference{
        ProjectId: t.ProjectID,
        DatasetId: t.DatasetID,
        TableId:   t.TableID,
    }
}

// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
func (t *Table) FullyQualifiedName() string {
    return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID)
}

// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.
func (t *Table) implicitTable() bool {
    return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
}
// Create creates a table in the BigQuery service.
// Pass in a TableMetadata value to configure the table.
// If tm.ViewQuery is non-empty, the created table will be of type VIEW.
// Expiration can only be set during table creation.
// After table creation, a view can be modified only if its table was initially created
// with a view.
func (t *Table) Create(ctx context.Context, tm *TableMetadata) error {
    table, err := tm.toBQ()
    if err != nil {
        return err
    }
    table.TableReference = &bq.TableReference{
        ProjectId: t.ProjectID,
        DatasetId: t.DatasetID,
        TableId:   t.TableID,
    }
    req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx)
    setClientHeader(req.Header())
    _, err = req.Do()
    return err
}
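
// A sketch of the two creation modes, assuming an existing *Client and
// context; the names and query are illustrative. Per TableMetadata, Schema
// and ViewQuery are mutually exclusive:
//
//	// Regular table from an inferred schema (Item is a hypothetical struct).
//	schema, err := InferSchema(Item{})
//	if err == nil {
//		err = client.Dataset("mydataset").Table("items").Create(ctx, &TableMetadata{Schema: schema})
//	}
//
//	// View: a non-empty ViewQuery makes the created table a VIEW.
//	err = client.Dataset("mydataset").Table("active_items").Create(ctx, &TableMetadata{
//		ViewQuery: "SELECT name FROM mydataset.items WHERE active",
//	})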

func (tm *TableMetadata) toBQ() (*bq.Table, error) {
    t := &bq.Table{}
    if tm == nil {
        return t, nil
    }
    if tm.Schema != nil && tm.ViewQuery != "" {
        return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
    }
    t.FriendlyName = tm.Name
    t.Description = tm.Description
    t.Labels = tm.Labels
    if tm.Schema != nil {
        t.Schema = tm.Schema.toBQ()
    }
    if tm.ViewQuery != "" {
        if tm.UseStandardSQL && tm.UseLegacySQL {
            return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
        }
        t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
        if tm.UseLegacySQL {
            t.View.UseLegacySql = true
        } else {
            t.View.UseLegacySql = false
            t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
        }
    } else if tm.UseLegacySQL || tm.UseStandardSQL {
        return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
    }
    t.TimePartitioning = tm.TimePartitioning.toBQ()
    if !tm.ExpirationTime.IsZero() {
        t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
    }
    if tm.ExternalDataConfig != nil {
        edc := tm.ExternalDataConfig.toBQ()
        t.ExternalDataConfiguration = &edc
    }
    t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
    if tm.FullID != "" {
        return nil, errors.New("cannot set FullID on create")
    }
    if tm.Type != "" {
        return nil, errors.New("cannot set Type on create")
    }
    if !tm.CreationTime.IsZero() {
        return nil, errors.New("cannot set CreationTime on create")
    }
    if !tm.LastModifiedTime.IsZero() {
        return nil, errors.New("cannot set LastModifiedTime on create")
    }
    if tm.NumBytes != 0 {
        return nil, errors.New("cannot set NumBytes on create")
    }
    if tm.NumRows != 0 {
        return nil, errors.New("cannot set NumRows on create")
    }
    if tm.StreamingBuffer != nil {
        return nil, errors.New("cannot set StreamingBuffer on create")
    }
    if tm.ETag != "" {
        return nil, errors.New("cannot set ETag on create")
    }
    return t, nil
}
// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
    req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
    setClientHeader(req.Header())
    var table *bq.Table
    err := runWithRetry(ctx, func() (err error) {
        table, err = req.Do()
        return err
    })
    if err != nil {
        return nil, err
    }
    return bqToTableMetadata(table)
}

func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
    md := &TableMetadata{
        Description:      t.Description,
        Name:             t.FriendlyName,
        Type:             TableType(t.Type),
        FullID:           t.Id,
        Labels:           t.Labels,
        NumBytes:         t.NumBytes,
        NumRows:          t.NumRows,
        ExpirationTime:   unixMillisToTime(t.ExpirationTime),
        CreationTime:     unixMillisToTime(t.CreationTime),
        LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
        ETag:             t.Etag,
        EncryptionConfig: bqToEncryptionConfig(t.EncryptionConfiguration),
    }
    if t.Schema != nil {
        md.Schema = bqToSchema(t.Schema)
    }
    if t.View != nil {
        md.ViewQuery = t.View.Query
        md.UseLegacySQL = t.View.UseLegacySql
    }
    md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
    if t.StreamingBuffer != nil {
        md.StreamingBuffer = &StreamingBuffer{
            EstimatedBytes:  t.StreamingBuffer.EstimatedBytes,
            EstimatedRows:   t.StreamingBuffer.EstimatedRows,
            OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
        }
    }
    if t.ExternalDataConfiguration != nil {
        edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration)
        if err != nil {
            return nil, err
        }
        md.ExternalDataConfig = edc
    }
    return md, nil
}
// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) error {
    req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
    setClientHeader(req.Header())
    return req.Do()
}

// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
    return t.read(ctx, fetchPage)
}

func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
    return newRowIterator(ctx, t, pf)
}

// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) {
    bqt := tm.toBQ()
    call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
    setClientHeader(call.Header())
    if etag != "" {
        call.Header().Set("If-Match", etag)
    }
    var res *bq.Table
    if err := runWithRetry(ctx, func() (err error) {
        res, err = call.Do()
        return err
    }); err != nil {
        return nil, err
    }
    return bqToTableMetadata(res)
}
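
// The etag parameter enables optimistic concurrency control, as described on
// TableMetadata.ETag: read the metadata, then update only if it is unchanged.
// A sketch, assuming an existing *Table and context; the description value is
// illustrative:
//
//	md, err := t.Metadata(ctx)
//	if err != nil {
//		return err
//	}
//	var tm TableMetadataToUpdate
//	tm.Description = "nightly load target"
//	if _, err := t.Update(ctx, tm, md.ETag); err != nil {
//		return err // fails if the metadata changed after the read
//	}
//
// Passing an empty etag updates unconditionally.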

func (tm *TableMetadataToUpdate) toBQ() *bq.Table {
    t := &bq.Table{}
    forceSend := func(field string) {
        t.ForceSendFields = append(t.ForceSendFields, field)
    }

    if tm.Description != nil {
        t.Description = optional.ToString(tm.Description)
        forceSend("Description")
    }
    if tm.Name != nil {
        t.FriendlyName = optional.ToString(tm.Name)
        forceSend("FriendlyName")
    }
    if tm.Schema != nil {
        t.Schema = tm.Schema.toBQ()
        forceSend("Schema")
    }
    if !tm.ExpirationTime.IsZero() {
        t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
        forceSend("ExpirationTime")
    }
    if tm.ViewQuery != nil {
        t.View = &bq.ViewDefinition{
            Query:           optional.ToString(tm.ViewQuery),
            ForceSendFields: []string{"Query"},
        }
    }
    if tm.UseLegacySQL != nil {
        if t.View == nil {
            t.View = &bq.ViewDefinition{}
        }
        t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL)
        t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
    }
    labels, forces, nulls := tm.update()
    t.Labels = labels
    t.ForceSendFields = append(t.ForceSendFields, forces...)
    t.NullFields = append(t.NullFields, nulls...)
    return t
}

// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
    // The user-friendly description of this table.
    Description optional.String

    // The user-friendly name for this table.
    Name optional.String

    // The table's schema.
    // When updating a schema, you can add columns but not remove them.
    Schema Schema

    // The time when this table expires.
    ExpirationTime time.Time

    // The query to use for a view.
    ViewQuery optional.String

    // Use Legacy SQL for the view query.
    UseLegacySQL optional.Bool

    labelUpdater
}

// labelUpdater contains common code for updating labels.
type labelUpdater struct {
    setLabels    map[string]string
    deleteLabels map[string]bool
}

// SetLabel causes a label to be added or modified on a call to Update.
func (u *labelUpdater) SetLabel(name, value string) {
    if u.setLabels == nil {
        u.setLabels = map[string]string{}
    }
    u.setLabels[name] = value
}

// DeleteLabel causes a label to be deleted on a call to Update.
func (u *labelUpdater) DeleteLabel(name string) {
    if u.deleteLabels == nil {
        u.deleteLabels = map[string]bool{}
    }
    u.deleteLabels[name] = true
}
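
// SetLabel and DeleteLabel only stage changes; they take effect on the next
// Update call. A sketch with illustrative label names, assuming an existing
// *Table and context:
//
//	var tm TableMetadataToUpdate
//	tm.SetLabel("team", "data-eng")
//	tm.DeleteLabel("deprecated")
//	_, err := t.Update(ctx, tm, "") // empty etag: unconditional update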

func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) {
    if u.setLabels == nil && u.deleteLabels == nil {
        return nil, nil, nil
    }
    labels = map[string]string{}
    for k, v := range u.setLabels {
        labels[k] = v
    }
    if len(labels) == 0 && len(u.deleteLabels) > 0 {
        forces = []string{"Labels"}
    }
    for l := range u.deleteLabels {
        nulls = append(nulls, "Labels."+l)
    }
    return labels, forces, nulls
}