go get -u ./... && go mod tidy && go mod vendor

Branch: master
Author: Andrey Kovalev, 2 months ago
Commit: e5301e0977
  1. go.mod (12)
  2. go.sum (216)
  3. vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go (14)
  4. vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md (46)
  5. vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go (589)
  6. vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go (33)
  7. vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html (1780)
  8. vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_amd64.go (4)
  9. vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_arm64.go (13)
  10. vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_glibc_linux.go (2)
  11. vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_musl_linux.go (2)
  12. vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_windows.go (2)
  13. vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go (4)
  14. vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go (26)
  15. vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go (17)
  16. vendor/github.com/confluentinc/confluent-kafka-go/kafka/error_gen.go (4)
  17. vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go (4)
  18. vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go (4)
  19. vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go (4)
  20. vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go (4)
  21. vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/LICENSES.txt (26)
  22. vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/bundle-import.sh (34)
  23. vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/import.sh (13)
  24. vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a (BIN)
  25. vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a (BIN)
  26. vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_glibc_linux.a (BIN)
  27. vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_musl_linux.a (BIN)
  28. vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_windows.a (BIN)
  29. vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka.h (3023)
  30. vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka_mock.h (331)
  31. vendor/github.com/confluentinc/confluent-kafka-go/kafka/mockcluster.go (84)
  32. vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go (4)
  33. vendor/github.com/confluentinc/confluent-kafka-go/kafka/select_rdkafka.h (2)
  34. vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go (4)
  35. vendor/github.com/mattn/go-colorable/LICENSE (21)
  36. vendor/github.com/mattn/go-colorable/README.md (48)
  37. vendor/github.com/mattn/go-colorable/colorable_appengine.go (38)
  38. vendor/github.com/mattn/go-colorable/colorable_others.go (38)
  39. vendor/github.com/mattn/go-colorable/colorable_windows.go (1047)
  40. vendor/github.com/mattn/go-colorable/go.test.sh (12)
  41. vendor/github.com/mattn/go-colorable/noncolorable.go (57)
  42. vendor/github.com/mattn/go-isatty/LICENSE (9)
  43. vendor/github.com/mattn/go-isatty/README.md (50)
  44. vendor/github.com/mattn/go-isatty/doc.go (2)
  45. vendor/github.com/mattn/go-isatty/go.test.sh (12)
  46. vendor/github.com/mattn/go-isatty/isatty_bsd.go (20)
  47. vendor/github.com/mattn/go-isatty/isatty_others.go (17)
  48. vendor/github.com/mattn/go-isatty/isatty_plan9.go (23)
  49. vendor/github.com/mattn/go-isatty/isatty_solaris.go (21)
  50. vendor/github.com/mattn/go-isatty/isatty_tcgets.go (20)
  51. vendor/github.com/mattn/go-isatty/isatty_windows.go (125)
  52. vendor/github.com/rs/zerolog/README.md (103)
  53. vendor/github.com/rs/zerolog/array.go (48)
  54. vendor/github.com/rs/zerolog/console.go (215)
  55. vendor/github.com/rs/zerolog/context.go (73)
  56. vendor/github.com/rs/zerolog/ctx.go (23)
  57. vendor/github.com/rs/zerolog/encoder.go (12)
  58. vendor/github.com/rs/zerolog/encoder_cbor.go (3)
  59. vendor/github.com/rs/zerolog/encoder_json.go (12)
  60. vendor/github.com/rs/zerolog/event.go (87)
  61. vendor/github.com/rs/zerolog/example.jsonl (7)
  62. vendor/github.com/rs/zerolog/fields.go (41)
  63. vendor/github.com/rs/zerolog/globals.go (60)
  64. vendor/github.com/rs/zerolog/internal/cbor/cbor.go (8)
  65. vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go (68)
  66. vendor/github.com/rs/zerolog/internal/cbor/string.go (32)
  67. vendor/github.com/rs/zerolog/internal/cbor/time.go (22)
  68. vendor/github.com/rs/zerolog/internal/cbor/types.go (83)
  69. vendor/github.com/rs/zerolog/internal/json/string.go (6)
  70. vendor/github.com/rs/zerolog/internal/json/time.go (29)
  71. vendor/github.com/rs/zerolog/internal/json/types.go (58)
  72. vendor/github.com/rs/zerolog/log.go (101)
  73. vendor/github.com/rs/zerolog/pretty.png (BIN)
  74. vendor/github.com/rs/zerolog/sampler.go (2)
  75. vendor/github.com/rs/zerolog/syslog.go (9)
  76. vendor/github.com/rs/zerolog/writer.go (200)
  77. vendor/golang.org/x/sys/LICENSE (27)
  78. vendor/golang.org/x/sys/PATENTS (22)
  79. vendor/golang.org/x/sys/unix/.gitignore (2)
  80. vendor/golang.org/x/sys/unix/README.md (184)
  81. vendor/golang.org/x/sys/unix/affinity_linux.go (86)
  82. vendor/golang.org/x/sys/unix/aliases.go (13)
  83. vendor/golang.org/x/sys/unix/asm_aix_ppc64.s (17)
  84. vendor/golang.org/x/sys/unix/asm_bsd_386.s (27)
  85. vendor/golang.org/x/sys/unix/asm_bsd_amd64.s (27)
  86. vendor/golang.org/x/sys/unix/asm_bsd_arm.s (27)
  87. vendor/golang.org/x/sys/unix/asm_bsd_arm64.s (27)
  88. vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s (29)
  89. vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s (27)
  90. vendor/golang.org/x/sys/unix/asm_linux_386.s (65)
  91. vendor/golang.org/x/sys/unix/asm_linux_amd64.s (57)
  92. vendor/golang.org/x/sys/unix/asm_linux_arm.s (56)
  93. vendor/golang.org/x/sys/unix/asm_linux_arm64.s (50)
  94. vendor/golang.org/x/sys/unix/asm_linux_loong64.s (51)
  95. vendor/golang.org/x/sys/unix/asm_linux_mips64x.s (54)
  96. vendor/golang.org/x/sys/unix/asm_linux_mipsx.s (52)
  97. vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s (42)
  98. vendor/golang.org/x/sys/unix/asm_linux_riscv64.s (47)
  99. vendor/golang.org/x/sys/unix/asm_linux_s390x.s (54)
  100. vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s (29)
  Some files were not shown because too many files have changed in this diff.

go.mod (12)

@@ -1,16 +1,16 @@
module gitea.russia9.dev/Russia9/chatwars-deals
go 1.18
go 1.23
require (
github.com/confluentinc/confluent-kafka-go v1.8.2
github.com/rs/zerolog v1.26.1
github.com/confluentinc/confluent-kafka-go v1.9.2
github.com/rs/zerolog v1.33.0
gopkg.in/tucnak/telebot.v2 v2.5.0
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/stretchr/testify v1.7.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
golang.org/x/sys v0.24.0 // indirect
)

go.sum (216)

@@ -1,52 +1,236 @@
github.com/confluentinc/confluent-kafka-go v1.8.2 h1:PBdbvYpyOdFLehj8j+9ba7FL4c4Moxn79gy9cYKxG5E=
github.com/confluentinc/confluent-kafka-go v1.8.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/actgardner/gogen-avro/v10 v10.1.0/go.mod h1:o+ybmVjEa27AAr35FRqU98DJu1fXES56uXniYFv4yDA=
github.com/actgardner/gogen-avro/v10 v10.2.1/go.mod h1:QUhjeHPchheYmMDni/Nx7VB0RsT/ee8YIgGY/xpEQgQ=
github.com/actgardner/gogen-avro/v9 v9.1.0/go.mod h1:nyTj6wPqDJoxM3qdnjcLv+EnMDSDFqE0qDpva2QRmKc=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/confluentinc/confluent-kafka-go v1.9.2 h1:gV/GxhMBUb03tFWkN+7kdhg+zf+QUM+wVkI9zwh770Q=
github.com/confluentinc/confluent-kafka-go v1.9.2/go.mod h1:ptXNqsuDfYbAE/LBW6pnwWZElUoWxHoV8E43DCrliyo=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hamba/avro v1.5.6/go.mod h1:3vNT0RLXXpFm2Tb/5KC71ZRJlOroggq1Rcitb6k4Fr8=
github.com/heetch/avro v0.3.1/go.mod h1:4xn38Oz/+hiEUTpbVfGVLfvOg0yKLlRP7Q9+gJJILgA=
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0=
github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ=
github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E=
github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM=
github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
github.com/linkedin/goavro/v2 v2.10.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
github.com/linkedin/goavro/v2 v2.11.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/nrwiersma/avro-benchmarks v0.0.0-20210913175520-21aec48c8f76/go.mod h1:iKyFMidsk/sVYONJRE372sJuX/QTRPacU7imPqqsu7g=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20220503193339-ba3ae3f07e29/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/avro.v0 v0.0.0-20171217001914-a730b5802183/go.mod h1:FvqrFXt+jCsyQibeRv4xxEJBL5iG2DDW5aeJwzDiq4A=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g=
gopkg.in/tucnak/telebot.v2 v2.5.0 h1:i+NynLo443Vp+Zn3Gv9JBjh3Z/PaiKAQwcnhNI7y6Po=
gopkg.in/tucnak/telebot.v2 v2.5.0/go.mod h1:BgaIIx50PSRS9pG59JH+geT82cfvoJU/IaI5TJdN3v8=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go (14, generated, vendored)

@@ -1,5 +1,3 @@
package kafka
/**
* Copyright 2016-2019 Confluent Inc.
*
@@ -16,6 +14,8 @@ package kafka
* limitations under the License.
*/
package kafka
import (
"fmt"
)
@@ -29,19 +29,19 @@ import (
//defines and strings in sync.
//
#define MIN_RD_KAFKA_VERSION 0x01060000
#define MIN_RD_KAFKA_VERSION 0x01090000
#ifdef __APPLE__
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#else
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#endif
#if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION
#ifdef __APPLE__
#error "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#error "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#else
#error "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#error "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#endif
#endif
*/
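
The change above raises the minimum librdkafka requirement from v1.6.0 to v1.9.0 (0x01090000). As a quick sanity check after vendoring, a small program along the lines of the sketch below can report which librdkafka the binary is actually linked against; it assumes the kafka.LibraryVersion() helper exposed by the bindings, and the threshold constant simply mirrors the MIN_RD_KAFKA_VERSION define shown in this diff.

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	// LibraryVersion reports the librdkafka version the bindings were
	// built against, as a hex-encoded int and a human-readable string.
	ver, verStr := kafka.LibraryVersion()
	fmt.Printf("librdkafka %s (0x%08x)\n", verStr, ver)

	// Mirror of the MIN_RD_KAFKA_VERSION check from 00version.go above:
	// confluent-kafka-go v1.9.2 requires librdkafka v1.9.0 or later.
	if ver < 0x01090000 {
		fmt.Println("warning: librdkafka is older than the required v1.9.0")
	}
}
```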

vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md (46, generated, vendored)

@@ -62,23 +62,12 @@ these tags should be specified on the **application** build/get/install command.
## Generating HTML documentation
To generate one-page HTML documentation run the mk/doc-gen.py script from the
top-level directory. This script requires the beautifulsoup4 Python package.
```
$ source .../your/virtualenv/bin/activate
$ pip install beautifulsoup4
...
$ make -f mk/Makefile docs
```
## Release process
For each release candidate and final release, perform the following steps:
### Review the CHANGELOG
### Update bundle to latest librdkafka
See instructions in [kafka/librdkafka/README.md](kafka/librdkafka/README.md).
@@ -87,8 +76,7 @@ See instructions in [kafka/librdkafka/README.md](kafka/librdkafka/README.md).
### Update librdkafka version requirement
Update the minimum required librdkafka version in `kafka/00version.go`
and `README.md`.
and `README.md` and the version in `examples/go.mod` and `mk/doc-gen.py`.
### Update error codes
@@ -101,6 +89,19 @@ Update generated error codes:
# Verify by building
## Generating HTML documentation
To generate one-page HTML documentation run the mk/doc-gen.py script from the
top-level directory. This script requires the beautifulsoup4 Python package.
```
$ source .../your/virtualenv/bin/activate
$ pip install beautifulsoup4
...
$ make -f mk/Makefile docs
```
### Rebuild everything
$ go clean -i ./...
@@ -125,11 +126,6 @@ Manually verify that the examples/ applications work.
Also make sure the examples in README.md work.
Convert any examples using `github.com/confluentinc/confluent-kafka-go/kafka` to use
`gopkg.in/confluentinc/confluent-kafka-go.v1/kafka` import path.
$ find examples/ -type f -name *\.go -exec sed -i -e 's|github\.com/confluentinc/confluent-kafka-go/kafka|gopkg\.in/confluentinc/confluent-kafka-go\.v1/kafka|g' {} +
### Commit any changes
Make sure to push to github before creating the tag to have CI tests pass.
@@ -143,3 +139,13 @@ Make sure to push to github before creating the tag to have CI tests pass.
### Create release notes page on github
### Update version in Confluent docs
Put the new version in settings.sh of these two repos
https://github.com/confluentinc/docs
https://github.com/confluentinc/docs-platform
### Don't forget tweeting it!

vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go (589, generated, vendored)

@@ -48,6 +48,27 @@ ConfigEntry_by_idx (const rd_kafka_ConfigEntry_t **entries, size_t cnt, size_t i
return NULL;
return entries[idx];
}
static const rd_kafka_acl_result_t *
acl_result_by_idx (const rd_kafka_acl_result_t **acl_results, size_t cnt, size_t idx) {
if (idx >= cnt)
return NULL;
return acl_results[idx];
}
static const rd_kafka_DeleteAcls_result_response_t *
DeleteAcls_result_response_by_idx (const rd_kafka_DeleteAcls_result_response_t **delete_acls_result_responses, size_t cnt, size_t idx) {
if (idx >= cnt)
return NULL;
return delete_acls_result_responses[idx];
}
static const rd_kafka_AclBinding_t *
AclBinding_by_idx (const rd_kafka_AclBinding_t **acl_bindings, size_t cnt, size_t idx) {
if (idx >= cnt)
return NULL;
return acl_bindings[idx];
}
*/
import "C"
@@ -312,6 +333,225 @@ func (c ConfigResourceResult) String() string {
return fmt.Sprintf("ResourceResult(%s, %s, %d config(s))", c.Type, c.Name, len(c.Config))
}
// ResourcePatternType enumerates the different types of Kafka resource patterns.
type ResourcePatternType int
const (
// ResourcePatternTypeUnknown is a resource pattern type not known or not set.
ResourcePatternTypeUnknown = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN)
// ResourcePatternTypeAny matches any resource, used for lookups.
ResourcePatternTypeAny = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_ANY)
// ResourcePatternTypeMatch will perform pattern matching
ResourcePatternTypeMatch = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_MATCH)
// ResourcePatternTypeLiteral matches a literal resource name
ResourcePatternTypeLiteral = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_LITERAL)
// ResourcePatternTypePrefixed matches a prefixed resource name
ResourcePatternTypePrefixed = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED)
)
// String returns the human-readable representation of a ResourcePatternType
func (t ResourcePatternType) String() string {
return C.GoString(C.rd_kafka_ResourcePatternType_name(C.rd_kafka_ResourcePatternType_t(t)))
}
// ResourcePatternTypeFromString translates a resource pattern type name to
// a ResourcePatternType value.
func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error) {
switch strings.ToUpper(patternTypeString) {
case "ANY":
return ResourcePatternTypeAny, nil
case "MATCH":
return ResourcePatternTypeMatch, nil
case "LITERAL":
return ResourcePatternTypeLiteral, nil
case "PREFIXED":
return ResourcePatternTypePrefixed, nil
default:
return ResourcePatternTypeUnknown, NewError(ErrInvalidArg, "Unknown resource pattern type", false)
}
}
// ACLOperation enumerates the different types of ACL operation.
type ACLOperation int
const (
// ACLOperationUnknown represents an unknown or unset operation
ACLOperationUnknown = ACLOperation(C.RD_KAFKA_ACL_OPERATION_UNKNOWN)
// ACLOperationAny in a filter, matches any ACLOperation
ACLOperationAny = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ANY)
// ACLOperationAll represents all the operations
ACLOperationAll = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALL)
// ACLOperationRead a read operation
ACLOperationRead = ACLOperation(C.RD_KAFKA_ACL_OPERATION_READ)
// ACLOperationWrite represents a write operation
ACLOperationWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_WRITE)
// ACLOperationCreate represents a create operation
ACLOperationCreate = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CREATE)
// ACLOperationDelete represents a delete operation
ACLOperationDelete = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DELETE)
// ACLOperationAlter represents an alter operation
ACLOperationAlter = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER)
// ACLOperationDescribe represents a describe operation
ACLOperationDescribe = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE)
// ACLOperationClusterAction represents a cluster action operation
ACLOperationClusterAction = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION)
// ACLOperationDescribeConfigs represents a describe configs operation
ACLOperationDescribeConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS)
// ACLOperationAlterConfigs represents an alter configs operation
ACLOperationAlterConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS)
// ACLOperationIdempotentWrite represents an idempotent write operation
ACLOperationIdempotentWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE)
)
// String returns the human-readable representation of an ACLOperation
func (o ACLOperation) String() string {
return C.GoString(C.rd_kafka_AclOperation_name(C.rd_kafka_AclOperation_t(o)))
}
// ACLOperationFromString translates a ACL operation name to
// a ACLOperation value.
func ACLOperationFromString(aclOperationString string) (ACLOperation, error) {
switch strings.ToUpper(aclOperationString) {
case "ANY":
return ACLOperationAny, nil
case "ALL":
return ACLOperationAll, nil
case "READ":
return ACLOperationRead, nil
case "WRITE":
return ACLOperationWrite, nil
case "CREATE":
return ACLOperationCreate, nil
case "DELETE":
return ACLOperationDelete, nil
case "ALTER":
return ACLOperationAlter, nil
case "DESCRIBE":
return ACLOperationDescribe, nil
case "CLUSTER_ACTION":
return ACLOperationClusterAction, nil
case "DESCRIBE_CONFIGS":
return ACLOperationDescribeConfigs, nil
case "ALTER_CONFIGS":
return ACLOperationAlterConfigs, nil
case "IDEMPOTENT_WRITE":
return ACLOperationIdempotentWrite, nil
default:
return ACLOperationUnknown, NewError(ErrInvalidArg, "Unknown ACL operation", false)
}
}
// ACLPermissionType enumerates the different types of ACL permission types.
type ACLPermissionType int
const (
// ACLPermissionTypeUnknown represents an unknown ACLPermissionType
ACLPermissionTypeUnknown = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN)
// ACLPermissionTypeAny in a filter, matches any ACLPermissionType
ACLPermissionTypeAny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY)
// ACLPermissionTypeDeny disallows access
ACLPermissionTypeDeny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY)
// ACLPermissionTypeAllow grants access
ACLPermissionTypeAllow = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW)
)
// String returns the human-readable representation of an ACLPermissionType
func (o ACLPermissionType) String() string {
return C.GoString(C.rd_kafka_AclPermissionType_name(C.rd_kafka_AclPermissionType_t(o)))
}
// ACLPermissionTypeFromString translates a ACL permission type name to
// a ACLPermissionType value.
func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error) {
switch strings.ToUpper(aclPermissionTypeString) {
case "ANY":
return ACLPermissionTypeAny, nil
case "DENY":
return ACLPermissionTypeDeny, nil
case "ALLOW":
return ACLPermissionTypeAllow, nil
default:
return ACLPermissionTypeUnknown, NewError(ErrInvalidArg, "Unknown ACL permission type", false)
}
}
// ACLBinding specifies the operation and permission type for a specific principal
// over one or more resources of the same type. Used by `AdminClient.CreateACLs`,
// returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`.
type ACLBinding struct {
Type ResourceType // The resource type.
// The resource name, which depends on the resource type.
// For ResourceBroker the resource name is the broker id.
Name string
ResourcePatternType ResourcePatternType // The resource pattern, relative to the name.
Principal string // The principal this ACLBinding refers to.
Host string // The host that the call is allowed to come from.
Operation ACLOperation // The operation/s specified by this binding.
PermissionType ACLPermissionType // The permission type for the specified operation.
}
// ACLBindingFilter specifies a filter used to return a list of ACL bindings matching some or all of its attributes.
// Used by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`.
type ACLBindingFilter = ACLBinding
// ACLBindings is a slice of ACLBinding that also implements
// the sort interface
type ACLBindings []ACLBinding
// ACLBindingFilters is a slice of ACLBindingFilter that also implements
// the sort interface
type ACLBindingFilters []ACLBindingFilter
func (a ACLBindings) Len() int {
return len(a)
}
func (a ACLBindings) Less(i, j int) bool {
if a[i].Type != a[j].Type {
return a[i].Type < a[j].Type
}
if a[i].Name != a[j].Name {
return a[i].Name < a[j].Name
}
if a[i].ResourcePatternType != a[j].ResourcePatternType {
return a[i].ResourcePatternType < a[j].ResourcePatternType
}
if a[i].Principal != a[j].Principal {
return a[i].Principal < a[j].Principal
}
if a[i].Host != a[j].Host {
return a[i].Host < a[j].Host
}
if a[i].Operation != a[j].Operation {
return a[i].Operation < a[j].Operation
}
if a[i].PermissionType != a[j].PermissionType {
return a[i].PermissionType < a[j].PermissionType
}
return true
}
func (a ACLBindings) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
// CreateACLResult provides create ACL error information.
type CreateACLResult struct {
// Error, if any, of result. Check with `Error.Code() != ErrNoError`.
Error Error
}
// DescribeACLsResult provides describe ACLs result or error information.
type DescribeACLsResult struct {
// Slice of ACL bindings matching the provided filter
ACLBindings ACLBindings
// Error, if any, of result. Check with `Error.Code() != ErrNoError`.
Error Error
}
// DeleteACLsResult provides delete ACLs result or error information.
type DeleteACLsResult = DescribeACLsResult
// waitResult waits for a result event on cQueue or the ctx to be cancelled, whichever happens
// first.
// The returned result event is checked for errors its error is returned if set.
@@ -950,6 +1190,355 @@ func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error {
return a.handle.setOAuthBearerTokenFailure(errstr)
}
// aclBindingToC converts a Go ACLBinding struct to a C rd_kafka_AclBinding_t
func (a *AdminClient) aclBindingToC(aclBinding *ACLBinding, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBinding_t, err error) {
var cName, cPrincipal, cHost *C.char
cName, cPrincipal, cHost = nil, nil, nil
if len(aclBinding.Name) > 0 {
cName = C.CString(aclBinding.Name)
defer C.free(unsafe.Pointer(cName))
}
if len(aclBinding.Principal) > 0 {
cPrincipal = C.CString(aclBinding.Principal)
defer C.free(unsafe.Pointer(cPrincipal))
}
if len(aclBinding.Host) > 0 {
cHost = C.CString(aclBinding.Host)
defer C.free(unsafe.Pointer(cHost))
}
result = C.rd_kafka_AclBinding_new(
C.rd_kafka_ResourceType_t(aclBinding.Type),
cName,
C.rd_kafka_ResourcePatternType_t(aclBinding.ResourcePatternType),
cPrincipal,
cHost,
C.rd_kafka_AclOperation_t(aclBinding.Operation),
C.rd_kafka_AclPermissionType_t(aclBinding.PermissionType),
cErrstr,
cErrstrSize,
)
if result == nil {
err = newErrorFromString(ErrInvalidArg,
fmt.Sprintf("Invalid arguments for ACL binding %v: %v", aclBinding, C.GoString(cErrstr)))
}
return
}
// aclBindingFilterToC converts a Go ACLBindingFilter struct to a C rd_kafka_AclBindingFilter_t
func (a *AdminClient) aclBindingFilterToC(aclBindingFilter *ACLBindingFilter, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBindingFilter_t, err error) {
var cName, cPrincipal, cHost *C.char
cName, cPrincipal, cHost = nil, nil, nil
if len(aclBindingFilter.Name) > 0 {
cName = C.CString(aclBindingFilter.Name)
defer C.free(unsafe.Pointer(cName))
}
if len(aclBindingFilter.Principal) > 0 {
cPrincipal = C.CString(aclBindingFilter.Principal)
defer C.free(unsafe.Pointer(cPrincipal))
}
if len(aclBindingFilter.Host) > 0 {
cHost = C.CString(aclBindingFilter.Host)
defer C.free(unsafe.Pointer(cHost))
}
result = C.rd_kafka_AclBindingFilter_new(
C.rd_kafka_ResourceType_t(aclBindingFilter.Type),
cName,
C.rd_kafka_ResourcePatternType_t(aclBindingFilter.ResourcePatternType),
cPrincipal,
cHost,
C.rd_kafka_AclOperation_t(aclBindingFilter.Operation),
C.rd_kafka_AclPermissionType_t(aclBindingFilter.PermissionType),
cErrstr,
cErrstrSize,
)
if result == nil {
err = newErrorFromString(ErrInvalidArg,
fmt.Sprintf("Invalid arguments for ACL binding filter %v: %v", aclBindingFilter, C.GoString(cErrstr)))
}
return
}
// cToACLBinding converts a C rd_kafka_AclBinding_t to Go ACLBinding
func (a *AdminClient) cToACLBinding(cACLBinding *C.rd_kafka_AclBinding_t) ACLBinding {
return ACLBinding{
ResourceType(C.rd_kafka_AclBinding_restype(cACLBinding)),
C.GoString(C.rd_kafka_AclBinding_name(cACLBinding)),
ResourcePatternType(C.rd_kafka_AclBinding_resource_pattern_type(cACLBinding)),
C.GoString(C.rd_kafka_AclBinding_principal(cACLBinding)),
C.GoString(C.rd_kafka_AclBinding_host(cACLBinding)),
ACLOperation(C.rd_kafka_AclBinding_operation(cACLBinding)),
ACLPermissionType(C.rd_kafka_AclBinding_permission_type(cACLBinding)),
}
}
// cToACLBindings converts a C rd_kafka_AclBinding_t list to Go ACLBindings
func (a *AdminClient) cToACLBindings(cACLBindings **C.rd_kafka_AclBinding_t, aclCnt C.size_t) (result ACLBindings) {
result = make(ACLBindings, aclCnt)
for i := uint(0); i < uint(aclCnt); i++ {
cACLBinding := C.AclBinding_by_idx(cACLBindings, aclCnt, C.size_t(i))
if cACLBinding == nil {
panic("AclBinding_by_idx must not return nil")
}
result[i] = a.cToACLBinding(cACLBinding)
}
return
}
// cToCreateACLResults converts a C acl_result_t array to Go CreateACLResult list.
func (a *AdminClient) cToCreateACLResults(cCreateAclsRes **C.rd_kafka_acl_result_t, aclCnt C.size_t) (result []CreateACLResult, err error) {
result = make([]CreateACLResult, uint(aclCnt))
for i := uint(0); i < uint(aclCnt); i++ {
cCreateACLRes := C.acl_result_by_idx(cCreateAclsRes, aclCnt, C.size_t(i))
if cCreateACLRes != nil {
cCreateACLError := C.rd_kafka_acl_result_error(cCreateACLRes)
result[i].Error = newErrorFromCError(cCreateACLError)
}
}
return result, nil
}
// cToDescribeACLsResult converts a C rd_kafka_event_t to a Go DescribeAclsResult struct.
func (a *AdminClient) cToDescribeACLsResult(rkev *C.rd_kafka_event_t) (result *DescribeACLsResult) {
result = &DescribeACLsResult{}
err := C.rd_kafka_event_error(rkev)
errCode := ErrorCode(err)
errStr := C.rd_kafka_event_error_string(rkev)
var cResultACLsCount C.size_t
cResult := C.rd_kafka_event_DescribeAcls_result(rkev)
cResultACLs := C.rd_kafka_DescribeAcls_result_acls(cResult, &cResultACLsCount)
if errCode != ErrNoError {
result.Error = newErrorFromCString(err, errStr)
}
result.ACLBindings = a.cToACLBindings(cResultACLs, cResultACLsCount)
return
}
// cToDeleteACLsResults converts a C rd_kafka_DeleteAcls_result_response_t array to Go DeleteAclsResult slice.
func (a *AdminClient) cToDeleteACLsResults(cDeleteACLsResResponse **C.rd_kafka_DeleteAcls_result_response_t, resResponseCnt C.size_t) (result []DeleteACLsResult) {
result = make([]DeleteACLsResult, uint(resResponseCnt))
for i := uint(0); i < uint(resResponseCnt); i++ {
cDeleteACLsResResponse := C.DeleteAcls_result_response_by_idx(cDeleteACLsResResponse, resResponseCnt, C.size_t(i))
if cDeleteACLsResResponse == nil {
panic("DeleteAcls_result_response_by_idx must not return nil")
}
cDeleteACLsError := C.rd_kafka_DeleteAcls_result_response_error(cDeleteACLsResResponse)
result[i].Error = newErrorFromCError(cDeleteACLsError)
var cMatchingACLsCount C.size_t
cMatchingACLs := C.rd_kafka_DeleteAcls_result_response_matching_acls(
cDeleteACLsResResponse, &cMatchingACLsCount)
result[i].ACLBindings = a.cToACLBindings(cMatchingACLs, cMatchingACLsCount)
}
return
}
// CreateACLs creates one or more ACL bindings.
//
// Parameters:
// * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
// * `aclBindings` - A slice of ACL binding specifications to create.
// * `options` - Create ACLs options
//
// Returns a slice of CreateACLResult with a ErrNoError ErrorCode when the operation was successful
// plus an error that is not nil for client level errors
func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error) {
if aclBindings == nil {
return nil, newErrorFromString(ErrInvalidArg,
"Expected non-nil slice of ACLBinding structs")
}
if len(aclBindings) == 0 {
return nil, newErrorFromString(ErrInvalidArg,
"Expected non-empty slice of ACLBinding structs")
}
cErrstrSize := C.size_t(512)
cErrstr := (*C.char)(C.malloc(cErrstrSize))
defer C.free(unsafe.Pointer(cErrstr))
cACLBindings := make([]*C.rd_kafka_AclBinding_t, len(aclBindings))
for i, aclBinding := range aclBindings {
cACLBindings[i], err = a.aclBindingToC(&aclBinding, cErrstr, cErrstrSize)
if err != nil {
return
}
defer C.rd_kafka_AclBinding_destroy(cACLBindings[i])
}
// Convert Go AdminOptions (if any) to C AdminOptions
genericOptions := make([]AdminOption, len(options))
for i := range options {
genericOptions[i] = options[i]
}
cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATEACLS, genericOptions)
if err != nil {
return nil, err
}
// Create temporary queue for async operation
cQueue := C.rd_kafka_queue_new(a.handle.rk)
defer C.rd_kafka_queue_destroy(cQueue)
// Asynchronous call
C.rd_kafka_CreateAcls(
a.handle.rk,
(**C.rd_kafka_AclBinding_t)(&cACLBindings[0]),
C.size_t(len(cACLBindings)),
cOptions,
cQueue)
// Wait for result, error or context timeout
rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATEACLS_RESULT)
if err != nil {
return nil, err
}
defer C.rd_kafka_event_destroy(rkev)
var cResultCnt C.size_t
cResult := C.rd_kafka_event_CreateAcls_result(rkev)
aclResults := C.rd_kafka_CreateAcls_result_acls(cResult, &cResultCnt)
result, err = a.cToCreateACLResults(aclResults, cResultCnt)
return
}
// DescribeACLs matches ACL bindings by filter.
//
// Parameters:
// * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
// * `aclBindingFilter` - A filter with attributes that must match.
// string attributes match exact values or any string if set to empty string.
// Enum attributes match exact values or any value if ending with `Any`.
// If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns ACL bindings with:
// - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name
// - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name
// - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name
// * `options` - Describe ACLs options
//
// Returns a slice of ACLBindings when the operation was successful
// plus an error that is not `nil` for client level errors
func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error) {
cErrstrSize := C.size_t(512)
cErrstr := (*C.char)(C.malloc(cErrstrSize))
defer C.free(unsafe.Pointer(cErrstr))
cACLBindingFilter, err := a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize)
if err != nil {
return
}
// Convert Go AdminOptions (if any) to C AdminOptions
genericOptions := make([]AdminOption, len(options))
for i := range options {
genericOptions[i] = options[i]
}
cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBEACLS, genericOptions)
if err != nil {
return nil, err
}
// Create temporary queue for async operation
cQueue := C.rd_kafka_queue_new(a.handle.rk)
defer C.rd_kafka_queue_destroy(cQueue)
// Asynchronous call
C.rd_kafka_DescribeAcls(
a.handle.rk,
cACLBindingFilter,
cOptions,
cQueue)
// Wait for result, error or context timeout
rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBEACLS_RESULT)
if err != nil {
return nil, err
}
defer C.rd_kafka_event_destroy(rkev)
result = a.cToDescribeACLsResult(rkev)
return
}
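Continuing the same sketch (reusing the hypothetical admin client, context, and imports from the CreateACLs example above), a broad filter can be described; the result is printed generically rather than assuming its exact fields:

```go
// Match ACLs on any topic name, from any principal and host, for any operation.
filter := kafka.ACLBindingFilter{
	Type:                kafka.ResourceTopic,
	ResourcePatternType: kafka.ResourcePatternTypeAny,
	Operation:           kafka.ACLOperationAny,
	PermissionType:      kafka.ACLPermissionTypeAny,
}
described, err := admin.DescribeACLs(ctx, filter)
if err != nil {
	log.Fatal(err) // client-level error
}
fmt.Printf("matched ACL bindings: %+v\n", described)
```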
// DeleteACLs deletes ACL bindings matching one or more ACL binding filters.
//
// Parameters:
// * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
// * `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete.
// string attributes match exact values or any string if set to empty string.
// Enum attributes match exact values or any value if ending with `Any`.
// If `ResourcePatternType` is set to `ResourcePatternTypeMatch` deletes ACL bindings with:
// - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name
// - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name
// - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name
// * `options` - Delete ACLs options
//
// Returns a slice of ACLBinding for each filter when the operation was successful
// plus an error that is not `nil` for client level errors
func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error) {
if aclBindingFilters == nil {
return nil, newErrorFromString(ErrInvalidArg,
"Expected non-nil slice of ACLBindingFilter structs")
}
if len(aclBindingFilters) == 0 {
return nil, newErrorFromString(ErrInvalidArg,
"Expected non-empty slice of ACLBindingFilter structs")
}
cErrstrSize := C.size_t(512)
cErrstr := (*C.char)(C.malloc(cErrstrSize))
defer C.free(unsafe.Pointer(cErrstr))
cACLBindingFilters := make([]*C.rd_kafka_AclBindingFilter_t, len(aclBindingFilters))
for i, aclBindingFilter := range aclBindingFilters {
cACLBindingFilters[i], err = a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize)
if err != nil {
return
}
defer C.rd_kafka_AclBinding_destroy(cACLBindingFilters[i])
}
// Convert Go AdminOptions (if any) to C AdminOptions
genericOptions := make([]AdminOption, len(options))
for i := range options {
genericOptions[i] = options[i]
}
cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DELETEACLS, genericOptions)
if err != nil {
return nil, err
}
// Create temporary queue for async operation
cQueue := C.rd_kafka_queue_new(a.handle.rk)
defer C.rd_kafka_queue_destroy(cQueue)
// Asynchronous call
C.rd_kafka_DeleteAcls(
a.handle.rk,
(**C.rd_kafka_AclBindingFilter_t)(&cACLBindingFilters[0]),
C.size_t(len(cACLBindingFilters)),
cOptions,
cQueue)
// Wait for result, error or context timeout
rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETEACLS_RESULT)
if err != nil {
return nil, err
}
defer C.rd_kafka_event_destroy(rkev)
var cResultResponsesCount C.size_t
cResult := C.rd_kafka_event_DeleteAcls_result(rkev)
cResultResponses := C.rd_kafka_DeleteAcls_result_responses(cResult, &cResultResponsesCount)
result = a.cToDeleteACLsResults(cResultResponses, cResultResponsesCount)
return
}
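Under the same assumptions, deleting whatever that filter matches; DeleteACLs returns one result per filter passed in:

```go
deleted, err := admin.DeleteACLs(ctx, kafka.ACLBindingFilters{filter})
if err != nil {
	log.Fatal(err) // client-level error
}
for _, res := range deleted {
	fmt.Printf("delete result: %+v\n", res)
}
```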
// Close an AdminClient instance.
func (a *AdminClient) Close() {
if a.isDerived {

33
vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go generated vendored

@@ -166,6 +166,15 @@ func (ao AdminOptionValidateOnly) supportsCreatePartitions() {
func (ao AdminOptionValidateOnly) supportsAlterConfigs() {
}
func (ao AdminOptionRequestTimeout) supportsCreateACLs() {
}
func (ao AdminOptionRequestTimeout) supportsDescribeACLs() {
}
func (ao AdminOptionRequestTimeout) supportsDeleteACLs() {
}
func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
if !ao.isSet {
return nil
@@ -240,6 +249,30 @@ type DescribeConfigsAdminOption interface {
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// CreateACLsAdminOption - see setter.
//
// See SetAdminRequestTimeout
type CreateACLsAdminOption interface {
supportsCreateACLs()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DescribeACLsAdminOption - see setter.
//
// See SetAdminRequestTimeout
type DescribeACLsAdminOption interface {
supportsDescribeACLs()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
// DeleteACLsAdminOption - see setter.
//
// See SetAdminRequestTimeout
type DeleteACLsAdminOption interface {
supportsDeleteACLs()
apply(cOptions *C.rd_kafka_AdminOptions_t) error
}
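Per the supports* methods added above, the AdminOptionRequestTimeout value returned by SetAdminRequestTimeout satisfies all three of these interfaces, so one option value can be reused across the ACL calls. A hedged fragment, assuming the admin client, context, and a `bindings` slice like the one built inline in the CreateACLs sketch:

```go
// One request-timeout option, accepted by CreateACLs, DescribeACLs and DeleteACLs alike.
timeout := kafka.SetAdminRequestTimeout(15 * time.Second)
if _, err := admin.CreateACLs(ctx, bindings, timeout); err != nil {
	log.Fatal(err)
}
```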
// AdminOption is a generic type not to be used directly.
//
// See CreateTopicsAdminOption et.al.

1780
vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html generated vendored

File diff suppressed because it is too large

4
vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin.go → vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_amd64.go generated vendored

@@ -6,8 +6,8 @@
package kafka
// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin.a -lm -lsasl2 -ldl -lpthread
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin_amd64.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static darwin from librdkafka-static-bundle-v1.8.2.tgz"
const LibrdkafkaLinkInfo = "static darwin_amd64 from librdkafka-static-bundle-v1.9.2.tgz"

13
vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_arm64.go generated vendored

@@ -0,0 +1,13 @@
// +build !dynamic
// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.
package kafka
// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin_arm64.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static darwin_arm64 from librdkafka-static-bundle-v1.9.2.tgz"

2
vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_glibc_linux.go generated vendored

@@ -10,4 +10,4 @@ package kafka
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.8.2.tgz"
const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.9.2.tgz"

2
vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_musl_linux.go generated vendored

@@ -10,4 +10,4 @@ package kafka
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static musl_linux from librdkafka-static-bundle-v1.8.2.tgz"
const LibrdkafkaLinkInfo = "static musl_linux from librdkafka-static-bundle-v1.9.2.tgz"

2
vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_windows.go generated vendored

@@ -10,4 +10,4 @@ package kafka
import "C"
// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v1.8.2.tgz"
const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v1.9.2.tgz"

4
vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go generated vendored

@@ -1,5 +1,3 @@
package kafka
/**
* Copyright 2016 Confluent Inc.
*
@@ -16,6 +14,8 @@ package kafka
* limitations under the License.
*/
package kafka
import (
"fmt"
"reflect"

26
vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go generated vendored

@@ -1,5 +1,3 @@
package kafka
/**
* Copyright 2016-2020 Confluent Inc.
*
@@ -16,6 +14,8 @@ package kafka
* limitations under the License.
*/
package kafka
import (
"fmt"
"math"
@@ -46,7 +46,7 @@ type Consumer struct {
readerTermChan chan bool
rebalanceCb RebalanceCb
appReassigned bool
appRebalanceEnable bool // Config setting
appRebalanceEnable bool // SerializerConfig setting
}
// Strings returns a human readable name for a Consumer instance
@@ -424,30 +424,16 @@ func (c *Consumer) Close() (err error) {
close(c.events)
}
// librdkafka's rd_kafka_consumer_close() will block
// and trigger the rebalance_cb() if one is set, if not, which is the
// case with the Go client since it registers EVENTs rather than callbacks,
// librdkafka will shortcut the rebalance_cb and do a forced unassign.
// But we can't have that since the application might need the final RevokePartitions
// before shutting down. So we trigger an Unsubscribe() first, wait for that to
// propagate (in the Poll loop below), and then close the consumer.
c.Unsubscribe()
C.rd_kafka_consumer_close_queue(c.handle.rk, c.handle.rkq)
// Poll for rebalance events
for {
c.Poll(10 * 1000)
if int(C.rd_kafka_queue_length(c.handle.rkq)) == 0 {
break
}
for C.rd_kafka_consumer_closed(c.handle.rk) != 1 {
c.Poll(100)
}
// Destroy our queue
C.rd_kafka_queue_destroy(c.handle.rkq)
c.handle.rkq = nil
// Close the consumer
C.rd_kafka_consumer_close(c.handle.rk)
c.handle.cleanup()
C.rd_kafka_destroy(c.handle.rk)

17
vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go generated vendored

@@ -1,5 +1,3 @@
package kafka
/**
* Copyright 2016 Confluent Inc.
*
@@ -16,6 +14,8 @@ package kafka
* limitations under the License.
*/
package kafka
// Automatically generate error codes from librdkafka
// See README for instructions
//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go
@@ -67,10 +67,8 @@ func newCErrorFromString(code C.rd_kafka_resp_err_t, str string) (err Error) {
return newErrorFromString(ErrorCode(code), str)
}
// newErrorFromCError creates a new Error instance and destroys
// the passed cError.
func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error {
defer C.rd_kafka_error_destroy(cError)
// newErrorFromCError creates a new Error instance
func newErrorFromCError(cError *C.rd_kafka_error_t) Error {
return Error{
code: ErrorCode(C.rd_kafka_error_code(cError)),
str: C.GoString(C.rd_kafka_error_string(cError)),
@@ -80,6 +78,13 @@ func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error {
}
}
// newErrorFromCErrorDestroy creates a new Error instance and destroys
// the passed cError.
func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error {
defer C.rd_kafka_error_destroy(cError)
return newErrorFromCError(cError)
}
// Error returns a human readable representation of an Error
// Same as Error.String()
func (e Error) Error() string {

4
vendor/github.com/confluentinc/confluent-kafka-go/kafka/error_gen.go generated vendored

@@ -1,5 +1,3 @@
package kafka
/**
* Copyright 2020 Confluent Inc.
*
@@ -16,6 +14,8 @@ package kafka
* limitations under the License.
*/
package kafka
// Automatically generate error codes from librdkafka
// See README for instructions
//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go

4
vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go generated vendored

@@ -1,5 +1,3 @@
package kafka
/**
* Copyright 2016 Confluent Inc.
*
@@ -16,6 +14,8 @@ package kafka
* limitations under the License.
*/
package kafka
import (
"fmt"
"os"

4
vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go generated vendored

@@ -1,6 +1,6 @@
package kafka
// Copyright 2016-2021 Confluent Inc.
// AUTOMATICALLY GENERATED ON 2021-12-08 12:44:39.243338672 +0100 CET m=+0.000248284 USING librdkafka 1.8.2
// Copyright 2016-2022 Confluent Inc.
// AUTOMATICALLY GENERATED ON 2022-08-01 22:56:19.86222475 +0200 CEST m=+0.000294735 USING librdkafka 1.9.2
/*
#include "select_rdkafka.h"

4
vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go generated vendored

@@ -1,5 +1,3 @@
package kafka
/**
* Copyright 2016 Confluent Inc.
*
@@ -16,6 +14,8 @@ package kafka
* limitations under the License.
*/
package kafka
import (
"fmt"
"strings"

4
vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go generated vendored

@@ -1,5 +1,3 @@
package kafka
/**
* Copyright 2018 Confluent Inc.
*
@@ -16,6 +14,8 @@ package kafka
* limitations under the License.
*/
package kafka
import (
"fmt"
"strconv"

26
vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/LICENSES.txt generated vendored

@@ -27,6 +27,32 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
LICENSE.cjson
--------------------------------------------------------------
For cJSON.c and cJSON.h:
Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
LICENSE.crc32c
--------------------------------------------------------------
# For src/crc32c.c copied (with modifications) from

34
vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/bundle-import.sh generated vendored

@@ -14,28 +14,30 @@ usage() {
}
# Parse dynamic libraries from linker command line.
# Will print a list matching -lfoo and -framework X..
parse_dynlibs() {
# Parse dynamic libraries from pkg-config file,
# both the ones specified with Libs: but also through Requires:
local pc=$1
local libs=
local req=
local n=
for req in $(grep ^Requires: $pc | sed -e 's/^Requires://'); do
n=$(pkg-config --libs $req)
if [[ $n == -l* ]]; then
libs="${libs} $n"
fi
done
for n in $(grep ^Libs: $pc); do
if [[ $n == -l* ]]; then
libs="${libs} $n"
while [[ $# -gt 0 ]]; do
if [[ $1 == -l* ]]; then
libs="${libs} $1"
elif [[ $1 == -framework ]]; then
libs="${libs} $1 $2"
shift # remove one (extra) arg
fi
shift # remove one arg
done
echo "$libs"
}
# Parse dynamic library dependecies from pkg-config file and print
# them to stdout.
parse_pc_dynlibs() {
local pc=$1
parse_dynlibs $(sed -n 's/^Libs: \(..*\)/\1/p' "$pc")
}
setup_build() {
# Copies static library from the temp directory into final location,
# extracts dynamic lib list from the pkg-config file,
@@ -54,7 +56,7 @@ setup_build() {
build_tag="// +build musl"
fi
local dynlibs=$(parse_dynlibs $pc)
local dynlibs=$(parse_pc_dynlibs $pc)
echo "Copying $apath to $dpath"
cp "$apath" "$dpath"
@@ -99,7 +101,7 @@ for f in rdkafka.h LICENSES.txt ; do
done
for btype in glibc_linux musl_linux darwin windows ; do
for btype in glibc_linux musl_linux darwin_amd64 darwin_arm64 windows ; do
lib=$bdir/librdkafka_${btype}.a
pc=${lib/%.a/.pc}
[[ -f $lib ]] || (echo "Expected file $lib missing" ; exit 1)

13
vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/import.sh generated vendored

@@ -51,12 +51,15 @@ fi
curr_branch=$(git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3-)
uncommitted=$(git status --untracked-files=no --porcelain)
if [[ $devel != 1 ]] && ( [[ $curr_branch != master ]] || [[ ! -z $uncommitted ]] ); then
if [[ ! -z $uncommitted ]]; then
echo "Error: This script must be run on a clean branch with no uncommitted changes"
echo "Uncommitted files:"
echo "$uncommitted"
exit 1
fi
if [[ $devel != 1 ]] && [[ $curr_branch != master ]] ; then
echo "Error: This script must be run on an up-to-date, clean, master branch"
if [[ ! -z $uncommitted ]]; then
echo "Uncommitted files:"
echo "$uncommitted"
fi
exit 1
fi

BIN
vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin.a → vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a generated vendored

Binary file not shown.

BIN
vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a generated vendored

Binary file not shown.

BIN
vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_glibc_linux.a generated vendored

Binary file not shown.

BIN
vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_musl_linux.a generated vendored

Binary file not shown.

BIN
vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_windows.a generated vendored

Binary file not shown.

3023
vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka.h generated vendored

File diff suppressed because it is too large

331
vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka_mock.h generated vendored

@@ -0,0 +1,331 @@
/*
* librdkafka - Apache Kafka C library
*
* Copyright (c) 2019 Magnus Edenhill
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _RDKAFKA_MOCK_H_
#define _RDKAFKA_MOCK_H_
#ifndef _RDKAFKA_H_
#error "rdkafka_mock.h must be included after rdkafka.h"
#endif
#ifdef __cplusplus
extern "C" {
#if 0
} /* Restore indent */
#endif
#endif
/**
* @name Mock cluster
*
* Provides a mock Kafka cluster with a configurable number of brokers
* that support a reasonable subset of Kafka protocol operations,
* error injection, etc.
*
* There are two ways to use the mock clusters, the most simple approach
* is to configure `test.mock.num.brokers` (to e.g. 3) on the rd_kafka_t
* in an existing application, which will replace the configured
* `bootstrap.servers` with the mock cluster brokers.
* This approach is convenient to easily test existing applications.
*
* The second approach is to explicitly create a mock cluster on an
* rd_kafka_t instance by using rd_kafka_mock_cluster_new().
*
* Mock clusters provide localhost listeners that can be used as the bootstrap
* servers by multiple rd_kafka_t instances.
*
* Currently supported functionality:
* - Producer
* - Idempotent Producer
* - Transactional Producer
* - Low-level consumer
* - High-level balanced consumer groups with offset commits
* - Topic Metadata and auto creation
*
* @remark High-level consumers making use of the balanced consumer groups
* are not supported.
*
* @remark This is an experimental public API that is NOT covered by the
* librdkafka API or ABI stability guarantees.
*
*
* @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
*
* @{
*/
typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
/**
* @brief Create new mock cluster with \p broker_cnt brokers.
*
* The broker ids will start at 1 up to and including \p broker_cnt.
*
* The \p rk instance is required for internal book keeping but continues
* to operate as usual.
*/
RD_EXPORT
rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk,
int broker_cnt);
/**
* @brief Destroy mock cluster.
*/
RD_EXPORT
void rd_kafka_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster);
/**
* @returns the rd_kafka_t instance for a cluster as passed to
* rd_kafka_mock_cluster_new().
*/
RD_EXPORT rd_kafka_t *
rd_kafka_mock_cluster_handle (const rd_kafka_mock_cluster_t *mcluster);
/**
* @returns the rd_kafka_mock_cluster_t instance as created by
* setting the `test.mock.num.brokers` configuration property,
* or NULL if no such instance.
*/
RD_EXPORT rd_kafka_mock_cluster_t *
rd_kafka_handle_mock_cluster (const rd_kafka_t *rk);
/**
* @returns the mock cluster's bootstrap.servers list
*/
RD_EXPORT const char *
rd_kafka_mock_cluster_bootstraps (const rd_kafka_mock_cluster_t *mcluster);
/**
* @brief Clear the cluster's error state for the given \p ApiKey.
*/
RD_EXPORT
void rd_kafka_mock_clear_request_errors (rd_kafka_mock_cluster_t *mcluster,
int16_t ApiKey);
/**
* @brief Push \p cnt errors in the \p ... va-arg list onto the cluster's
* error stack for the given \p ApiKey.
*
* \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
*
* The following \p cnt protocol requests matching \p ApiKey will fail with the
* provided error code and removed from the stack, starting with
* the first error code, then the second, etc.
*
* Passing \c RD_KAFKA_RESP_ERR__TRANSPORT will make the mock broker
* disconnect the client which can be useful to trigger a disconnect on certain
* requests.
*/
RD_EXPORT
void rd_kafka_mock_push_request_errors (rd_kafka_mock_cluster_t *mcluster,
int16_t ApiKey, size_t cnt, ...);
/**
* @brief Same as rd_kafka_mock_push_request_errors() but takes
* an array of errors.
*/
RD_EXPORT void
rd_kafka_mock_push_request_errors_array (rd_kafka_mock_cluster_t *mcluster,
int16_t ApiKey,
size_t cnt,
const rd_kafka_resp_err_t *errors);
/**
* @brief Push \p cnt errors and RTT tuples in the \p ... va-arg list onto
* the broker's error stack for the given \p ApiKey.
*
* \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
*
* Each entry is a tuple of:
* rd_kafka_resp_err_t err - error to return (or 0)
* int rtt_ms - response RTT/delay in milliseconds (or 0)
*
* The following \p cnt protocol requests matching \p ApiKey will fail with the
* provided error code and removed from the stack, starting with
* the first error code, then the second, etc.
*
* @remark The broker errors take precedence over the cluster errors.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_push_request_error_rtts (rd_kafka_mock_cluster_t *mcluster,
int32_t broker_id,
int16_t ApiKey, size_t cnt, ...);
/**
* @brief Set the topic error to return in protocol requests.
*
* Currently only used for TopicMetadataRequest and AddPartitionsToTxnRequest.
*/
RD_EXPORT
void rd_kafka_mock_topic_set_error (rd_kafka_mock_cluster_t *mcluster,
const char *topic,
rd_kafka_resp_err_t err);
/**
* @brief Creates a topic.
*
* This is an alternative to automatic topic creation as performed by
* the client itself.
*
* @remark The Topic Admin API (CreateTopics) is not supported by the
* mock broker.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_topic_create (rd_kafka_mock_cluster_t *mcluster,
const char *topic, int partition_cnt,
int replication_factor);
/**
* @brief Sets the partition leader.
*
* The topic will be created if it does not exist.
*
* \p broker_id needs to be an existing broker, or -1 to make the
* partition leader-less.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_partition_set_leader (rd_kafka_mock_cluster_t *mcluster,
const char *topic, int32_t partition,
int32_t broker_id);
/**
* @brief Sets the partition's preferred replica / follower.
*
* The topic will be created if it does not exist.
*
* \p broker_id does not need to point to an existing broker.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_partition_set_follower (rd_kafka_mock_cluster_t *mcluster,
const char *topic, int32_t partition,
int32_t broker_id);
/**
* @brief Sets the partition's preferred replica / follower low and high
* watermarks.
*
* The topic will be created if it does not exist.
*
* Setting an offset to -1 will revert back to the leader's corresponding
* watermark.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_partition_set_follower_wmarks (rd_kafka_mock_cluster_t *mcluster,
const char *topic,
int32_t partition,
int64_t lo, int64_t hi);
/**
* @brief Disconnects the broker and disallows any new connections.
* This does NOT trigger leader change.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_down (rd_kafka_mock_cluster_t *mcluster,
int32_t broker_id);
/**
* @brief Makes the broker accept connections again.
* This does NOT trigger leader change.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_up (rd_kafka_mock_cluster_t *mcluster,
int32_t broker_id);
/**
* @brief Set broker round-trip-time delay in milliseconds.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_rtt (rd_kafka_mock_cluster_t *mcluster,
int32_t broker_id, int rtt_ms);
/**
* @brief Sets the broker's rack as reported in Metadata to the client.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_broker_set_rack (rd_kafka_mock_cluster_t *mcluster,
int32_t broker_id, const char *rack);
/**
 * @brief Explicitly sets the coordinator. If this API is not used, a standard
 * hashing scheme will be used.
*
* @param key_type "transaction" or "group"
* @param key The transactional.id or group.id
* @param broker_id The new coordinator, does not have to be a valid broker.
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_coordinator_set (rd_kafka_mock_cluster_t *mcluster,
const char *key_type, const char *key,
int32_t broker_id);
/**
* @brief Set the allowed ApiVersion range for \p ApiKey.
*
* Set \p MinVersion and \p MaxVersion to -1 to disable the API
* completely.
*
* \p MaxVersion MUST not exceed the maximum implemented value,
* see rdkafka_mock_handlers.c.
*
* @param ApiKey Protocol request type/key
* @param MinVersion Minimum version supported (or -1 to disable).
 * @param MaxVersion Maximum version supported (or -1 to disable).
*/
RD_EXPORT rd_kafka_resp_err_t
rd_kafka_mock_set_apiversion (rd_kafka_mock_cluster_t *mcluster,
int16_t ApiKey,
int16_t MinVersion, int16_t MaxVersion);
/**@}*/
#ifdef __cplusplus
}
#endif
#endif /* _RDKAFKA_MOCK_H_ */
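As the overview comment near the top of this header notes, the simplest way to use the mock cluster from the Go client is to set the test.mock.num.brokers configuration property; a hedged fragment (broker count arbitrary, imports as in the earlier sketches):

```go
// librdkafka replaces any configured bootstrap.servers with an in-process mock cluster.
p, err := kafka.NewProducer(&kafka.ConfigMap{"test.mock.num.brokers": 3})
if err != nil {
	log.Fatal(err)
}
defer p.Close()
```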

84
vendor/github.com/confluentinc/confluent-kafka-go/kafka/mockcluster.go generated vendored

@@ -0,0 +1,84 @@
/**
* Copyright 2022 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import "unsafe"
/*
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"
*/
import "C"
// MockCluster represents a Kafka mock cluster instance which can be used
// for testing.
type MockCluster struct {
rk *C.rd_kafka_t
mcluster *C.rd_kafka_mock_cluster_t
}
// NewMockCluster provides a mock Kafka cluster with a configurable
// number of brokers that support a reasonable subset of Kafka protocol
// operations, error injection, etc.
//
// Mock clusters provide localhost listeners that can be used as the bootstrap
// servers by multiple Kafka client instances.
//
// Currently supported functionality:
// - Producer
// - Idempotent Producer
// - Transactional Producer
// - Low-level consumer
// - High-level balanced consumer groups with offset commits
// - Topic Metadata and auto creation
//
// Warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
func NewMockCluster(brokerCount int) (*MockCluster, error) {
mc := &MockCluster{}
cErrstr := (*C.char)(C.malloc(C.size_t(512)))
defer C.free(unsafe.Pointer(cErrstr))
cConf := C.rd_kafka_conf_new()
mc.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256)
if mc.rk == nil {
C.rd_kafka_conf_destroy(cConf)
return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
}
mc.mcluster = C.rd_kafka_mock_cluster_new(mc.rk, C.int(brokerCount))
if mc.mcluster == nil {
C.rd_kafka_destroy(mc.rk)
return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
}
return mc, nil
}
// BootstrapServers returns the bootstrap.servers property for this MockCluster
func (mc *MockCluster) BootstrapServers() string {
return C.GoString(C.rd_kafka_mock_cluster_bootstraps(mc.mcluster))
}
// Close and destroy the MockCluster
func (mc *MockCluster) Close() {
C.rd_kafka_mock_cluster_destroy(mc.mcluster)
C.rd_kafka_destroy(mc.rk)
}
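A rough, self-contained illustration of the new MockCluster API (broker count and topic name are arbitrary choices for the sketch, not part of the vendored code):

```go
package main

import (
	"fmt"
	"log"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	// Start an in-process mock cluster with three brokers.
	mc, err := kafka.NewMockCluster(3)
	if err != nil {
		log.Fatal(err)
	}
	defer mc.Close()

	// Point an ordinary producer at the mock cluster's listeners.
	p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": mc.BootstrapServers()})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	topic := "test-topic" // hypothetical topic; the mock broker auto-creates it
	err = p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte("hello mock cluster"),
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d message(s) still outstanding after flush\n", p.Flush(5000))
}
```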

4
vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go generated vendored

@@ -1,3 +1,5 @@
package kafka
/**
* Copyright 2016 Confluent Inc.
*
@@ -14,8 +16,6 @@
* limitations under the License.
*/
package kafka
import (
"context"
"fmt"

2
vendor/github.com/confluentinc/confluent-kafka-go/kafka/select_rdkafka.h generated vendored

@@ -24,6 +24,8 @@
#ifdef USE_VENDORED_LIBRDKAFKA
#include "librdkafka_vendor/rdkafka.h"
#include "librdkafka_vendor/rdkafka_mock.h"
#else
#include <librdkafka/rdkafka.h>
#include <librdkafka/rdkafka_mock.h>
#endif

4
vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go generated vendored

@@ -77,13 +77,13 @@ func testconfRead() bool {
return true
}
// update existing ConfigMap with key=value pairs from testconf.Config
// update existing ConfigMap with key=value pairs from testconf.SerializerConfig
func (cm *ConfigMap) updateFromTestconf() error {
if testconf.Config == nil {
return nil
}
// Translate "key=value" pairs in Config to ConfigMap
// Translate "key=value" pairs in SerializerConfig to ConfigMap
for _, s := range testconf.Config {
err := cm.Set(s)
if err != nil {

21
vendor/github.com/mattn/go-colorable/LICENSE generated vendored

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Yasuhiro Matsumoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

48
vendor/github.com/mattn/go-colorable/README.md generated vendored

@@ -0,0 +1,48 @@
# go-colorable
[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest)
[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable)
[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
Colorable writer for windows.
For example, most logger packages don't show colors on Windows. (I know it can be done with ansicon, but I don't want to.)
This package makes it possible to handle ANSI color escape sequences on Windows.
## Too Bad!
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
## So Good!
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
## Usage
```go
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
logrus.SetOutput(colorable.NewColorableStdout())
logrus.Info("succeeded")
logrus.Warn("not correct")
logrus.Error("something error")
logrus.Fatal("panic")
```
You can compile the above code on non-Windows OSes.
## Installation
```
$ go get github.com/mattn/go-colorable
```
# License
MIT
# Author
Yasuhiro Matsumoto (a.k.a mattn)

38
vendor/github.com/mattn/go-colorable/colorable_appengine.go generated vendored

@@ -0,0 +1,38 @@
//go:build appengine
// +build appengine
package colorable
import (
"io"
"os"
_ "github.com/mattn/go-isatty"
)
// NewColorable returns new instance of Writer which handles escape sequence.
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}
return file
}
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
func NewColorableStdout() io.Writer {
return os.Stdout
}
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
func NewColorableStderr() io.Writer {
return os.Stderr
}
// EnableColorsStdout enable colors if possible.
func EnableColorsStdout(enabled *bool) func() {
if enabled != nil {
*enabled = true
}
return func() {}
}

38
vendor/github.com/mattn/go-colorable/colorable_others.go generated vendored

@@ -0,0 +1,38 @@
//go:build !windows && !appengine
// +build !windows,!appengine
package colorable
import (
"io"
"os"
_ "github.com/mattn/go-isatty"
)
// NewColorable returns new instance of Writer which handles escape sequence.
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}
return file
}
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
func NewColorableStdout() io.Writer {
return os.Stdout
}
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
func NewColorableStderr() io.Writer {
return os.Stderr
}
// EnableColorsStdout enable colors if possible.
func EnableColorsStdout(enabled *bool) func() {
if enabled != nil {
*enabled = true
}
return func() {}
}

1047
vendor/github.com/mattn/go-colorable/colorable_windows.go generated vendored

File diff suppressed because it is too large

12
vendor/github.com/mattn/go-colorable/go.test.sh generated vendored

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
set -e
echo "" > coverage.txt
for d in $(go list ./... | grep -v vendor); do
go test -race -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
rm profile.out
fi
done

57
vendor/github.com/mattn/go-colorable/noncolorable.go generated vendored

@@ -0,0 +1,57 @@
package colorable
import (
"bytes"
"io"
)
// NonColorable holds writer but removes escape sequence.
type NonColorable struct {
out io.Writer
}
// NewNonColorable returns new instance of Writer which removes escape sequence from Writer.
func NewNonColorable(w io.Writer) io.Writer {
return &NonColorable{out: w}
}
// Write writes data on console
func (w *NonColorable) Write(data []byte) (n int, err error) {
er := bytes.NewReader(data)
var plaintext bytes.Buffer
loop:
for {
c1, err := er.ReadByte()
if err != nil {
plaintext.WriteTo(w.out)
break loop
}
if c1 != 0x1b {
plaintext.WriteByte(c1)
continue
}
_, err = plaintext.WriteTo(w.out)
if err != nil {
break loop
}
c2, err := er.ReadByte()
if err != nil {
break loop
}
if c2 != 0x5b {
continue
}
for {
c, err := er.ReadByte()
if err != nil {
break loop
}
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
break
}
}
}
return len(data), nil
}
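A quick hedged illustration of what the NonColorable writer does, namely stripping ANSI escape sequences before forwarding to the wrapped writer:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	var buf bytes.Buffer
	w := colorable.NewNonColorable(&buf)

	// "\x1b[31m" / "\x1b[0m" would normally switch colors; NonColorable drops them.
	fmt.Fprint(w, "\x1b[31mred text\x1b[0m plain text")
	fmt.Println(buf.String()) // Output: red text plain text
}
```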

9
vendor/github.com/mattn/go-isatty/LICENSE generated vendored

@@ -0,0 +1,9 @@
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
MIT License (Expat)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

50
vendor/github.com/mattn/go-isatty/README.md generated vendored

@@ -0,0 +1,50 @@
# go-isatty
[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty)
[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
isatty for golang
## Usage
```go
package main
import (
"fmt"
"github.com/mattn/go-isatty"
"os"
)
func main() {
if isatty.IsTerminal(os.Stdout.Fd()) {
fmt.Println("Is Terminal")
} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
fmt.Println("Is Cygwin/MSYS2 Terminal")
} else {
fmt.Println("Is Not Terminal")
}
}
```
## Installation
```
$ go get github.com/mattn/go-isatty
```
## License
MIT
## Author
Yasuhiro Matsumoto (a.k.a mattn)
## Thanks
* k-takata: base idea for IsCygwinTerminal
https://github.com/k-takata/go-iscygpty

2
vendor/github.com/mattn/go-isatty/doc.go generated vendored

@@ -0,0 +1,2 @@
// Package isatty implements interface to isatty
package isatty

12
vendor/github.com/mattn/go-isatty/go.test.sh generated vendored

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
set -e
echo "" > coverage.txt
for d in $(go list ./... | grep -v vendor); do
go test -race -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
rm profile.out
fi
done

20
vendor/github.com/mattn/go-isatty/isatty_bsd.go generated vendored

@@ -0,0 +1,20 @@
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo
// +build darwin freebsd openbsd netbsd dragonfly hurd
// +build !appengine
// +build !tinygo
package isatty
import "golang.org/x/sys/unix"
// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
_, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA)
return err == nil
}
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
}

17
vendor/github.com/mattn/go-isatty/isatty_others.go generated vendored

@@ -0,0 +1,17 @@
//go:build (appengine || js || nacl || tinygo || wasm) && !windows
// +build appengine js nacl tinygo wasm
// +build !windows
package isatty
// IsTerminal returns true if the file descriptor is terminal which
// is always false on js and appengine classic which is a sandboxed PaaS.
func IsTerminal(fd uintptr) bool {
return false
}
// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
}

23
vendor/github.com/mattn/go-isatty/isatty_plan9.go generated vendored

@@ -0,0 +1,23 @@
//go:build plan9
// +build plan9
package isatty
import (
"syscall"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
path, err := syscall.Fd2path(int(fd))
if err != nil {
return false
}
return path == "/dev/cons" || path == "/mnt/term/dev/cons"
}
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
}

21
vendor/github.com/mattn/go-isatty/isatty_solaris.go generated vendored

@@ -0,0 +1,21 @@
//go:build solaris && !appengine
// +build solaris,!appengine
package isatty
import (
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c
func IsTerminal(fd uintptr) bool {
_, err := unix.IoctlGetTermio(int(fd), unix.TCGETA)
return err == nil
}
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
}

20
vendor/github.com/mattn/go-isatty/isatty_tcgets.go generated vendored

@@ -0,0 +1,20 @@
//go:build (linux || aix || zos) && !appengine && !tinygo
// +build linux aix zos
// +build !appengine
// +build !tinygo
package isatty
import "golang.org/x/sys/unix"
// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
_, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
return err == nil
}
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
}

125
vendor/github.com/mattn/go-isatty/isatty_windows.go generated vendored

@@ -0,0 +1,125 @@
//go:build windows && !appengine
// +build windows,!appengine
package isatty
import (
"errors"
"strings"
"syscall"
"unicode/utf16"
"unsafe"
)
const (
objectNameInfo uintptr = 1
fileNameInfo = 2
fileTypePipe = 3
)
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
ntdll = syscall.NewLazyDLL("ntdll.dll")
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
procGetFileType = kernel32.NewProc("GetFileType")
procNtQueryObject = ntdll.NewProc("NtQueryObject")
)
func init() {
// Check if GetFileInformationByHandleEx is available.
if procGetFileInformationByHandleEx.Find() != nil {
procGetFileInformationByHandleEx = nil
}
}
// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}
// Check pipe name is used for cygwin/msys2 pty.
// Cygwin/MSYS2 PTY has a name like:
// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
func isCygwinPipeName(name string) bool {
token := strings.Split(name, "-")
if len(token) < 5 {
return false
}
if token[0] != `\msys` &&
token[0] != `\cygwin` &&
token[0] != `\Device\NamedPipe\msys` &&
token[0] != `\Device\NamedPipe\cygwin` {
return false
}
if token[1] == "" {
return false
}
if !strings.HasPrefix(token[2], "pty") {
return false
}
if token[3] != `from` && token[3] != `to` {
return false
}
if token[4] != "master" {
return false
}
return true
}
// getFileNameByHandle uses the undocumented ntdll NtQueryObject call to get the full file name for a
// file handle. GetFileInformationByHandleEx is not available before Windows Vista, and some users are
// still on Windows XP, so this is a workaround for them; it also works on systems from
// Windows Vista to 10.
// See https://stackoverflow.com/a/18792477 for details.
func getFileNameByHandle(fd uintptr) (string, error) {
if procNtQueryObject == nil {
return "", errors.New("ntdll.dll: NtQueryObject not supported")
}
var buf [4 + syscall.MAX_PATH]uint16
var result int
r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
if r != 0 {
return "", e
}
return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
}
// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal.
func IsCygwinTerminal(fd uintptr) bool {
if procGetFileInformationByHandleEx == nil {
name, err := getFileNameByHandle(fd)
if err != nil {
return false
}
return isCygwinPipeName(name)
}
// Cygwin/msys's pty is a pipe.
ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
if ft != fileTypePipe || e != 0 {
return false
}
var buf [2 + syscall.MAX_PATH]uint16
r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
uintptr(len(buf)*2), 0, 0)
if r == 0 || e != 0 {
return false
}
l := *(*uint32)(unsafe.Pointer(&buf))
return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
}

103
vendor/github.com/rs/zerolog/README.md generated vendored

@@ -1,6 +1,6 @@
# Zero Allocation JSON Logger
[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog)
[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://github.com/rs/zerolog/actions/workflows/test.yml/badge.svg)](https://github.com/rs/zerolog/actions/workflows/test.yml) [![Go Coverage](https://github.com/rs/zerolog/wiki/coverage.svg)](https://raw.githack.com/wiki/rs/zerolog/coverage.html)
The zerolog package provides a fast and simple logger dedicated to JSON output.
@@ -24,7 +24,7 @@ Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog)
* [Sampling](#log-sampling)
* [Hooks](#hooks)
* [Contextual fields](#contextual-logging)
* `context.Context` integration
* [`context.Context` integration](#contextcontext-integration)
* [Integration with `net/http`](#integration-with-nethttp)
* [JSON and CBOR encoding formats](#binary-encoding)
* [Pretty logging for development](#pretty-logging)
@@ -60,7 +60,7 @@ func main() {
// Output: {"time":1516134303,"level":"debug","message":"hello world"}
```
> Note: By default log writes to `os.Stderr`
> Note: The default log level for `log.Print` is *debug*
> Note: The default log level for `log.Print` is *trace*
### Contextual Logging
@@ -399,6 +399,8 @@ log.Logger = log.With().Str("foo", "bar").Logger()
### Add file and line number to log
Equivalent of `Llongfile`:
```go
log.Logger = log.With().Caller().Logger()
log.Info().Msg("hello world")
@@ -406,10 +408,21 @@ log.Info().Msg("hello world")
// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"}
```
Equivalent of `Lshortfile`:
```go
zerolog.CallerMarshalFunc = func(pc uintptr, file string, line int) string {
return filepath.Base(file) + ":" + strconv.Itoa(line)
}
log.Logger = log.With().Caller().Logger()
log.Info().Msg("hello world")
// Output: {"level": "info", "message": "hello world", "caller": "some_file:21"}
```
### Thread-safe, lock-free, non-blocking writer
If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follow:
If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follows:
```go
wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) {
@@ -490,6 +503,58 @@ stdlog.Print("hello world")
// Output: {"foo":"bar","message":"hello world"}
```
### context.Context integration
Go contexts are commonly passed throughout Go code, and this can help you pass
your Logger into places it might otherwise be hard to inject. The `Logger`
instance may be attached to Go context (`context.Context`) using
`Logger.WithContext(ctx)` and extracted from it using `zerolog.Ctx(ctx)`.
For example:
```go
func f() {
logger := zerolog.New(os.Stdout)
ctx := context.Background()
// Attach the Logger to the context.Context
ctx = logger.WithContext(ctx)
someFunc(ctx)
}
func someFunc(ctx context.Context) {
// Get Logger from the go Context. if it's nil, then
// `zerolog.DefaultContextLogger` is returned, if
// `DefaultContextLogger` is nil, then a disabled logger is returned.
logger := zerolog.Ctx(ctx)
logger.Info().Msg("Hello")
}
```
A second form of `context.Context` integration allows you to pass the current
context.Context into the logged event, and retrieve it from hooks. This can be
useful to log trace and span IDs or other information stored in the go context,
and facilitates the unification of logging and tracing in some systems:
```go
type TracingHook struct{}
func (h TracingHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
ctx := e.GetCtx()
spanId := getSpanIdFromContext(ctx) // as per your tracing framework
e.Str("span-id", spanId)
}
func f() {
// Setup the logger
logger := zerolog.New(os.Stdout)
logger = logger.Hook(TracingHook{})
ctx := context.Background()
// Use the Ctx function to make the context available to the hook
logger.Info().Ctx(ctx).Msg("Hello")
}
```
### Integration with `net/http`
The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`.
@@ -564,7 +629,7 @@ func main() {
## Global Settings
Some settings can be changed and will by applied to all loggers:
Some settings can be changed and will be applied to all loggers:
* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods).
* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode).
@@ -573,10 +638,14 @@ Some settings can be changed and will by applied to all loggers:
* `zerolog.LevelFieldName`: Can be set to customize level field name.
* `zerolog.MessageFieldName`: Can be set to customize message field name.
* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formated as UNIX timestamp.
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formatted as UNIX timestamp.
* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`).
* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`).
* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on the stderr. This handler must be thread safe and non-blocking.
* `zerolog.FloatingPointPrecision`: If set to a value other than -1, controls the number
of digits when formatting float numbers in JSON. See
[strconv.FormatFloat](https://pkg.go.dev/strconv#FormatFloat)
for more details.
## Field Types
@@ -604,7 +673,7 @@ Most fields are also available in the slice format (`Strs` for `[]string`, `Errs
## Binary Encoding
In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](http://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows:
In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](https://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows:
```bash
go build -tags binary_log .
@@ -621,7 +690,7 @@ with zerolog library is [CSD](https://github.com/toravir/csd/).
## Benchmarks
See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks.
See [logbench](http://bench.zerolog.io/) for more comprehensive and up-to-date benchmarks.
All operations are allocation free (those numbers *include* JSON encoding):
@@ -682,6 +751,8 @@ Log a static string, without any context or `printf`-style templating:
## Caveats
### Field duplication
Note that zerolog does no de-duplication of fields. Using the same key multiple times creates multiple keys in final JSON:
```go
@@ -693,3 +764,19 @@ logger.Info().
```
In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt.
### Concurrency safety
Be careful when calling UpdateContext. It is not concurrency safe. Use the With method to create a child logger:
```go
func handler(w http.ResponseWriter, r *http.Request) {
// Create a child logger for concurrency safety
logger := log.Logger.With().Logger()
// Add context fields, for example User-Agent from HTTP headers
logger.UpdateContext(func(c zerolog.Context) zerolog.Context {
...
})
}
```

48
vendor/github.com/rs/zerolog/array.go generated vendored

@@ -49,7 +49,7 @@ func (*Array) MarshalZerologArray(*Array) {
func (a *Array) write(dst []byte) []byte {
dst = enc.AppendArrayStart(dst)
if len(a.buf) > 0 {
dst = append(append(dst, a.buf...))
dst = append(dst, a.buf...)
}
dst = enc.AppendArrayEnd(dst)
putArray(a)
@@ -57,7 +57,7 @@ func (a *Array) write(dst []byte) []byte {
}
// Object marshals an object that implement the LogObjectMarshaler
// interface and append append it to the array.
// interface and appends it to the array.
func (a *Array) Object(obj LogObjectMarshaler) *Array {
e := Dict()
obj.MarshalZerologObject(e)
@@ -67,19 +67,19 @@ func (a *Array) Object(obj LogObjectMarshaler) *Array {
return a
}
// Str append append the val as a string to the array.
// Str appends the val as a string to the array.
func (a *Array) Str(val string) *Array {
a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val)
return a
}
// Bytes append append the val as a string to the array.
// Bytes appends the val as a string to the array.
func (a *Array) Bytes(val []byte) *Array {
a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val)
return a
}
// Hex append append the val as a hex string to the array.
// Hex appends the val as a hex string to the array.
func (a *Array) Hex(val []byte) *Array {
a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val)
return a
@@ -115,97 +115,97 @@ func (a *Array) Err(err error) *Array {
return a
}
// Bool append append the val as a bool to the array.
// Bool appends the val as a bool to the array.
func (a *Array) Bool(b bool) *Array {
a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b)
return a
}
// Int append append i as a int to the array.
// Int appends i as a int to the array.
func (a *Array) Int(i int) *Array {
a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i)
return a
}
// Int8 append append i as a int8 to the array.
// Int8 appends i as a int8 to the array.
func (a *Array) Int8(i int8) *Array {
a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i)
return a
}
// Int16 append append i as a int16 to the array.
// Int16 appends i as a int16 to the array.
func (a *Array) Int16(i int16) *Array {
a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i)
return a
}
// Int32 append append i as a int32 to the array.
// Int32 appends i as a int32 to the array.
func (a *Array) Int32(i int32) *Array {
a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i)
return a
}
// Int64 append append i as a int64 to the array.
// Int64 appends i as a int64 to the array.
func (a *Array) Int64(i int64) *Array {
a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i)
return a
}
// Uint append append i as a uint to the array.
// Uint appends i as a uint to the array.
func (a *Array) Uint(i uint) *Array {
a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i)
return a
}
// Uint8 append append i as a uint8 to the array.
// Uint8 appends i as a uint8 to the array.
func (a *Array) Uint8(i uint8) *Array {
a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i)
return a
}
// Uint16 append append i as a uint16 to the array.
// Uint16 appends i as a uint16 to the array.
func (a *Array) Uint16(i uint16) *Array {
a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i)
return a
}
// Uint32 append append i as a uint32 to the array.
// Uint32 appends i as a uint32 to the array.
func (a *Array) Uint32(i uint32) *Array {
a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i)
return a
}
// Uint64 append append i as a uint64 to the array.
// Uint64 appends i as a uint64 to the array.
func (a *Array) Uint64(i uint64) *Array {
a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i)
return a
}
// Float32 append append f as a float32 to the array.
// Float32 appends f as a float32 to the array.
func (a *Array) Float32(f float32) *Array {
a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f)
a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f, FloatingPointPrecision)
return a
}
// Float64 append append f as a float64 to the array.
// Float64 appends f as a float64 to the array.
func (a *Array) Float64(f float64) *Array {
a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f)
a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f, FloatingPointPrecision)
return a
}
// Time append append t formated as string using zerolog.TimeFieldFormat.
// Time appends t formatted as string using zerolog.TimeFieldFormat.
func (a *Array) Time(t time.Time) *Array {
a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat)
return a
}
// Dur append append d to the array.
// Dur appends d to the array.
func (a *Array) Dur(d time.Duration) *Array {
a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger)
a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return a
}
// Interface append append i marshaled using reflection.
// Interface appends i marshaled using reflection.
func (a *Array) Interface(i interface{}) *Array {
if obj, ok := i.(LogObjectMarshaler); ok {
return a.Object(obj)

215
vendor/github.com/rs/zerolog/console.go generated vendored

@@ -12,6 +12,8 @@ import (
"strings"
"sync"
"time"
"github.com/mattn/go-colorable"
)
const (
@@ -26,6 +28,8 @@ const (
colorBold = 1
colorDarkGray = 90
unknownLevel = "???"
)
var (
@@ -55,12 +59,24 @@ type ConsoleWriter struct {
// TimeFormat specifies the format for timestamp in output.
TimeFormat string
// TimeLocation tells ConsoleWriter’s default FormatTimestamp
// how to localize the time.
TimeLocation *time.Location
// PartsOrder defines the order of parts in output.
PartsOrder []string
// PartsExclude defines parts to not display in output.
PartsExclude []string
// FieldsOrder defines the order of contextual fields in output.
FieldsOrder []string
fieldIsOrdered map[string]int
// FieldsExclude defines contextual fields to not display in output.
FieldsExclude []string
FormatTimestamp Formatter
FormatLevel Formatter
FormatCaller Formatter
@@ -69,25 +85,39 @@ type ConsoleWriter struct {
FormatFieldValue Formatter
FormatErrFieldName Formatter
FormatErrFieldValue Formatter
FormatExtra func(map[string]interface{}, *bytes.Buffer) error
FormatPrepare func(map[string]interface{}) error
}
// NewConsoleWriter creates and initializes a new ConsoleWriter.
func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter {
w := ConsoleWriter{
Out: os.Stdout,
TimeFormat: consoleDefaultTimeFormat,
PartsOrder: consoleDefaultPartsOrder(),
Out: os.Stdout,
TimeFormat: consoleDefaultTimeFormat,
PartsOrder: consoleDefaultPartsOrder(),
}
for _, opt := range options {
opt(&w)
}
// Fix color on Windows
if w.Out == os.Stdout || w.Out == os.Stderr {
w.Out = colorable.NewColorable(w.Out.(*os.File))
}
return w
}
// Write transforms the JSON input with formatters and appends to w.Out.
func (w ConsoleWriter) Write(p []byte) (n int, err error) {
// Fix color on Windows
if w.Out == os.Stdout || w.Out == os.Stderr {
w.Out = colorable.NewColorable(w.Out.(*os.File))
}
if w.PartsOrder == nil {
w.PartsOrder = consoleDefaultPartsOrder()
}
@@ -107,33 +137,74 @@ func (w ConsoleWriter) Write(p []byte) (n int, err error) {
return n, fmt.Errorf("cannot decode event: %s", err)
}
if w.FormatPrepare != nil {
err = w.FormatPrepare(evt)
if err != nil {
return n, err
}
}
for _, p := range w.PartsOrder {
w.writePart(buf, evt, p)
}
w.writeFields(evt, buf)
if w.FormatExtra != nil {
err = w.FormatExtra(evt, buf)
if err != nil {
return n, err
}
}
err = buf.WriteByte('\n')
if err != nil {
return n, err
}
_, err = buf.WriteTo(w.Out)
return len(p), err
}
// Call the underlying writer's Close method if it is an io.Closer. Otherwise
// does nothing.
func (w ConsoleWriter) Close() error {
if closer, ok := w.Out.(io.Closer); ok {
return closer.Close()
}
return nil
}
// writeFields appends formatted key-value pairs to buf.
func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) {
var fields = make([]string, 0, len(evt))
for field := range evt {
var isExcluded bool
for _, excluded := range w.FieldsExclude {
if field == excluded {
isExcluded = true
break
}
}
if isExcluded {
continue
}
switch field {
case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName:
continue
}
fields = append(fields, field)
}
sort.Strings(fields)
if len(fields) > 0 {
if len(w.FieldsOrder) > 0 {
w.orderFields(fields)
} else {
sort.Strings(fields)
}
// Write space only if something has already been written to the buffer, and if there are fields.
if buf.Len() > 0 && len(fields) > 0 {
buf.WriteByte(' ')
}
@@ -194,7 +265,7 @@ func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer
case json.Number:
buf.WriteString(fv(fValue))
default:
b, err := json.Marshal(fValue)
b, err := InterfaceMarshalFunc(fValue)
if err != nil {
fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err)
} else {
@@ -229,13 +300,13 @@ func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{},
}
case TimestampFieldName:
if w.FormatTimestamp == nil {
f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor)
f = consoleDefaultFormatTimestamp(w.TimeFormat, w.TimeLocation, w.NoColor)
} else {
f = w.FormatTimestamp
}
case MessageFieldName:
if w.FormatMessage == nil {
f = consoleDefaultFormatMessage
f = consoleDefaultFormatMessage(w.NoColor, evt[LevelFieldName])
} else {
f = w.FormatMessage
}
@@ -256,11 +327,37 @@ func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{},
var s = f(evt[p])
if len(s) > 0 {
if buf.Len() > 0 {
buf.WriteByte(' ') // Write space only if not the first part
}
buf.WriteString(s)
if p != w.PartsOrder[len(w.PartsOrder)-1] { // Skip space for last part
buf.WriteByte(' ')
}
}
// orderFields takes an array of field names and an array representing field order
// and returns an array with any ordered fields at the beginning, in order,
// and the remaining fields after in their original order.
func (w ConsoleWriter) orderFields(fields []string) {
if w.fieldIsOrdered == nil {
w.fieldIsOrdered = make(map[string]int)
for i, fieldName := range w.FieldsOrder {
w.fieldIsOrdered[fieldName] = i
}
}
sort.Slice(fields, func(i, j int) bool {
ii, iOrdered := w.fieldIsOrdered[fields[i]]
jj, jOrdered := w.fieldIsOrdered[fields[j]]
if iOrdered && jOrdered {
return ii < jj
}
if iOrdered {
return true
}
if jOrdered {
return false
}
return fields[i] < fields[j]
})
}
// needsQuote returns true when the string s should be quoted in output.
@@ -273,8 +370,13 @@ func needsQuote(s string) bool {
return false
}
// colorize returns the string s wrapped in ANSI code c, unless disabled is true.
// colorize returns the string s wrapped in ANSI code c, unless disabled is true or c is 0.
func colorize(s interface{}, c int, disabled bool) string {
e := os.Getenv("NO_COLOR")
if e != "" || c == 0 {
disabled = true
}
if disabled {
return fmt.Sprintf("%s", s)
}
@@ -292,72 +394,74 @@ func consoleDefaultPartsOrder() []string {
}
}
func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter {
func consoleDefaultFormatTimestamp(timeFormat string, location *time.Location, noColor bool) Formatter {
if timeFormat == "" {
timeFormat = consoleDefaultTimeFormat
}
if location == nil {
location = time.Local
}
return func(i interface{}) string {
t := "<nil>"
switch tt := i.(type) {
case string:
ts, err := time.Parse(TimeFieldFormat, tt)
ts, err := time.ParseInLocation(TimeFieldFormat, tt, location)
if err != nil {
t = tt
} else {
t = ts.Format(timeFormat)
t = ts.In(location).Format(timeFormat)
}
case json.Number:
i, err := tt.Int64()
if err != nil {
t = tt.String()
} else {
var sec, nsec int64 = i, 0
var sec, nsec int64
switch TimeFieldFormat {
case TimeFormatUnixMs:
nsec = int64(time.Duration(i) * time.Millisecond)
sec = 0
case TimeFormatUnixNano:
sec, nsec = 0, i
case TimeFormatUnixMicro:
nsec = int64(time.Duration(i) * time.Microsecond)
sec = 0
sec, nsec = 0, int64(time.Duration(i)*time.Microsecond)
case TimeFormatUnixMs:
sec, nsec = 0, int64(time.Duration(i)*time.Millisecond)
default:
sec, nsec = i, 0
}
ts := time.Unix(sec, nsec).UTC()
t = ts.Format(timeFormat)
ts := time.Unix(sec, nsec)
t = ts.In(location).Format(timeFormat)
}
}
return colorize(t, colorDarkGray, noColor)
}
}
func stripLevel(ll string) string {
if len(ll) == 0 {
return unknownLevel
}
if len(ll) > 3 {
ll = ll[:3]
}
return strings.ToUpper(ll)
}
func consoleDefaultFormatLevel(noColor bool) Formatter {
return func(i interface{}) string {
var l string
if ll, ok := i.(string); ok {
switch ll {
case LevelTraceValue:
l = colorize("TRC", colorMagenta, noColor)
case LevelDebugValue:
l = colorize("DBG", colorYellow, noColor)
case LevelInfoValue:
l = colorize("INF", colorGreen, noColor)
case LevelWarnValue:
l = colorize("WRN", colorRed, noColor)
case LevelErrorValue:
l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor)
case LevelFatalValue:
l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor)
case LevelPanicValue:
l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor)
default:
l = colorize("???", colorBold, noColor)
}
} else {
if i == nil {
l = colorize("???", colorBold, noColor)
} else {
l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3]
level, _ := ParseLevel(ll)
fl, ok := FormattedLevels[level]
if ok {
return colorize(fl, LevelColors[level], noColor)
}
return stripLevel(ll)
}
if i == nil {
return unknownLevel
}
return l
return stripLevel(fmt.Sprintf("%s", i))
}
}
@@ -379,11 +483,18 @@ func consoleDefaultFormatCaller(noColor bool) Formatter {
}
}
func consoleDefaultFormatMessage(i interface{}) string {
if i == nil {
return ""
func consoleDefaultFormatMessage(noColor bool, level interface{}) Formatter {
return func(i interface{}) string {
if i == nil || i == "" {
return ""
}
switch level {
case LevelInfoValue, LevelWarnValue, LevelErrorValue, LevelFatalValue, LevelPanicValue:
return colorize(fmt.Sprintf("%s", i), colorBold, noColor)
default:
return fmt.Sprintf("%s", i)
}
}
return fmt.Sprintf("%s", i)
}
func consoleDefaultFormatFieldName(noColor bool) Formatter {
@@ -404,6 +515,6 @@ func consoleDefaultFormatErrFieldName(noColor bool) Formatter {
func consoleDefaultFormatErrFieldValue(noColor bool) Formatter {
return func(i interface{}) string {
return colorize(fmt.Sprintf("%s", i), colorRed, noColor)
return colorize(colorize(fmt.Sprintf("%s", i), colorBold, noColor), colorRed, noColor)
}
}

73
vendor/github.com/rs/zerolog/context.go generated vendored

@@ -1,8 +1,9 @@
package zerolog
import (
"context"
"fmt"
"io/ioutil"
"io"
"math"
"net"
"time"
@@ -22,7 +23,7 @@ func (c Context) Logger() Logger {
// Only map[string]interface{} and []interface{} are accepted. []interface{} must
// alternate string keys and arbitrary values, and extraneous ones are ignored.
func (c Context) Fields(fields interface{}) Context {
c.l.context = appendFields(c.l.context, fields)
c.l.context = appendFields(c.l.context, fields, c.l.stack)
return c
}
@@ -56,7 +57,7 @@ func (c Context) Array(key string, arr LogArrayMarshaler) Context {
// Object marshals an object that implement the LogObjectMarshaler interface.
func (c Context) Object(key string, obj LogObjectMarshaler) Context {
e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
e := newEvent(LevelWriterAdapter{io.Discard}, 0)
e.Object(key, obj)
c.l.context = enc.AppendObjectData(c.l.context, e.buf)
putEvent(e)
@@ -65,7 +66,7 @@ func (c Context) Object(key string, obj LogObjectMarshaler) Context {
// EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface.
func (c Context) EmbedObject(obj LogObjectMarshaler) Context {
e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
e := newEvent(LevelWriterAdapter{io.Discard}, 0)
e.EmbedObject(obj)
c.l.context = enc.AppendObjectData(c.l.context, e.buf)
putEvent(e)
@@ -162,9 +163,34 @@ func (c Context) Errs(key string, errs []error) Context {
// Err adds the field "error" with serialized err to the logger context.
func (c Context) Err(err error) Context {
if c.l.stack && ErrorStackMarshaler != nil {
switch m := ErrorStackMarshaler(err).(type) {
case nil:
case LogObjectMarshaler:
c = c.Object(ErrorStackFieldName, m)
case error:
if m != nil && !isNilValue(m) {
c = c.Str(ErrorStackFieldName, m.Error())
}
case string:
c = c.Str(ErrorStackFieldName, m)
default:
c = c.Interface(ErrorStackFieldName, m)
}
}
return c.AnErr(ErrorFieldName, err)
}
// Ctx adds the context.Context to the logger context. The context.Context is
// not rendered in the error message, but is made available for hooks to use.
// A typical use case is to extract tracing information from the
// context.Context.
func (c Context) Ctx(ctx context.Context) Context {
c.l.ctx = ctx
return c
}
// Bool adds the field key with val as a bool to the logger context.
func (c Context) Bool(key string, b bool) Context {
c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b)
@@ -299,25 +325,25 @@ func (c Context) Uints64(key string, i []uint64) Context {
// Float32 adds the field key with f as a float32 to the logger context.
func (c Context) Float32(key string, f float32) Context {
c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}
// Floats32 adds the field key with f as a []float32 to the logger context.
func (c Context) Floats32(key string, f []float32) Context {
c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}
// Float64 adds the field key with f as a float64 to the logger context.
func (c Context) Float64(key string, f float64) Context {
c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}
// Floats64 adds the field key with f as a []float64 to the logger context.
func (c Context) Floats64(key string, f []float64) Context {
c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}
@@ -329,8 +355,9 @@ func (ts timestampHook) Run(e *Event, level Level, msg string) {
var th = timestampHook{}
// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key.
// Timestamp adds the current local time to the logger context with the "time" key, formatted using zerolog.TimeFieldFormat.
// To customize the key name, change zerolog.TimestampFieldName.
// To customize the time format, change zerolog.TimeFieldFormat.
//
// NOTE: It won't dedupe the "time" key if the *Context has one already.
func (c Context) Timestamp() Context {
@@ -338,13 +365,13 @@ func (c Context) Timestamp() Context {
return c
}
// Time adds the field key with t formated as string using zerolog.TimeFieldFormat.
// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (c Context) Time(key string, t time.Time) Context {
c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
return c
}
// Times adds the field key with t formated as string using zerolog.TimeFieldFormat.
// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (c Context) Times(key string, t []time.Time) Context {
c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
return c
@@ -352,22 +379,42 @@ func (c Context) Times(key string, t []time.Time) Context {
// Dur adds the fields key with d divided by unit and stored as a float.
func (c Context) Dur(key string, d time.Duration) Context {
c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return c
}
// Durs adds the fields key with d divided by unit and stored as a float.
func (c Context) Durs(key string, d []time.Duration) Context {
c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return c
}
// Interface adds the field key with obj marshaled using reflection.
func (c Context) Interface(key string, i interface{}) Context {
if obj, ok := i.(LogObjectMarshaler); ok {
return c.Object(key, obj)
}
c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i)
return c
}
// Type adds the field key with val's type using reflection.
func (c Context) Type(key string, val interface{}) Context {
c.l.context = enc.AppendType(enc.AppendKey(c.l.context, key), val)
return c
}
// Any is a wrapper around Context.Interface.
func (c Context) Any(key string, i interface{}) Context {
return c.Interface(key, i)
}
// Reset removes all the context fields.
func (c Context) Reset() Context {
c.l.context = enc.AppendBeginMarker(make([]byte, 0, 500))
return c
}
type callerHook struct {
callerSkipFrameCount int
}

23
vendor/github.com/rs/zerolog/ctx.go generated vendored

@@ -14,10 +14,15 @@ func init() {
type ctxKey struct{}
// WithContext returns a copy of ctx with l associated. If an instance of Logger
// is already in the context, the context is not updated.
// WithContext returns a copy of ctx with the receiver attached. The Logger
// attached to the provided Context (if any) will not be effected. If the
// receiver's log level is Disabled it will only be attached to the returned
// Context if the provided Context has a previously attached Logger. If the
// provided Context has no attached Logger, a Disabled Logger will not be
// attached.
//
// For instance, to add a field to an existing logger in the context, use this
// Note: to modify the existing Logger attached to a Context (instead of
// replacing it in a new Context), use UpdateContext with the following
// notation:
//
// ctx := r.Context()
@@ -25,17 +30,13 @@ type ctxKey struct{}
// l.UpdateContext(func(c Context) Context {
// return c.Str("bar", "baz")
// })
func (l *Logger) WithContext(ctx context.Context) context.Context {
if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok {
if lp == l {
// Do not store same logger.
return ctx
}
} else if l.level == Disabled {
//
func (l Logger) WithContext(ctx context.Context) context.Context {
if _, ok := ctx.Value(ctxKey{}).(*Logger); !ok && l.level == Disabled {
// Do not store disabled logger.
return ctx
}
return context.WithValue(ctx, ctxKey{}, l)
return context.WithValue(ctx, ctxKey{}, &l)
}
// Ctx returns the Logger associated with the ctx. If no logger

12
vendor/github.com/rs/zerolog/encoder.go generated vendored

@@ -13,13 +13,13 @@ type encoder interface {
AppendBool(dst []byte, val bool) []byte
AppendBools(dst []byte, vals []bool) []byte
AppendBytes(dst, s []byte) []byte
AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte
AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte
AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, precision int) []byte
AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, precision int) []byte
AppendEndMarker(dst []byte) []byte
AppendFloat32(dst []byte, val float32) []byte
AppendFloat64(dst []byte, val float64) []byte
AppendFloats32(dst []byte, vals []float32) []byte
AppendFloats64(dst []byte, vals []float64) []byte
AppendFloat32(dst []byte, val float32, precision int) []byte
AppendFloat64(dst []byte, val float64, precision int) []byte
AppendFloats32(dst []byte, vals []float32, precision int) []byte
AppendFloats64(dst []byte, vals []float64, precision int) []byte
AppendHex(dst, s []byte) []byte
AppendIPAddr(dst []byte, ip net.IP) []byte
AppendIPPrefix(dst []byte, pfx net.IPNet) []byte

3
vendor/github.com/rs/zerolog/encoder_cbor.go generated vendored

@@ -24,6 +24,9 @@ func init() {
func appendJSON(dst []byte, j []byte) []byte {
return cbor.AppendEmbeddedJSON(dst, j)
}
func appendCBOR(dst []byte, c []byte) []byte {
return cbor.AppendEmbeddedCBOR(dst, c)
}
// decodeIfBinaryToString - converts a binary formatted log msg to a
// JSON formatted String Log message.

12
vendor/github.com/rs/zerolog/encoder_json.go generated vendored

@@ -6,6 +6,7 @@ package zerolog
// JSON encoded byte stream.
import (
"encoding/base64"
"github.com/rs/zerolog/internal/json"
)
@@ -25,6 +26,17 @@ func init() {
func appendJSON(dst []byte, j []byte) []byte {
return append(dst, j...)
}
func appendCBOR(dst []byte, cbor []byte) []byte {
dst = append(dst, []byte("\"data:application/cbor;base64,")...)
l := len(dst)
enc := base64.StdEncoding
n := enc.EncodedLen(len(cbor))
for i := 0; i < n; i++ {
dst = append(dst, '.')
}
enc.Encode(dst[l:], cbor)
return append(dst, '"')
}
func decodeIfBinaryToString(in []byte) string {
return string(in)

87
vendor/github.com/rs/zerolog/event.go generated vendored

@@ -1,6 +1,7 @@
package zerolog
import (
"context"
"fmt"
"net"
"os"
@@ -24,9 +25,10 @@ type Event struct {
w LevelWriter
level Level
done func(msg string)
stack bool // enable error stack trace
ch []Hook // hooks from context
skipFrame int // The number of additional frames to skip when printing the caller.
stack bool // enable error stack trace
ch []Hook // hooks from context
skipFrame int // The number of additional frames to skip when printing the caller.
ctx context.Context // Optional Go context for event
}
func putEvent(e *Event) {
@@ -129,6 +131,13 @@ func (e *Event) Msgf(format string, v ...interface{}) {
e.msg(fmt.Sprintf(format, v...))
}
func (e *Event) MsgFunc(createMsg func() string) {
if e == nil {
return
}
e.msg(createMsg())
}
func (e *Event) msg(msg string) {
for _, hook := range e.ch {
hook.Run(e, e.level, msg)
@@ -155,7 +164,7 @@ func (e *Event) Fields(fields interface{}) *Event {
if e == nil {
return e
}
e.buf = appendFields(e.buf, fields)
e.buf = appendFields(e.buf, fields, e.stack)
return e
}
@@ -311,6 +320,18 @@ func (e *Event) RawJSON(key string, b []byte) *Event {
return e
}
// RawCBOR adds already encoded CBOR to the log line under key.
//
// No sanity check is performed on b
// Note: The full featureset of CBOR is supported as data will not be mapped to json but stored as data-url
func (e *Event) RawCBOR(key string, b []byte) *Event {
if e == nil {
return e
}
e.buf = appendCBOR(enc.AppendKey(e.buf, key), b)
return e
}
// AnErr adds the field key with serialized err to the *Event context.
// If err is nil, no field is added.
func (e *Event) AnErr(key string, err error) *Event {
@@ -398,6 +419,28 @@ func (e *Event) Stack() *Event {
return e
}
// Ctx adds the Go Context to the *Event context. The context is not rendered
// in the output message, but is available to hooks and to Func() calls via the
// GetCtx() accessor. A typical use case is to extract tracing information from
// the Go Ctx.
func (e *Event) Ctx(ctx context.Context) *Event {
if e != nil {
e.ctx = ctx
}
return e
}
// GetCtx retrieves the Go context.Context which is optionally stored in the
// Event. This allows Hooks and functions passed to Func() to retrieve values
// which are stored in the context.Context. This can be useful in tracing,
// where span information is commonly propagated in the context.Context.
func (e *Event) GetCtx() context.Context {
if e == nil || e.ctx == nil {
return context.Background()
}
return e.ctx
}
// Bool adds the field key with val as a bool to the *Event context.
func (e *Event) Bool(key string, b bool) *Event {
if e == nil {
@@ -601,7 +644,7 @@ func (e *Event) Float32(key string, f float32) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}
@@ -610,7 +653,7 @@ func (e *Event) Floats32(key string, f []float32) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}
@@ -619,7 +662,7 @@ func (e *Event) Float64(key string, f float64) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}
@@ -628,7 +671,7 @@ func (e *Event) Floats64(key string, f []float64) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}
@@ -645,7 +688,7 @@ func (e *Event) Timestamp() *Event {
return e
}
// Time adds the field key with t formated as string using zerolog.TimeFieldFormat.
// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (e *Event) Time(key string, t time.Time) *Event {
if e == nil {
return e
@@ -654,7 +697,7 @@ func (e *Event) Time(key string, t time.Time) *Event {
return e
}
// Times adds the field key with t formated as string using zerolog.TimeFieldFormat.
// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (e *Event) Times(key string, t []time.Time) *Event {
if e == nil {
return e
@@ -670,7 +713,7 @@ func (e *Event) Dur(key string, d time.Duration) *Event {
if e == nil {
return e
}
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return e
}
@@ -681,7 +724,7 @@ func (e *Event) Durs(key string, d []time.Duration) *Event {
if e == nil {
return e
}
e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return e
}
@@ -696,10 +739,15 @@ func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event {
if t.After(start) {
d = t.Sub(start)
}
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return e
}
// Any is a wrapper around Event.Interface.
func (e *Event) Any(key string, i interface{}) *Event {
return e.Interface(key, i)
}
// Interface adds the field key with i marshaled using reflection.
func (e *Event) Interface(key string, i interface{}) *Event {
if e == nil {
@@ -712,6 +760,15 @@ func (e *Event) Interface(key string, i interface{}) *Event {
return e
}
// Type adds the field key with val's type using reflection.
func (e *Event) Type(key string, val interface{}) *Event {
if e == nil {
return e
}
e.buf = enc.AppendType(enc.AppendKey(e.buf, key), val)
return e
}
// CallerSkipFrame instructs any future Caller calls to skip the specified number of frames.
// This includes those added via hooks from the context.
func (e *Event) CallerSkipFrame(skip int) *Event {
@@ -737,11 +794,11 @@ func (e *Event) caller(skip int) *Event {
if e == nil {
return e
}
_, file, line, ok := runtime.Caller(skip + e.skipFrame)
pc, file, line, ok := runtime.Caller(skip + e.skipFrame)
if !ok {
return e
}
e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(file, line))
e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(pc, file, line))
return e
}

7
vendor/github.com/rs/zerolog/example.jsonl generated vendored

@@ -0,0 +1,7 @@
{"time":"5:41PM","level":"info","message":"Starting listener","listen":":8080","pid":37556}
{"time":"5:41PM","level":"debug","message":"Access","database":"myapp","host":"localhost:4962","pid":37556}
{"time":"5:41PM","level":"info","message":"Access","method":"GET","path":"/users","pid":37556,"resp_time":23}
{"time":"5:41PM","level":"info","message":"Access","method":"POST","path":"/posts","pid":37556,"resp_time":532}
{"time":"5:41PM","level":"warn","message":"Slow request","method":"POST","path":"/posts","pid":37556,"resp_time":532}
{"time":"5:41PM","level":"info","message":"Access","method":"GET","path":"/users","pid":37556,"resp_time":10}
{"time":"5:41PM","level":"error","message":"Database connection lost","database":"myapp","pid":37556,"error":"connection reset by peer"}

41
vendor/github.com/rs/zerolog/fields.go generated vendored

@@ -12,13 +12,13 @@ func isNilValue(i interface{}) bool {
return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0
}
func appendFields(dst []byte, fields interface{}) []byte {
func appendFields(dst []byte, fields interface{}, stack bool) []byte {
switch fields := fields.(type) {
case []interface{}:
if n := len(fields); n&0x1 == 1 { // odd number
fields = fields[:n-1]
}
dst = appendFieldList(dst, fields)
dst = appendFieldList(dst, fields, stack)
case map[string]interface{}:
keys := make([]string, 0, len(fields))
for key := range fields {
@@ -28,13 +28,13 @@ func appendFields(dst []byte, fields interface{}) []byte {
kv := make([]interface{}, 2)
for _, key := range keys {
kv[0], kv[1] = key, fields[key]
dst = appendFieldList(dst, kv)
dst = appendFieldList(dst, kv, stack)
}
}
return dst
}
func appendFieldList(dst []byte, kvList []interface{}) []byte {
func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
for i, n := 0, len(kvList); i < n; i += 2 {
key, val := kvList[i], kvList[i+1]
if key, ok := key.(string); ok {
@@ -74,6 +74,21 @@ func appendFieldList(dst []byte, kvList []interface{}) []byte {
default:
dst = enc.AppendInterface(dst, m)
}
if stack && ErrorStackMarshaler != nil {
dst = enc.AppendKey(dst, ErrorStackFieldName)
switch m := ErrorStackMarshaler(val).(type) {
case nil:
case error:
if m != nil && !isNilValue(m) {
dst = enc.AppendString(dst, m.Error())
}
case string:
dst = enc.AppendString(dst, m)
default:
dst = enc.AppendInterface(dst, m)
}
}
case []error:
dst = enc.AppendArrayStart(dst)
for i, err := range val {
@@ -124,13 +139,13 @@ func appendFieldList(dst []byte, kvList []interface{}) []byte {
case uint64:
dst = enc.AppendUint64(dst, val)
case float32:
dst = enc.AppendFloat32(dst, val)
dst = enc.AppendFloat32(dst, val, FloatingPointPrecision)
case float64:
dst = enc.AppendFloat64(dst, val)
dst = enc.AppendFloat64(dst, val, FloatingPointPrecision)
case time.Time:
dst = enc.AppendTime(dst, val, TimeFieldFormat)
case time.Duration:
dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger)
dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
case *string:
if val != nil {
dst = enc.AppendString(dst, *val)
@@ -205,13 +220,13 @@ func appendFieldList(dst []byte, kvList []interface{}) []byte {
}
case *float32:
if val != nil {
dst = enc.AppendFloat32(dst, *val)
dst = enc.AppendFloat32(dst, *val, FloatingPointPrecision)
} else {
dst = enc.AppendNil(dst)
}
case *float64:
if val != nil {
dst = enc.AppendFloat64(dst, *val)
dst = enc.AppendFloat64(dst, *val, FloatingPointPrecision)
} else {
dst = enc.AppendNil(dst)
}
@@ -223,7 +238,7 @@ func appendFieldList(dst []byte, kvList []interface{}) []byte {
}
case *time.Duration:
if val != nil {
dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger)
dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
} else {
dst = enc.AppendNil(dst)
}
@@ -252,13 +267,13 @@ func appendFieldList(dst []byte, kvList []interface{}) []byte {
case []uint64:
dst = enc.AppendUints64(dst, val)
case []float32:
dst = enc.AppendFloats32(dst, val)
dst = enc.AppendFloats32(dst, val, FloatingPointPrecision)
case []float64:
dst = enc.AppendFloats64(dst, val)
dst = enc.AppendFloats64(dst, val, FloatingPointPrecision)
case []time.Time:
dst = enc.AppendTimes(dst, val, TimeFieldFormat)
case []time.Duration:
dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger)
dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
case nil:
dst = enc.AppendNil(dst)
case net.IP:

60
vendor/github.com/rs/zerolog/globals.go generated vendored

@@ -1,6 +1,7 @@
package zerolog
import (
"bytes"
"encoding/json"
"strconv"
"sync/atomic"
@@ -19,6 +20,10 @@ const (
// TimeFormatUnixMicro defines a time format that makes time fields to be
// serialized as Unix timestamp integers in microseconds.
TimeFormatUnixMicro = "UNIXMICRO"
// TimeFormatUnixNano defines a time format that makes time fields to be
// serialized as Unix timestamp integers in nanoseconds.
TimeFormatUnixNano = "UNIXNANO"
)
var (
@@ -61,7 +66,7 @@ var (
CallerSkipFrameCount = 2
// CallerMarshalFunc allows customization of global caller marshaling
CallerMarshalFunc = func(file string, line int) string {
CallerMarshalFunc = func(pc uintptr, file string, line int) string {
return file + ":" + strconv.Itoa(line)
}
@@ -77,11 +82,25 @@ var (
}
// InterfaceMarshalFunc allows customization of interface marshaling.
// Default: "encoding/json.Marshal"
InterfaceMarshalFunc = json.Marshal
// Default: "encoding/json.Marshal" with disabled HTML escaping
InterfaceMarshalFunc = func(v interface{}) ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
encoder.SetEscapeHTML(false)
err := encoder.Encode(v)
if err != nil {
return nil, err
}
b := buf.Bytes()
if len(b) > 0 {
// Remove trailing \n which is added by Encode.
return b[:len(b)-1], nil
}
return b, nil
}
// TimeFieldFormat defines the time format of the Time field type. If set to
// TimeFormatUnix, TimeFormatUnixMs or TimeFormatUnixMicro, the time is formatted as an UNIX
// TimeFormatUnix, TimeFormatUnixMs, TimeFormatUnixMicro or TimeFormatUnixNano, the time is formatted as a UNIX
// timestamp as integer.
TimeFieldFormat = time.RFC3339
@@ -104,6 +123,39 @@ var (
// DefaultContextLogger is returned from Ctx() if there is no logger associated
// with the context.
DefaultContextLogger *Logger
// LevelColors are used by ConsoleWriter's consoleDefaultFormatLevel to color
// log levels.
LevelColors = map[Level]int{
TraceLevel: colorBlue,
DebugLevel: 0,
InfoLevel: colorGreen,
WarnLevel: colorYellow,
ErrorLevel: colorRed,
FatalLevel: colorRed,
PanicLevel: colorRed,
}
// FormattedLevels are used by ConsoleWriter's consoleDefaultFormatLevel
// for a short level name.
FormattedLevels = map[Level]string{
TraceLevel: "TRC",
DebugLevel: "DBG",
InfoLevel: "INF",
WarnLevel: "WRN",
ErrorLevel: "ERR",
FatalLevel: "FTL",
PanicLevel: "PNC",
}
// TriggerLevelWriterBufferReuseLimit is a limit in bytes that a buffer is dropped
// from the TriggerLevelWriter buffer pool if the buffer grows above the limit.
TriggerLevelWriterBufferReuseLimit = 64 * 1024
// FloatingPointPrecision, if set to a value other than -1, controls the number
// of digits when formatting float numbers in JSON. See strconv.FormatFloat for
// more details.
FloatingPointPrecision = -1
)
var (

8
vendor/github.com/rs/zerolog/internal/cbor/cbor.go generated vendored

@@ -26,7 +26,8 @@ const (
additionalTypeBreak byte = 31
// Tag Sub-types.
additionalTypeTimestamp byte = 01
additionalTypeTimestamp byte = 01
additionalTypeEmbeddedCBOR byte = 63
// Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml
additionalTypeTagNetworkAddr uint16 = 260
@@ -67,7 +68,7 @@ const (
var IntegerTimeFieldFormat = time.RFC3339
// NanoTimeFieldFormat indicates the format of timestamp decoded
// from a float value (time in seconds and nano seconds).
// from a float value (time in seconds and nanoseconds).
var NanoTimeFieldFormat = time.RFC3339Nano
func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte {
@@ -91,7 +92,8 @@ func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte {
minor = additionalTypeIntUint64
}
dst = append(dst, byte(major|minor))
dst = append(dst, major|minor)
byteCount--
for ; byteCount >= 0; byteCount-- {
dst = append(dst, byte(number>>(uint(byteCount)*8)))

68
vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go generated vendored

@@ -5,6 +5,7 @@ package cbor
import (
"bufio"
"bytes"
"encoding/base64"
"fmt"
"io"
"math"
@@ -43,7 +44,7 @@ func readByte(src *bufio.Reader) byte {
return b
}
func decodeIntAdditonalType(src *bufio.Reader, minor byte) int64 {
func decodeIntAdditionalType(src *bufio.Reader, minor byte) int64 {
val := int64(0)
if minor <= 23 {
val = int64(minor)
@@ -77,7 +78,7 @@ func decodeInteger(src *bufio.Reader) int64 {
if major != majorTypeUnsignedInt && major != majorTypeNegativeInt {
panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major))
}
val := decodeIntAdditonalType(src, minor)
val := decodeIntAdditionalType(src, minor)
if major == 0 {
return val
}
@@ -94,7 +95,7 @@ func decodeFloat(src *bufio.Reader) (float64, int) {
switch minor {
case additionalTypeFloat16:
panic(fmt.Errorf("float16 is not suppported in decodeFloat"))
panic(fmt.Errorf("float16 is not supported in decodeFloat"))
case additionalTypeFloat32:
pb := readNBytes(src, 4)
@@ -204,7 +205,7 @@ func decodeString(src *bufio.Reader, noQuotes bool) []byte {
if !noQuotes {
result = append(result, '"')
}
length := decodeIntAdditonalType(src, minor)
length := decodeIntAdditionalType(src, minor)
len := int(length)
pbs := readNBytes(src, len)
result = append(result, pbs...)
@@ -213,6 +214,31 @@ func decodeString(src *bufio.Reader, noQuotes bool) []byte {
}
return append(result, '"')
}
func decodeStringToDataUrl(src *bufio.Reader, mimeType string) []byte {
pb := readByte(src)
major := pb & maskOutAdditionalType
minor := pb & maskOutMajorType
if major != majorTypeByteString {
panic(fmt.Errorf("Major type is: %d in decodeString", major))
}
length := decodeIntAdditionalType(src, minor)
l := int(length)
enc := base64.StdEncoding
lEnc := enc.EncodedLen(l)
result := make([]byte, len("\"data:;base64,\"")+len(mimeType)+lEnc)
dest := result
u := copy(dest, "\"data:")
dest = dest[u:]
u = copy(dest, mimeType)
dest = dest[u:]
u = copy(dest, ";base64,")
dest = dest[u:]
pbs := readNBytes(src, l)
enc.Encode(dest, pbs)
dest = dest[lEnc:]
dest[0] = '"'
return result
}
func decodeUTF8String(src *bufio.Reader) []byte {
pb := readByte(src)
@@ -222,7 +248,7 @@ func decodeUTF8String(src *bufio.Reader) []byte {
panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major))
}
result := []byte{'"'}
length := decodeIntAdditonalType(src, minor)
length := decodeIntAdditionalType(src, minor)
len := int(length)
pbs := readNBytes(src, len)
@@ -238,7 +264,7 @@ func decodeUTF8String(src *bufio.Reader) []byte {
return append(dst, '"')
}
}
// The string has no need for encoding an therefore is directly
// The string has no need for encoding and therefore is directly
// appended to the byte slice.
result = append(result, pbs...)
return append(result, '"')
@@ -257,7 +283,7 @@ func array2Json(src *bufio.Reader, dst io.Writer) {
if minor == additionalTypeInfiniteCount {
unSpecifiedCount = true
} else {
length := decodeIntAdditonalType(src, minor)
length := decodeIntAdditionalType(src, minor)
len = int(length)
}
for i := 0; unSpecifiedCount || i < len; i++ {
@@ -266,7 +292,7 @@ func array2Json(src *bufio.Reader, dst io.Writer) {
if e != nil {
panic(e)
}
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak {
readByte(src)
break
}
@@ -277,7 +303,7 @@ func array2Json(src *bufio.Reader, dst io.Writer) {
if e != nil {
panic(e)
}
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak {
readByte(src)
break
}
@@ -301,7 +327,7 @@ func map2Json(src *bufio.Reader, dst io.Writer) {
if minor == additionalTypeInfiniteCount {
unSpecifiedCount = true
} else {
length := decodeIntAdditonalType(src, minor)
length := decodeIntAdditionalType(src, minor)
len = int(length)
}
dst.Write([]byte{'{'})
@@ -311,7 +337,7 @@ func map2Json(src *bufio.Reader, dst io.Writer) {
if e != nil {
panic(e)
}
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak {
readByte(src)
break
}
@@ -326,7 +352,7 @@ func map2Json(src *bufio.Reader, dst io.Writer) {
if e != nil {
panic(e)
}
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak {
readByte(src)
break
}
@@ -349,10 +375,24 @@ func decodeTagData(src *bufio.Reader) []byte {
switch minor {
case additionalTypeTimestamp:
return decodeTimeStamp(src)
case additionalTypeIntUint8:
val := decodeIntAdditionalType(src, minor)
switch byte(val) {
case additionalTypeEmbeddedCBOR:
pb := readByte(src)
dataMajor := pb & maskOutAdditionalType
if dataMajor != majorTypeByteString {
panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedCBOR", dataMajor))
}
src.UnreadByte()
return decodeStringToDataUrl(src, "application/cbor")
default:
panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val))
}
// Tag value is larger than 256 (so uint16).
case additionalTypeIntUint16:
val := decodeIntAdditonalType(src, minor)
val := decodeIntAdditionalType(src, minor)
switch uint16(val) {
case additionalTypeEmbeddedJSON:
@@ -383,7 +423,7 @@ func decodeTagData(src *bufio.Reader) []byte {
case additionalTypeTagNetworkPrefix:
pb := readByte(src)
if pb != byte(majorTypeMap|0x1) {
if pb != majorTypeMap|0x1 {
panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected"))
}
octets := decodeString(src, true)

32
vendor/github.com/rs/zerolog/internal/cbor/string.go generated vendored

@@ -8,7 +8,7 @@ func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
l := len(vals)
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -25,7 +25,7 @@ func (Encoder) AppendString(dst []byte, s string) []byte {
l := len(s)
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l))
}
@@ -64,7 +64,7 @@ func (Encoder) AppendBytes(dst, s []byte) []byte {
l := len(s)
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -77,7 +77,7 @@ func AppendEmbeddedJSON(dst, s []byte) []byte {
minor := additionalTypeEmbeddedJSON
// Append the TAG to indicate this is Embedded JSON.
dst = append(dst, byte(major|additionalTypeIntUint16))
dst = append(dst, major|additionalTypeIntUint16)
dst = append(dst, byte(minor>>8))
dst = append(dst, byte(minor&0xff))
@@ -87,7 +87,29 @@ func AppendEmbeddedJSON(dst, s []byte) []byte {
l := len(s)
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
return append(dst, s...)
}
// AppendEmbeddedCBOR adds a tag and embeds input CBOR as such.
func AppendEmbeddedCBOR(dst, s []byte) []byte {
major := majorTypeTags
minor := additionalTypeEmbeddedCBOR
// Append the TAG to indicate this is Embedded JSON.
dst = append(dst, major|additionalTypeIntUint8)
dst = append(dst, minor)
// Append the CBOR Object as Byte String.
major = majorTypeByteString
l := len(s)
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}

22
vendor/github.com/rs/zerolog/internal/cbor/time.go generated vendored

@@ -7,7 +7,7 @@ import (
func appendIntegerTimestamp(dst []byte, t time.Time) []byte {
major := majorTypeTags
minor := additionalTypeTimestamp
dst = append(dst, byte(major|minor))
dst = append(dst, major|minor)
secs := t.Unix()
var val uint64
if secs < 0 {
@@ -17,19 +17,19 @@ func appendIntegerTimestamp(dst []byte, t time.Time) []byte {
major = majorTypeUnsignedInt
val = uint64(secs)
}
dst = appendCborTypePrefix(dst, major, uint64(val))
dst = appendCborTypePrefix(dst, major, val)
return dst
}
func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte {
major := majorTypeTags
minor := additionalTypeTimestamp
dst = append(dst, byte(major|minor))
dst = append(dst, major|minor)
secs := t.Unix()
nanos := t.Nanosecond()
var val float64
val = float64(secs)*1.0 + float64(nanos)*1E-9
return e.AppendFloat64(dst, val)
val = float64(secs)*1.0 + float64(nanos)*1e-9
return e.AppendFloat64(dst, val, -1)
}
// AppendTime encodes and adds a timestamp to the dst byte array.
@@ -50,7 +50,7 @@ func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -64,17 +64,17 @@ func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte
// AppendDuration encodes and adds a duration to the dst byte array.
// useInt field indicates whether to store the duration as seconds (integer) or
// as seconds+nanoseconds (float).
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, unused int) []byte {
if useInt {
return e.AppendInt64(dst, int64(d/unit))
}
return e.AppendFloat64(dst, float64(d)/float64(unit))
return e.AppendFloat64(dst, float64(d)/float64(unit), unused)
}
// AppendDurations encodes and adds an array of durations to the dst byte array.
// useInt field indicates whether to store the duration as seconds (integer) or
// as seconds+nanoseconds (float).
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, unused int) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@@ -82,12 +82,12 @@ func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Dur
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, d := range vals {
dst = e.AppendDuration(dst, d, unit, useInt)
dst = e.AppendDuration(dst, d, unit, useInt, unused)
}
return dst
}

83
vendor/github.com/rs/zerolog/internal/cbor/types.go generated vendored

@@ -4,21 +4,22 @@ import (
"fmt"
"math"
"net"
"reflect"
)
// AppendNil inserts a 'Nil' object into the dst byte array.
func (Encoder) AppendNil(dst []byte) []byte {
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeNull))
return append(dst, majorTypeSimpleAndFloat|additionalTypeNull)
}
// AppendBeginMarker inserts a map start into the dst byte array.
func (Encoder) AppendBeginMarker(dst []byte) []byte {
return append(dst, byte(majorTypeMap|additionalTypeInfiniteCount))
return append(dst, majorTypeMap|additionalTypeInfiniteCount)
}
// AppendEndMarker inserts a map end into the dst byte array.
func (Encoder) AppendEndMarker(dst []byte) []byte {
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak)
}
// AppendObjectData takes an object in form of a byte array and appends to dst.
@@ -30,12 +31,12 @@ func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
// AppendArrayStart adds markers to indicate the start of an array.
func (Encoder) AppendArrayStart(dst []byte) []byte {
return append(dst, byte(majorTypeArray|additionalTypeInfiniteCount))
return append(dst, majorTypeArray|additionalTypeInfiniteCount)
}
// AppendArrayEnd adds markers to indicate the end of an array.
func (Encoder) AppendArrayEnd(dst []byte) []byte {
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak)
}
// AppendArrayDelim adds markers to indicate end of a particular array element.
@@ -56,7 +57,7 @@ func (Encoder) AppendBool(dst []byte, val bool) []byte {
if val {
b = additionalTypeBoolTrue
}
return append(dst, byte(majorTypeSimpleAndFloat|b))
return append(dst, majorTypeSimpleAndFloat|b)
}
// AppendBools encodes and inserts an array of boolean values into the dst byte array.
@@ -68,7 +69,7 @@ func (e Encoder) AppendBools(dst []byte, vals []bool) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -88,7 +89,7 @@ func (Encoder) AppendInt(dst []byte, val int) []byte {
}
if contentVal <= additionalMax {
lb := byte(contentVal)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(contentVal))
}
@@ -104,7 +105,7 @@ func (e Encoder) AppendInts(dst []byte, vals []int) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -128,7 +129,7 @@ func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -152,7 +153,7 @@ func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -176,7 +177,7 @@ func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -196,7 +197,7 @@ func (Encoder) AppendInt64(dst []byte, val int64) []byte {
}
if contentVal <= additionalMax {
lb := byte(contentVal)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(contentVal))
}
@@ -212,7 +213,7 @@ func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -236,7 +237,7 @@ func (e Encoder) AppendUints(dst []byte, vals []uint) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -260,7 +261,7 @@ func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -284,7 +285,7 @@ func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -308,7 +309,7 @@ func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -324,9 +325,9 @@ func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
contentVal := val
if contentVal <= additionalMax {
lb := byte(contentVal)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(contentVal))
dst = appendCborTypePrefix(dst, major, contentVal)
}
return dst
}
@@ -340,7 +341,7 @@ func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@@ -351,7 +352,7 @@ func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
}
// AppendFloat32 encodes and inserts a single precision float value into the dst byte array.
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
func (Encoder) AppendFloat32(dst []byte, val float32, unused int) []byte {
switch {
case math.IsNaN(float64(val)):
return append(dst, "\xfa\x7f\xc0\x00\x00"...)
@@ -367,11 +368,11 @@ func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
for i := uint(0); i < 4; i++ {
buf[i] = byte(n >> ((3 - i) * 8))
}
return append(append(dst, byte(major|subType)), buf[0], buf[1], buf[2], buf[3])
return append(append(dst, major|subType), buf[0], buf[1], buf[2], buf[3])
}
// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array.
func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
func (e Encoder) AppendFloats32(dst []byte, vals []float32, unused int) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@@ -379,18 +380,18 @@ func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = e.AppendFloat32(dst, v)
dst = e.AppendFloat32(dst, v, unused)
}
return dst
}
// AppendFloat64 encodes and inserts a double precision float value into the dst byte array.
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
func (Encoder) AppendFloat64(dst []byte, val float64, unused int) []byte {
switch {
case math.IsNaN(val):
return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...)
@@ -402,7 +403,7 @@ func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
major := majorTypeSimpleAndFloat
subType := additionalTypeFloat64
n := math.Float64bits(val)
dst = append(dst, byte(major|subType))
dst = append(dst, major|subType)
for i := uint(1); i <= 8; i++ {
b := byte(n >> ((8 - i) * 8))
dst = append(dst, b)
@@ -411,7 +412,7 @@ func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
}
// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array.
func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
func (e Encoder) AppendFloats64(dst []byte, vals []float64, unused int) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@@ -419,12 +420,12 @@ func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, byte(major|lb))
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = e.AppendFloat64(dst, v)
dst = e.AppendFloat64(dst, v, unused)
}
return dst
}
@@ -438,9 +439,17 @@ func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
return AppendEmbeddedJSON(dst, marshaled)
}
// AppendType appends the parameter type (as a string) to the input byte slice.
func (e Encoder) AppendType(dst []byte, i interface{}) []byte {
if i == nil {
return e.AppendString(dst, "<nil>")
}
return e.AppendString(dst, reflect.TypeOf(i).String())
}
// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6).
func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
dst = append(dst, majorTypeTags|additionalTypeIntUint16)
dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
return e.AppendBytes(dst, ip)
@@ -448,21 +457,21 @@ func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length).
func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
dst = append(dst, majorTypeTags|additionalTypeIntUint16)
dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8))
dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff))
// Prefix is a tuple (aka MAP of 1 pair of elements) -
// first element is prefix, second is mask length.
dst = append(dst, byte(majorTypeMap|0x1))
dst = append(dst, majorTypeMap|0x1)
dst = e.AppendBytes(dst, pfx.IP)
maskLen, _ := pfx.Mask.Size()
return e.AppendUint8(dst, uint8(maskLen))
}
// AppendMACAddr encodes and inserts an Hardware (MAC) address.
// AppendMACAddr encodes and inserts a Hardware (MAC) address.
func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
dst = append(dst, majorTypeTags|additionalTypeIntUint16)
dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
return e.AppendBytes(dst, ha)
@@ -470,7 +479,7 @@ func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
// AppendHex adds a TAG and inserts a hex bytes as a string.
func (e Encoder) AppendHex(dst []byte, val []byte) []byte {
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
dst = append(dst, majorTypeTags|additionalTypeIntUint16)
dst = append(dst, byte(additionalTypeTagHexString>>8))
dst = append(dst, byte(additionalTypeTagHexString&0xff))
return e.AppendBytes(dst, val)
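
The change that recurs through these CBOR hunks is cosmetic: `major|lb` is already a byte, so the redundant `byte(...)` conversion is dropped. The encoding behind it is that a CBOR item header is a single byte with the major type in the top three bits and a small length (or simple value) in the low five bits; only lengths above `additionalMax` need a multi-byte prefix via `appendCborTypePrefix`. A minimal standalone sketch of that header layout, with illustrative constant names (not the vendored identifiers) and the length prefix simplified to its one-byte form:

```
package main

import "fmt"

// Illustrative constants mirroring the CBOR header layout used above
// (names are local to this sketch, not the vendored identifiers).
const (
	cborMajorArray    byte = 4 << 5 // major type 4 (array) in the top 3 bits
	cborAdditionalMax byte = 23     // largest length that fits in the low 5 bits
)

// appendArrayHeader shows the two cases from the hunks above: short lengths
// are OR'd directly into the header byte, longer ones need a length prefix.
func appendArrayHeader(dst []byte, l int) []byte {
	if l <= int(cborAdditionalMax) {
		return append(dst, cborMajorArray|byte(l))
	}
	// Simplified prefix: additional info 24 means "length follows as one
	// byte"; real CBOR (and appendCborTypePrefix) also has 2-, 4- and
	// 8-byte forms for larger lengths.
	return append(dst, cborMajorArray|24, byte(l))
}

func main() {
	fmt.Printf("% x\n", appendArrayHeader(nil, 3))   // 83
	fmt.Printf("% x\n", appendArrayHeader(nil, 100)) // 98 64
}
```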

6
vendor/github.com/rs/zerolog/internal/json/string.go generated vendored

@@ -37,7 +37,7 @@ func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
//
// The operation loops though each byte in the string looking
// for characters that need json or utf8 encoding. If the string
// does not need encoding, then the string is appended in it's
// does not need encoding, then the string is appended in its
// entirety to the byte slice.
// If we encounter a byte that does need encoding, switch up
// the operation and perform a byte-by-byte read-encode-append.
@@ -56,7 +56,7 @@ func (Encoder) AppendString(dst []byte, s string) []byte {
return append(dst, '"')
}
}
// The string has no need for encoding an therefore is directly
// The string has no need for encoding and therefore is directly
// appended to the byte slice.
dst = append(dst, s...)
// End with a double quote
@@ -99,7 +99,7 @@ func appendStringComplex(dst []byte, s string, i int) []byte {
r, size := utf8.DecodeRuneInString(s[i:])
if r == utf8.RuneError && size == 1 {
// In case of error, first append previous simple characters to
// the byte slice if any and append a remplacement character code
// the byte slice if any and append a replacement character code
// in place of the invalid sequence.
if start < i {
dst = append(dst, s[start:i]...)

29
vendor/github.com/rs/zerolog/internal/json/time.go generated vendored

@@ -7,9 +7,10 @@ import (
const (
// Import from zerolog/global.go
timeFormatUnix = ""
timeFormatUnixMs = "UNIXMS"
timeFormatUnix = ""
timeFormatUnixMs = "UNIXMS"
timeFormatUnixMicro = "UNIXMICRO"
timeFormatUnixNano = "UNIXNANO"
)
// AppendTime formats the input time with the given format
@@ -22,6 +23,8 @@ func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte {
return e.AppendInt64(dst, t.UnixNano()/1000000)
case timeFormatUnixMicro:
return e.AppendInt64(dst, t.UnixNano()/1000)
case timeFormatUnixNano:
return e.AppendInt64(dst, t.UnixNano())
}
return append(t.AppendFormat(append(dst, '"'), format), '"')
}
@@ -33,7 +36,11 @@ func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte {
case timeFormatUnix:
return appendUnixTimes(dst, vals)
case timeFormatUnixMs:
return appendUnixMsTimes(dst, vals)
return appendUnixNanoTimes(dst, vals, 1000000)
case timeFormatUnixMicro:
return appendUnixNanoTimes(dst, vals, 1000)
case timeFormatUnixNano:
return appendUnixNanoTimes(dst, vals, 1)
}
if len(vals) == 0 {
return append(dst, '[', ']')
@@ -64,15 +71,15 @@ func appendUnixTimes(dst []byte, vals []time.Time) []byte {
return dst
}
func appendUnixMsTimes(dst []byte, vals []time.Time) []byte {
func appendUnixNanoTimes(dst []byte, vals []time.Time, div int64) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendInt(dst, vals[0].UnixNano()/1000000, 10)
dst = strconv.AppendInt(dst, vals[0].UnixNano()/div, 10)
if len(vals) > 1 {
for _, t := range vals[1:] {
dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/1000000, 10)
dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/div, 10)
}
}
dst = append(dst, ']')
@@ -81,24 +88,24 @@ func appendUnixMsTimes(dst []byte, vals []time.Time) []byte {
// AppendDuration formats the input duration with the given unit & format
// and appends the encoded string to the input byte slice.
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, precision int) []byte {
if useInt {
return strconv.AppendInt(dst, int64(d/unit), 10)
}
return e.AppendFloat64(dst, float64(d)/float64(unit))
return e.AppendFloat64(dst, float64(d)/float64(unit), precision)
}
// AppendDurations formats the input durations with the given unit & format
// and appends the encoded string list to the input byte slice.
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, precision int) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = e.AppendDuration(dst, vals[0], unit, useInt)
dst = e.AppendDuration(dst, vals[0], unit, useInt, precision)
if len(vals) > 1 {
for _, d := range vals[1:] {
dst = e.AppendDuration(append(dst, ','), d, unit, useInt)
dst = e.AppendDuration(append(dst, ','), d, unit, useInt, precision)
}
}
dst = append(dst, ']')
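
The JSON time hunks collapse the per-format helpers into a single `appendUnixNanoTimes` that divides `UnixNano()` by a per-format divisor: 1e6 for UNIXMS, 1e3 for UNIXMICRO, and 1 for UNIXNANO. A small hedged sketch of the same divisor idea, written as a standalone helper rather than the vendored function:

```
package main

import (
	"fmt"
	"strconv"
	"time"
)

// appendUnixTimes mirrors the divisor approach above: one helper serves
// milliseconds, microseconds and nanoseconds just by changing div.
func appendUnixTimes(dst []byte, vals []time.Time, div int64) []byte {
	dst = append(dst, '[')
	for i, t := range vals {
		if i > 0 {
			dst = append(dst, ',')
		}
		dst = strconv.AppendInt(dst, t.UnixNano()/div, 10)
	}
	return append(dst, ']')
}

func main() {
	ts := []time.Time{time.Unix(0, 1500000000)} // 1.5s after the epoch
	fmt.Println(string(appendUnixTimes(nil, ts, 1e6))) // [1500]       (ms)
	fmt.Println(string(appendUnixTimes(nil, ts, 1e3))) // [1500000]    (µs)
	fmt.Println(string(appendUnixTimes(nil, ts, 1)))   // [1500000000] (ns)
}
```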

58
vendor/github.com/rs/zerolog/internal/json/types.go generated vendored

@@ -4,6 +4,7 @@ import (
"fmt"
"math"
"net"
"reflect"
"strconv"
)
@@ -278,7 +279,7 @@ func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
// AppendUint64 converts the input uint64 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
return strconv.AppendUint(dst, uint64(val), 10)
return strconv.AppendUint(dst, val, 10)
}
// AppendUints64 encodes the input uint64s to json and
@@ -298,9 +299,9 @@ func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
return dst
}
func appendFloat(dst []byte, val float64, bitSize int) []byte {
func appendFloat(dst []byte, val float64, bitSize, precision int) []byte {
// JSON does not permit NaN or Infinity. A typical JSON encoder would fail
// with an error, but a logging library wants the data to get thru so we
// with an error, but a logging library wants the data to get through so we
// make a tradeoff and store those types as string.
switch {
case math.IsNaN(val):
@@ -310,26 +311,47 @@ func appendFloat(dst []byte, val float64, bitSize int) []byte {
case math.IsInf(val, -1):
return append(dst, `"-Inf"`...)
}
return strconv.AppendFloat(dst, val, 'f', -1, bitSize)
// convert as if by es6 number to string conversion
// see also https://cs.opensource.google/go/go/+/refs/tags/go1.20.3:src/encoding/json/encode.go;l=573
strFmt := byte('f')
// If precision is set to a value other than -1, we always just format the float using that precision.
if precision == -1 {
// Use float32 comparisons for underlying float32 value to get precise cutoffs right.
if abs := math.Abs(val); abs != 0 {
if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
strFmt = 'e'
}
}
}
dst = strconv.AppendFloat(dst, val, strFmt, precision, bitSize)
if strFmt == 'e' {
// Clean up e-09 to e-9
n := len(dst)
if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
dst[n-2] = dst[n-1]
dst = dst[:n-1]
}
}
return dst
}
// AppendFloat32 converts the input float32 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
return appendFloat(dst, float64(val), 32)
func (Encoder) AppendFloat32(dst []byte, val float32, precision int) []byte {
return appendFloat(dst, float64(val), 32, precision)
}
// AppendFloats32 encodes the input float32s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
func (Encoder) AppendFloats32(dst []byte, vals []float32, precision int) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = appendFloat(dst, float64(vals[0]), 32)
dst = appendFloat(dst, float64(vals[0]), 32, precision)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = appendFloat(append(dst, ','), float64(val), 32)
dst = appendFloat(append(dst, ','), float64(val), 32, precision)
}
}
dst = append(dst, ']')
@@ -338,21 +360,21 @@ func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
// AppendFloat64 converts the input float64 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
return appendFloat(dst, val, 64)
func (Encoder) AppendFloat64(dst []byte, val float64, precision int) []byte {
return appendFloat(dst, val, 64, precision)
}
// AppendFloats64 encodes the input float64s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
func (Encoder) AppendFloats64(dst []byte, vals []float64, precision int) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = appendFloat(dst, vals[0], 64)
dst = appendFloat(dst, vals[0], 64, precision)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = appendFloat(append(dst, ','), val, 64)
dst = appendFloat(append(dst, ','), val, 64, precision)
}
}
dst = append(dst, ']')
@@ -369,6 +391,14 @@ func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
return append(dst, marshaled...)
}
// AppendType appends the parameter type (as a string) to the input byte slice.
func (e Encoder) AppendType(dst []byte, i interface{}) []byte {
if i == nil {
return e.AppendString(dst, "<nil>")
}
return e.AppendString(dst, reflect.TypeOf(i).String())
}
// AppendObjectData takes in an object that is already in a byte array
// and adds it to the dst.
func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
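
The new `precision` parameter threads down into `appendFloat`, and when it is -1 the encoder mimics `encoding/json`: plain 'f' formatting inside roughly [1e-6, 1e21), 'e' formatting outside that range, followed by trimming the leading zero of a two-digit exponent ("e-09" becomes "e-9"). A rough standalone sketch of that selection and cleanup step, assuming 64-bit values only (not the vendored function):

```
package main

import (
	"fmt"
	"math"
	"strconv"
)

// formatJSONFloat sketches the es6-style choice between 'f' and 'e'
// formatting and the exponent cleanup described above.
func formatJSONFloat(val float64) []byte {
	format := byte('f')
	if abs := math.Abs(val); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
		format = 'e'
	}
	out := strconv.AppendFloat(nil, val, format, -1, 64)
	if format == 'e' {
		// Turn e-09 into e-9, matching encoding/json's output.
		if n := len(out); n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' {
			out[n-2] = out[n-1]
			out = out[:n-1]
		}
	}
	return out
}

func main() {
	fmt.Println(string(formatJSONFloat(0.0000001))) // 1e-7
	fmt.Println(string(formatJSONFloat(1234.5)))    // 1234.5
}
```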

101
vendor/github.com/rs/zerolog/log.go generated vendored

@@ -24,7 +24,7 @@
//
// Sub-loggers let you chain loggers with additional context:
//
// sublogger := log.With().Str("component": "foo").Logger()
// sublogger := log.With().Str("component", "foo").Logger()
// sublogger.Info().Msg("hello world")
// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"}
//
@@ -82,8 +82,9 @@
// log.Warn().Msg("")
// // Output: {"level":"warn","severity":"warn"}
//
// # Caveats
//
// Caveats
// Field duplication:
//
// There is no fields deduplication out-of-the-box.
// Using the same key multiple times creates new key in final JSON each time.
@@ -96,14 +97,30 @@
//
// In this case, many consumers will take the last value,
// but this is not guaranteed; check yours if in doubt.
//
// Concurrency safety:
//
// Be careful when calling UpdateContext. It is not concurrency safe. Use the With method to create a child logger:
//
// func handler(w http.ResponseWriter, r *http.Request) {
// // Create a child logger for concurrency safety
// logger := log.Logger.With().Logger()
//
// // Add context fields, for example User-Agent from HTTP headers
// logger.UpdateContext(func(c zerolog.Context) zerolog.Context {
// ...
// })
// }
package zerolog
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
)
// Level defines log levels.
@@ -159,24 +176,24 @@ func (l Level) String() string {
// ParseLevel converts a level string into a zerolog Level value.
// returns an error if the input string does not match known values.
func ParseLevel(levelStr string) (Level, error) {
switch levelStr {
case LevelFieldMarshalFunc(TraceLevel):
switch {
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(TraceLevel)):
return TraceLevel, nil
case LevelFieldMarshalFunc(DebugLevel):
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(DebugLevel)):
return DebugLevel, nil
case LevelFieldMarshalFunc(InfoLevel):
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(InfoLevel)):
return InfoLevel, nil
case LevelFieldMarshalFunc(WarnLevel):
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(WarnLevel)):
return WarnLevel, nil
case LevelFieldMarshalFunc(ErrorLevel):
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(ErrorLevel)):
return ErrorLevel, nil
case LevelFieldMarshalFunc(FatalLevel):
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(FatalLevel)):
return FatalLevel, nil
case LevelFieldMarshalFunc(PanicLevel):
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(PanicLevel)):
return PanicLevel, nil
case LevelFieldMarshalFunc(Disabled):
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(Disabled)):
return Disabled, nil
case LevelFieldMarshalFunc(NoLevel):
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(NoLevel)):
return NoLevel, nil
}
i, err := strconv.Atoi(levelStr)
@@ -189,6 +206,21 @@ func ParseLevel(levelStr string) (Level, error) {
return Level(i), nil
}
// UnmarshalText implements encoding.TextUnmarshaler to allow for easy reading from toml/yaml/json formats
func (l *Level) UnmarshalText(text []byte) error {
if l == nil {
return errors.New("can't unmarshal a nil *Level")
}
var err error
*l, err = ParseLevel(string(text))
return err
}
// MarshalText implements encoding.TextMarshaler to allow for easy writing into toml/yaml/json formats
func (l Level) MarshalText() ([]byte, error) {
return []byte(LevelFieldMarshalFunc(l)), nil
}
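
With ParseLevel now case-insensitive and Level implementing encoding.TextUnmarshaler/TextMarshaler, a level can be decoded straight from JSON, YAML, or TOML configuration. A hedged usage sketch against the vendored zerolog shown above; the config struct and field names are made up for illustration:

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rs/zerolog"
)

// appConfig is an illustrative config struct; only the zerolog.Level
// field matters for this example.
type appConfig struct {
	LogLevel zerolog.Level `json:"log_level"`
}

func main() {
	var cfg appConfig
	// "WARN" works because ParseLevel now compares case-insensitively.
	if err := json.Unmarshal([]byte(`{"log_level":"WARN"}`), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.LogLevel) // warn

	// MarshalText round-trips the level back to its string form.
	out, _ := json.Marshal(cfg)
	fmt.Println(string(out)) // {"log_level":"warn"}
}
```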
// A Logger represents an active logging object that generates lines
// of JSON output to an io.Writer. Each logging operation makes a single
// call to the Writer's Write method. There is no guarantee on access
@@ -201,6 +233,7 @@ type Logger struct {
context []byte
hooks []Hook
stack bool
ctx context.Context
}
// New creates a root logger with given output writer. If the output writer implements
@@ -212,11 +245,11 @@ type Logger struct {
// you may consider using sync wrapper.
func New(w io.Writer) Logger {
if w == nil {
w = ioutil.Discard
w = io.Discard
}
lw, ok := w.(LevelWriter)
if !ok {
lw = levelWriterAdapter{w}
lw = LevelWriterAdapter{w}
}
return Logger{w: lw, level: TraceLevel}
}
@@ -258,7 +291,8 @@ func (l Logger) With() Context {
// UpdateContext updates the internal logger's context.
//
// Use this method with caution. If unsure, prefer the With method.
// Caution: This method is not concurrency safe.
// Use the With method to create a child logger before modifying the context from concurrent goroutines.
func (l *Logger) UpdateContext(update func(c Context) Context) {
if l == disabledLogger {
return
@@ -291,8 +325,13 @@ func (l Logger) Sample(s Sampler) Logger {
}
// Hook returns a logger with the h Hook.
func (l Logger) Hook(h Hook) Logger {
l.hooks = append(l.hooks, h)
func (l Logger) Hook(hooks ...Hook) Logger {
if len(hooks) == 0 {
return l
}
newHooks := make([]Hook, len(l.hooks), len(l.hooks)+len(hooks))
copy(newHooks, l.hooks)
l.hooks = append(newHooks, hooks...)
return l
}
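
Hook is now variadic and copies the existing hook slice before appending, so attaching hooks to a child logger cannot mutate the parent's slice. A hedged sketch of adding two hooks in one call against the vendored version above; SeverityHook and CounterHook are illustrative implementations of zerolog's Hook interface, not part of the library:

```
package main

import (
	"os"

	"github.com/rs/zerolog"
)

// SeverityHook mirrors the level into an extra field (illustrative).
type SeverityHook struct{}

func (SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
	if level != zerolog.NoLevel {
		e.Str("severity", level.String())
	}
}

// CounterHook counts emitted events (illustrative).
type CounterHook struct{ n *int }

func (h CounterHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
	*h.n++
}

func main() {
	count := 0
	logger := zerolog.New(os.Stdout).
		Hook(SeverityHook{}, CounterHook{n: &count}) // both hooks in one call
	logger.Warn().Msg("disk nearly full")
	// count == 1 here; both hooks ran for the single event.
}
```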
@@ -348,7 +387,14 @@ func (l *Logger) Err(err error) *Event {
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Fatal() *Event {
return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) })
return l.newEvent(FatalLevel, func(msg string) {
if closer, ok := l.w.(io.Closer); ok {
// Close the writer to flush any buffered message. Otherwise the message
// will be lost as os.Exit() terminates the program immediately.
closer.Close()
}
os.Exit(1)
})
}
// Panic starts a new message with panic level. The panic() function
@@ -361,7 +407,7 @@ func (l *Logger) Panic() *Event {
// WithLevel starts a new message with level. Unlike Fatal and Panic
// methods, WithLevel does not terminate the program or stop the ordinary
// flow of a gourotine when used with their respective levels.
// flow of a goroutine when used with their respective levels.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) WithLevel(level Level) *Event {
@@ -413,6 +459,14 @@ func (l *Logger) Printf(format string, v ...interface{}) {
}
}
// Println sends a log event using debug level and no extra field.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Println(v ...interface{}) {
if e := l.Debug(); e.Enabled() {
e.CallerSkipFrame(1).Msg(fmt.Sprintln(v...))
}
}
// Write implements the io.Writer interface. This is useful to set as a writer
// for the standard library log.
func (l Logger) Write(p []byte) (n int, err error) {
@@ -428,11 +482,15 @@ func (l Logger) Write(p []byte) (n int, err error) {
func (l *Logger) newEvent(level Level, done func(string)) *Event {
enabled := l.should(level)
if !enabled {
if done != nil {
done("")
}
return nil
}
e := newEvent(l.w, level)
e.done = done
e.ch = l.hooks
e.ctx = l.ctx
if level != NoLevel && LevelFieldName != "" {
e.Str(LevelFieldName, LevelFieldMarshalFunc(level))
}
@@ -447,6 +505,9 @@ func (l *Logger) newEvent(level Level, done func(string)) *Event {
// should returns true if the log event should be logged.
func (l *Logger) should(lvl Level) bool {
if l.w == nil {
return false
}
if lvl < l.level || lvl < GlobalLevel() {
return false
}

BIN
vendor/github.com/rs/zerolog/pretty.png generated vendored

Binary file not shown.

Before: 82 KiB  |  After: 116 KiB

2
vendor/github.com/rs/zerolog/sampler.go generated vendored

@@ -84,7 +84,7 @@ func (s *BurstSampler) Sample(lvl Level) bool {
}
func (s *BurstSampler) inc() uint32 {
now := time.Now().UnixNano()
now := TimestampFunc().UnixNano()
resetAt := atomic.LoadInt64(&s.resetAt)
var c uint32
if now > resetAt {

9
vendor/github.com/rs/zerolog/syslog.go generated vendored

@@ -78,3 +78,12 @@ func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) {
n = len(p)
return
}
// Call the underlying writer's Close method if it is an io.Closer. Otherwise
// does nothing.
func (sw syslogWriter) Close() error {
if c, ok := sw.w.(io.Closer); ok {
return c.Close()
}
return nil
}

200
vendor/github.com/rs/zerolog/writer.go generated vendored

@@ -17,14 +17,25 @@ type LevelWriter interface {
WriteLevel(level Level, p []byte) (n int, err error)
}
type levelWriterAdapter struct {
// LevelWriterAdapter adapts an io.Writer to support the LevelWriter interface.
type LevelWriterAdapter struct {
io.Writer
}
func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
// WriteLevel simply writes everything to the adapted writer, ignoring the level.
func (lw LevelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
return lw.Write(p)
}
// Call the underlying writer's Close method if it is an io.Closer. Otherwise
// does nothing.
func (lw LevelWriterAdapter) Close() error {
if closer, ok := lw.Writer.(io.Closer); ok {
return closer.Close()
}
return nil
}
type syncWriter struct {
mu sync.Mutex
lw LevelWriter
@@ -38,7 +49,7 @@ func SyncWriter(w io.Writer) io.Writer {
if lw, ok := w.(LevelWriter); ok {
return &syncWriter{lw: lw}
}
return &syncWriter{lw: levelWriterAdapter{w}}
return &syncWriter{lw: LevelWriterAdapter{w}}
}
// Write implements the io.Writer interface.
@@ -55,6 +66,15 @@ func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) {
return s.lw.WriteLevel(l, p)
}
func (s *syncWriter) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
if closer, ok := s.lw.(io.Closer); ok {
return closer.Close()
}
return nil
}
type multiLevelWriter struct {
writers []LevelWriter
}
@@ -87,6 +107,20 @@ func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
return n, err
}
// Calls close on all the underlying writers that are io.Closers. If any of the
// Close methods return an error, the remainder of the closers are not closed
// and the error is returned.
func (t multiLevelWriter) Close() error {
for _, w := range t.writers {
if closer, ok := w.(io.Closer); ok {
if err := closer.Close(); err != nil {
return err
}
}
}
return nil
}
// MultiLevelWriter creates a writer that duplicates its writes to all the
// provided writers, similar to the Unix tee(1) command. If some writers
// implement LevelWriter, their WriteLevel method will be used instead of Write.
@@ -96,7 +130,7 @@ func MultiLevelWriter(writers ...io.Writer) LevelWriter {
if lw, ok := w.(LevelWriter); ok {
lwriters = append(lwriters, lw)
} else {
lwriters = append(lwriters, levelWriterAdapter{w})
lwriters = append(lwriters, LevelWriterAdapter{w})
}
}
return multiLevelWriter{lwriters}
@@ -152,3 +186,161 @@ func ConsoleTestWriter(t TestingLog) func(w *ConsoleWriter) {
w.Out = TestWriter{T: t, Frame: 6}
}
}
// FilteredLevelWriter writes only logs at Level or above to Writer.
//
// It should be used only in combination with MultiLevelWriter when you
// want to write to multiple destinations at different levels. Otherwise
// you should just set the level on the logger and filter events early.
// When using MultiLevelWriter then you set the level on the logger to
// the lowest of the levels you use for writers.
type FilteredLevelWriter struct {
Writer LevelWriter
Level Level
}
// Write writes to the underlying Writer.
func (w *FilteredLevelWriter) Write(p []byte) (int, error) {
return w.Writer.Write(p)
}
// WriteLevel calls WriteLevel of the underlying Writer only if the level is equal
// or above the Level.
func (w *FilteredLevelWriter) WriteLevel(level Level, p []byte) (int, error) {
if level >= w.Level {
return w.Writer.WriteLevel(level, p)
}
return len(p), nil
}
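
As the comment above says, FilteredLevelWriter is meant to be paired with MultiLevelWriter when different destinations want different levels, with the logger level set to the lowest level any writer uses. A hedged sketch of that setup against the vendored API; the file handling is simplified for illustration:

```
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logFile, err := os.Create("errors.log") // illustrative destination
	if err != nil {
		panic(err)
	}
	defer logFile.Close()

	// Console gets everything from debug up; the file only errors and above.
	console := &zerolog.FilteredLevelWriter{
		Writer: zerolog.LevelWriterAdapter{Writer: os.Stderr},
		Level:  zerolog.DebugLevel,
	}
	fileOnly := &zerolog.FilteredLevelWriter{
		Writer: zerolog.LevelWriterAdapter{Writer: logFile},
		Level:  zerolog.ErrorLevel,
	}

	// The logger level is the lowest of the writer levels, as advised above.
	logger := zerolog.New(zerolog.MultiLevelWriter(console, fileOnly)).
		Level(zerolog.DebugLevel).
		With().Timestamp().Logger()

	logger.Debug().Msg("console only")
	logger.Error().Msg("console and errors.log")
}
```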
var triggerWriterPool = &sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(make([]byte, 0, 1024))
},
}
// TriggerLevelWriter buffers log lines at the ConditionalLevel or below
// until a trigger level (or higher) line is emitted. Log lines with level
// higher than ConditionalLevel are always written out to the destination
// writer. If trigger never happens, buffered log lines are never written out.
//
// It can be used to configure "log level per request".
type TriggerLevelWriter struct {
// Destination writer. If LevelWriter is provided (usually), its WriteLevel is used
// instead of Write.
io.Writer
// ConditionalLevel is the level (and below) at which lines are buffered until
// a trigger level (or higher) line is emitted. Usually this is set to DebugLevel.
ConditionalLevel Level
// TriggerLevel is the lowest level that triggers the sending of the conditional
// level lines. Usually this is set to ErrorLevel.
TriggerLevel Level
buf *bytes.Buffer
triggered bool
mu sync.Mutex
}
func (w *TriggerLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
w.mu.Lock()
defer w.mu.Unlock()
// At first trigger level or above log line, we flush the buffer and change the
// trigger state to triggered.
if !w.triggered && l >= w.TriggerLevel {
err := w.trigger()
if err != nil {
return 0, err
}
}
// Unless triggered, we buffer everything at and below ConditionalLevel.
if !w.triggered && l <= w.ConditionalLevel {
if w.buf == nil {
w.buf = triggerWriterPool.Get().(*bytes.Buffer)
}
// We prefix each log line with a byte with the level.
// Hopefully we will never have a level value which equals a newline
// (which could interfere with reconstruction of log lines in the trigger method).
w.buf.WriteByte(byte(l))
w.buf.Write(p)
return len(p), nil
}
// Anything above ConditionalLevel is always passed through.
// Once triggered, everything is passed through.
if lw, ok := w.Writer.(LevelWriter); ok {
return lw.WriteLevel(l, p)
}
return w.Write(p)
}
// trigger expects lock to be held.
func (w *TriggerLevelWriter) trigger() error {
if w.triggered {
return nil
}
w.triggered = true
if w.buf == nil {
return nil
}
p := w.buf.Bytes()
for len(p) > 0 {
// We do not use bufio.Scanner here because we already have full buffer
// in the memory and we do not want extra copying from the buffer to
// scanner's token slice, nor we want to hit scanner's token size limit,
// and we also want to preserve newlines.
i := bytes.IndexByte(p, '\n')
line := p[0 : i+1]
p = p[i+1:]
// We prefixed each log line with a byte with the level.
level := Level(line[0])
line = line[1:]
var err error
if lw, ok := w.Writer.(LevelWriter); ok {
_, err = lw.WriteLevel(level, line)
} else {
_, err = w.Write(line)
}
if err != nil {
return err
}
}
return nil
}
// Trigger forces flushing the buffer and change the trigger state to
// triggered, if the writer has not already been triggered before.
func (w *TriggerLevelWriter) Trigger() error {
w.mu.Lock()
defer w.mu.Unlock()
return w.trigger()
}
// Close closes the writer and returns the buffer to the pool.
func (w *TriggerLevelWriter) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.buf == nil {
return nil
}
// We return the buffer only if it has not grown above the limit.
// This prevents accumulation of large buffers in the pool just
// because occasionally a large buffer might be needed.
if w.buf.Cap() <= TriggerLevelWriterBufferReuseLimit {
w.buf.Reset()
triggerWriterPool.Put(w.buf)
}
w.buf = nil
return nil
}
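
TriggerLevelWriter implements the "log level per request" pattern: lines at or below ConditionalLevel are buffered and only flushed once a TriggerLevel (or higher) line appears. A hedged usage sketch against the vendored API; the HTTP handler shape is illustrative:

```
package main

import (
	"net/http"
	"os"

	"github.com/rs/zerolog"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// One trigger writer per request, so buffering is scoped to the request.
	tw := &zerolog.TriggerLevelWriter{
		Writer:           os.Stderr,
		ConditionalLevel: zerolog.DebugLevel,
		TriggerLevel:     zerolog.ErrorLevel,
	}
	defer tw.Close() // return the buffer to the pool

	logger := zerolog.New(tw).With().Str("path", r.URL.Path).Logger()

	logger.Debug().Msg("buffered unless an error follows")

	if r.URL.Query().Get("fail") != "" {
		// The first error flushes the buffered debug lines, then passes through.
		logger.Error().Msg("request failed")
	}
}

func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe(":8080", nil) // illustrative; error handling omitted
}
```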

27
vendor/golang.org/x/sys/LICENSE generated vendored

@@ -0,0 +1,27 @@
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/sys/PATENTS generated vendored

@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

2
vendor/golang.org/x/sys/unix/.gitignore generated vendored

@@ -0,0 +1,2 @@
_obj/
unix.test

184
vendor/golang.org/x/sys/unix/README.md generated vendored

@@ -0,0 +1,184 @@
# Building `sys/unix`
The sys/unix package provides access to the raw system call interface of the
underlying operating system. See: https://godoc.org/golang.org/x/sys/unix
Porting Go to a new architecture/OS combination or adding syscalls, types, or
constants to an existing architecture/OS pair requires some manual effort;
however, there are tools that automate much of the process.
## Build Systems
There are currently two ways we generate the necessary files. We are currently
migrating the build system to use containers so the builds are reproducible.
This is being done on an OS-by-OS basis. Please update this documentation as
components of the build system change.
### Old Build System (currently for `GOOS != "linux"`)
The old build system generates the Go files based on the C header files
present on your system. This means that files
for a given GOOS/GOARCH pair must be generated on a system with that OS and
architecture. This also means that the generated code can differ from system
to system, based on differences in the header files.
To avoid this, if you are using the old build system, only generate the Go
files on an installation with unmodified header files. It is also important to
keep track of which version of the OS the files were generated from (ex.
Darwin 14 vs Darwin 15). This makes it easier to track the progress of changes
and have each OS upgrade correspond to a single change.
To build the files for your current OS and architecture, make sure GOOS and
GOARCH are set correctly and run `mkall.sh`. This will generate the files for
your specific system. Running `mkall.sh -n` shows the commands that will be run.
Requirements: bash, go
### New Build System (currently for `GOOS == "linux"`)
The new build system uses a Docker container to generate the go files directly
from source checkouts of the kernel and various system libraries. This means
that on any platform that supports Docker, all the files using the new build
system can be generated at once, and generated files will not change based on
what the person running the scripts has installed on their computer.
The OS specific files for the new build system are located in the `${GOOS}`
directory, and the build is coordinated by the `${GOOS}/mkall.go` program. When
the kernel or system library updates, modify the Dockerfile at
`${GOOS}/Dockerfile` to checkout the new release of the source.
To build all the files under the new build system, you must be on an amd64/Linux
system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
then generate all of the files for all of the GOOS/GOARCH pairs in the new build
system. Running `mkall.sh -n` shows the commands that will be run.
Requirements: bash, go, docker
## Component files
This section describes the various files used in the code generation process.
It also contains instructions on how to modify these files to add a new
architecture/OS or to add additional syscalls, types, or constants. Note that
if you are using the new build system, the scripts/programs cannot be called normally.
They must be called from within the docker container.
### asm files
The hand-written assembly file at `asm_${GOOS}_${GOARCH}.s` implements system
call dispatch. There are three entry points:
```
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
```
The first and second are the standard ones; they differ only in how many
arguments can be passed to the kernel. The third is for low-level use by the
ForkExec wrapper. Unlike the first two, it does not call into the scheduler to
let it know that a system call is running.
When porting Go to a new architecture/OS, this file must be implemented for
each GOOS/GOARCH pair.
### mksysnum
Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
for the old system). This program takes in a list of header files containing the
syscall number declarations and parses them to produce the corresponding list of
Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
constants.
Adding new syscall numbers is mostly done by running the build on a sufficiently
new installation of the target OS (or updating the source checkouts for the
new build system). However, depending on the OS, you may need to update the
parsing in mksysnum.
### mksyscall.go
The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
hand-written Go files which implement system calls (for unix, the specific OS,
or the specific OS/Architecture pair respectively) that need special handling
and list `//sys` comments giving prototypes for ones that can be generated.
The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
them into syscalls. This requires the name of the prototype in the comment to
match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
prototype can be exported (capitalized) or not.
Adding a new syscall often just requires adding a new `//sys` function prototype
with the desired arguments and a capitalized name so it is exported. However, if
you want the interface to the syscall to be different, often one will make an
unexported `//sys` prototype, and then write a custom wrapper in
`syscall_${GOOS}.go`.
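
To make that concrete, a `//sys` prototype as described above might look like the following; the syscall name and signature here are hypothetical, not an entry taken from the package:

```
// In syscall_linux.go (hypothetical example):
//sys	Examplecall(fd int, p []byte, flags int) (n int, err error)
```

mksyscall.go would then emit the marshalling wrapper into the corresponding `zsyscall_${GOOS}_${GOARCH}.go` file, resolving the syscall number from `zsysnum_${GOOS}_${GOARCH}.go`.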
### types files
For each OS, there is a hand-written Go file at `${GOOS}/types.go` (or
`types_${GOOS}.go` on the old system). This file includes standard C headers and
creates Go type aliases to the corresponding C types. The file is then fed
through godef to get the Go compatible definitions. Finally, the generated code
is fed though mkpost.go to format the code correctly and remove any hidden or
private identifiers. This cleaned-up code is written to
`ztypes_${GOOS}_${GOARCH}.go`.
The hardest part about preparing this file is figuring out which headers to
include and which symbols need to be `#define`d to get the actual data
structures that pass through to the kernel system calls. Some C libraries
preset alternate versions for binary compatibility and translate them on the
way in and out of system calls, but there is almost always a `#define` that can
get the real ones.
See `types_darwin.go` and `linux/types.go` for examples.
To add a new type, add in the necessary include statement at the top of the
file (if it is not already there) and add in a type alias line. Note that if
your type is significantly different on different architectures, you may need
some `#if/#elif` macros in your include statements.
### mkerrors.sh
This script is used to generate the system's various constants. This doesn't
just include the error numbers and error strings, but also the signal numbers
and a wide variety of miscellaneous constants. The constants come from the list
of include files in the `includes_${uname}` variable. A regex then picks out
the desired `#define` statements, and generates the corresponding Go constants.
The error numbers and strings are generated from `#include <errno.h>`, and the
signal numbers and strings are generated from `#include <signal.h>`. All of
these constants are written to `zerrors_${GOOS}_${GOARCH}.go` via a C program,
`_errors.c`, which prints out all the constants.
To add a constant, add the header that includes it to the appropriate variable.
Then, edit the regex (if necessary) to match the desired constant. Avoid making
the regex too broad to avoid matching unintended constants.
### internal/mkmerge
This program is used to extract duplicate const, func, and type declarations
from the generated architecture-specific files listed below, and merge these
into a common file for each OS.
The merge is performed in the following steps:
1. Construct the set of common code that is identical in all architecture-specific files.
2. Write this common code to the merged file.
3. Remove the common code from all architecture-specific files.
## Generated files
### `zerrors_${GOOS}_${GOARCH}.go`
A file containing all of the system's generated error numbers, error strings,
signal numbers, and constants. Generated by `mkerrors.sh` (see above).
### `zsyscall_${GOOS}_${GOARCH}.go`
A file containing all the generated syscalls for a specific GOOS and GOARCH.
Generated by `mksyscall.go` (see above).
### `zsysnum_${GOOS}_${GOARCH}.go`
A list of numeric constants for all the syscall numbers of the specific GOOS
and GOARCH. Generated by mksysnum (see above).
### `ztypes_${GOOS}_${GOARCH}.go`
A file containing Go types for passing into (or returning from) syscalls.
Generated by godefs and the types file (see above).

86
vendor/golang.org/x/sys/unix/affinity_linux.go generated vendored

@@ -0,0 +1,86 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// CPU affinity functions
package unix
import (
"math/bits"
"unsafe"
)
const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
// CPUSet represents a CPU affinity mask.
type CPUSet [cpuSetSize]cpuMask
func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
_, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
if e != 0 {
return errnoErr(e)
}
return nil
}
// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
// If pid is 0 the calling thread is used.
func SchedGetaffinity(pid int, set *CPUSet) error {
return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
}
// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
// If pid is 0 the calling thread is used.
func SchedSetaffinity(pid int, set *CPUSet) error {
return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
}
// Zero clears the set s, so that it contains no CPUs.
func (s *CPUSet) Zero() {
for i := range s {
s[i] = 0
}
}
func cpuBitsIndex(cpu int) int {
return cpu / _NCPUBITS
}
func cpuBitsMask(cpu int) cpuMask {
return cpuMask(1 << (uint(cpu) % _NCPUBITS))
}
// Set adds cpu to the set s.
func (s *CPUSet) Set(cpu int) {
i := cpuBitsIndex(cpu)
if i < len(s) {
s[i] |= cpuBitsMask(cpu)
}
}
// Clear removes cpu from the set s.
func (s *CPUSet) Clear(cpu int) {
i := cpuBitsIndex(cpu)
if i < len(s) {
s[i] &^= cpuBitsMask(cpu)
}
}
// IsSet reports whether cpu is in the set s.
func (s *CPUSet) IsSet(cpu int) bool {
i := cpuBitsIndex(cpu)
if i < len(s) {
return s[i]&cpuBitsMask(cpu) != 0
}
return false
}
// Count returns the number of CPUs in the set s.
func (s *CPUSet) Count() int {
c := 0
for _, b := range s {
c += bits.OnesCount64(uint64(b))
}
return c
}
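
The affinity helpers added above compose into the usual pin-to-CPU flow: read the current mask, inspect or modify it, then write it back. A hedged Linux-only sketch using the functions exactly as declared in this file:

```
//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var set unix.CPUSet
	// pid 0 means "the calling thread", as documented above.
	if err := unix.SchedGetaffinity(0, &set); err != nil {
		panic(err)
	}
	fmt.Println("runnable on", set.Count(), "CPUs")

	// Restrict the thread to CPU 0 only (illustrative; usually paired with
	// runtime.LockOSThread so the restriction applies to a known thread).
	var pinned unix.CPUSet
	pinned.Zero()
	pinned.Set(0)
	if err := unix.SchedSetaffinity(0, &pinned); err != nil {
		panic(err)
	}
}
```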

13
vendor/golang.org/x/sys/unix/aliases.go generated vendored

@@ -0,0 +1,13 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
package unix
import "syscall"
type Signal = syscall.Signal
type Errno = syscall.Errno
type SysProcAttr = syscall.SysProcAttr

17
vendor/golang.org/x/sys/unix/asm_aix_ppc64.s generated vendored

@@ -0,0 +1,17 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
#include "textflag.h"
//
// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
//
TEXT ·syscall6(SB),NOSPLIT,$0-88
JMP syscall·syscall6(SB)
TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
JMP syscall·rawSyscall6(SB)

27
vendor/golang.org/x/sys/unix/asm_bsd_386.s generated vendored

@@ -0,0 +1,27 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (freebsd || netbsd || openbsd) && gc
#include "textflag.h"
// System call support for 386 BSD
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-52
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)

27
vendor/golang.org/x/sys/unix/asm_bsd_amd64.s generated vendored

@@ -0,0 +1,27 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc
#include "textflag.h"
// System call support for AMD64 BSD
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)

27
vendor/golang.org/x/sys/unix/asm_bsd_arm.s generated vendored

@@ -0,0 +1,27 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (freebsd || netbsd || openbsd) && gc
#include "textflag.h"
// System call support for ARM BSD
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
B syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
B syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-52
B syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
B syscall·RawSyscall6(SB)

27
vendor/golang.org/x/sys/unix/asm_bsd_arm64.s generated vendored

@@ -0,0 +1,27 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (darwin || freebsd || netbsd || openbsd) && gc
#include "textflag.h"
// System call support for ARM64 BSD
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)

29
vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s generated vendored

@@ -0,0 +1,29 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (darwin || freebsd || netbsd || openbsd) && gc
#include "textflag.h"
//
// System call support for ppc64, BSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)

27
vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s generated vendored

@@ -0,0 +1,27 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (darwin || freebsd || netbsd || openbsd) && gc
#include "textflag.h"
// System call support for RISCV64 BSD
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)

65
vendor/golang.org/x/sys/unix/asm_linux_386.s generated vendored

@@ -0,0 +1,65 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
#include "textflag.h"
//
// System calls for 386, Linux
//
// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
// instead of the glibc-specific "CALL 0x10(GS)".
#define INVOKE_SYSCALL INT $0x80
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
CALL runtime·entersyscall(SB)
MOVL trap+0(FP), AX // syscall entry
MOVL a1+4(FP), BX
MOVL a2+8(FP), CX
MOVL a3+12(FP), DX
MOVL $0, SI
MOVL $0, DI
INVOKE_SYSCALL
MOVL AX, r1+16(FP)
MOVL DX, r2+20(FP)
CALL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
MOVL trap+0(FP), AX // syscall entry
MOVL a1+4(FP), BX
MOVL a2+8(FP), CX
MOVL a3+12(FP), DX
MOVL $0, SI
MOVL $0, DI
INVOKE_SYSCALL
MOVL AX, r1+16(FP)
MOVL DX, r2+20(FP)
RET
TEXT ·socketcall(SB),NOSPLIT,$0-36
JMP syscall·socketcall(SB)
TEXT ·rawsocketcall(SB),NOSPLIT,$0-36
JMP syscall·rawsocketcall(SB)
TEXT ·seek(SB),NOSPLIT,$0-28
JMP syscall·seek(SB)
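
The 386 file above spells out what the generic stubs hide: SyscallNoError marshals the trap number and arguments into registers, enters the kernel via INT 0x80, and copies r1/r2 back to the caller. A hedged sketch of the same path taken from Go code (unix.Syscall and unix.SYS_GETTID are existing x/sys/unix symbols on Linux; the demo itself is assumed, not part of the diff):

// Sketch only: issue a raw syscall through the wrapper backed by the
// assembly above. Works on Linux; SYS_GETTID is Linux-specific.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	tid, _, errno := unix.Syscall(unix.SYS_GETTID, 0, 0, 0)
	if errno != 0 {
		fmt.Println("gettid failed:", errno)
		return
	}
	fmt.Println("tid:", tid)
}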

57
vendor/golang.org/x/sys/unix/asm_linux_amd64.s generated vendored

@@ -0,0 +1,57 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
#include "textflag.h"
//
// System calls for AMD64, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
CALL runtime·entersyscall(SB)
MOVQ a1+8(FP), DI
MOVQ a2+16(FP), SI
MOVQ a3+24(FP), DX
MOVQ $0, R10
MOVQ $0, R8
MOVQ $0, R9
MOVQ trap+0(FP), AX // syscall entry
SYSCALL
MOVQ AX, r1+32(FP)
MOVQ DX, r2+40(FP)
CALL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVQ a1+8(FP), DI
MOVQ a2+16(FP), SI
MOVQ a3+24(FP), DX
MOVQ $0, R10
MOVQ $0, R8
MOVQ $0, R9
MOVQ trap+0(FP), AX // syscall entry
SYSCALL
MOVQ AX, r1+32(FP)
MOVQ DX, r2+40(FP)
RET
TEXT ·gettimeofday(SB),NOSPLIT,$0-16
JMP syscall·gettimeofday(SB)
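
Besides the generic trampolines, the amd64 file forwards a dedicated gettimeofday stub; as I read the vendored sources, the exported unix.Gettimeofday wrapper on linux/amd64 goes through it. A short sketch of that wrapper in use (assumed example, not part of the diff):

// Sketch only: unix.Gettimeofday is an existing x/sys/unix wrapper;
// on linux/amd64 it is backed by the gettimeofday trampoline above.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var tv unix.Timeval
	if err := unix.Gettimeofday(&tv); err != nil {
		fmt.Println("gettimeofday:", err)
		return
	}
	fmt.Printf("unix time: %d.%06d\n", tv.Sec, tv.Usec)
}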

56
vendor/golang.org/x/sys/unix/asm_linux_arm.s generated vendored

@@ -0,0 +1,56 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
#include "textflag.h"
//
// System calls for arm, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
B syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
B syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
BL runtime·entersyscall(SB)
MOVW trap+0(FP), R7
MOVW a1+4(FP), R0
MOVW a2+8(FP), R1
MOVW a3+12(FP), R2
MOVW $0, R3
MOVW $0, R4
MOVW $0, R5
SWI $0
MOVW R0, r1+16(FP)
MOVW $0, R0
MOVW R0, r2+20(FP)
BL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
B syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
MOVW trap+0(FP), R7 // syscall entry
MOVW a1+4(FP), R0
MOVW a2+8(FP), R1
MOVW a3+12(FP), R2
SWI $0
MOVW R0, r1+16(FP)
MOVW $0, R0
MOVW R0, r2+20(FP)
RET
TEXT ·seek(SB),NOSPLIT,$0-28
B syscall·seek(SB)

50
vendor/golang.org/x/sys/unix/asm_linux_arm64.s generated vendored

@@ -0,0 +1,50 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && arm64 && gc
#include "textflag.h"
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
B syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
B syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
BL runtime·entersyscall(SB)
MOVD a1+8(FP), R0
MOVD a2+16(FP), R1
MOVD a3+24(FP), R2
MOVD $0, R3
MOVD $0, R4
MOVD $0, R5
MOVD trap+0(FP), R8 // syscall entry
SVC
MOVD R0, r1+32(FP) // r1
MOVD R1, r2+40(FP) // r2
BL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
B syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVD a1+8(FP), R0
MOVD a2+16(FP), R1
MOVD a3+24(FP), R2
MOVD $0, R3
MOVD $0, R4
MOVD $0, R5
MOVD trap+0(FP), R8 // syscall entry
SVC
MOVD R0, r1+32(FP)
MOVD R1, r2+40(FP)
RET

51
vendor/golang.org/x/sys/unix/asm_linux_loong64.s generated vendored

@@ -0,0 +1,51 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && loong64 && gc
#include "textflag.h"
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
JAL runtime·entersyscall(SB)
MOVV a1+8(FP), R4
MOVV a2+16(FP), R5
MOVV a3+24(FP), R6
MOVV R0, R7
MOVV R0, R8
MOVV R0, R9
MOVV trap+0(FP), R11 // syscall entry
SYSCALL
MOVV R4, r1+32(FP)
MOVV R0, r2+40(FP) // r2 is not used. Always set to 0
JAL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVV a1+8(FP), R4
MOVV a2+16(FP), R5
MOVV a3+24(FP), R6
MOVV R0, R7
MOVV R0, R8
MOVV R0, R9
MOVV trap+0(FP), R11 // syscall entry
SYSCALL
MOVV R4, r1+32(FP)
MOVV R0, r2+40(FP) // r2 is not used. Always set to 0
RET

54
vendor/golang.org/x/sys/unix/asm_linux_mips64x.s generated vendored

@@ -0,0 +1,54 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && (mips64 || mips64le) && gc
#include "textflag.h"
//
// System calls for mips64, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
JAL runtime·entersyscall(SB)
MOVV a1+8(FP), R4
MOVV a2+16(FP), R5
MOVV a3+24(FP), R6
MOVV R0, R7
MOVV R0, R8
MOVV R0, R9
MOVV trap+0(FP), R2 // syscall entry
SYSCALL
MOVV R2, r1+32(FP)
MOVV R3, r2+40(FP)
JAL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVV a1+8(FP), R4
MOVV a2+16(FP), R5
MOVV a3+24(FP), R6
MOVV R0, R7
MOVV R0, R8
MOVV R0, R9
MOVV trap+0(FP), R2 // syscall entry
SYSCALL
MOVV R2, r1+32(FP)
MOVV R3, r2+40(FP)
RET

52
vendor/golang.org/x/sys/unix/asm_linux_mipsx.s generated vendored

@@ -0,0 +1,52 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && (mips || mipsle) && gc
#include "textflag.h"
//
// System calls for mips, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-52
JMP syscall·Syscall9(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
JAL runtime·entersyscall(SB)
MOVW a1+4(FP), R4
MOVW a2+8(FP), R5
MOVW a3+12(FP), R6
MOVW R0, R7
MOVW trap+0(FP), R2 // syscall entry
SYSCALL
MOVW R2, r1+16(FP) // r1
MOVW R3, r2+20(FP) // r2
JAL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
MOVW a1+4(FP), R4
MOVW a2+8(FP), R5
MOVW a3+12(FP), R6
MOVW trap+0(FP), R2 // syscall entry
SYSCALL
MOVW R2, r1+16(FP)
MOVW R3, r2+20(FP)
RET

42
vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s generated vendored

@@ -0,0 +1,42 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && (ppc64 || ppc64le) && gc
#include "textflag.h"
//
// System calls for ppc64, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
BL runtime·entersyscall(SB)
MOVD a1+8(FP), R3
MOVD a2+16(FP), R4
MOVD a3+24(FP), R5
MOVD R0, R6
MOVD R0, R7
MOVD R0, R8
MOVD trap+0(FP), R9 // syscall entry
SYSCALL R9
MOVD R3, r1+32(FP)
MOVD R4, r2+40(FP)
BL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVD a1+8(FP), R3
MOVD a2+16(FP), R4
MOVD a3+24(FP), R5
MOVD R0, R6
MOVD R0, R7
MOVD R0, R8
MOVD trap+0(FP), R9 // syscall entry
SYSCALL R9
MOVD R3, r1+32(FP)
MOVD R4, r2+40(FP)
RET

47
vendor/golang.org/x/sys/unix/asm_linux_riscv64.s generated vendored

@@ -0,0 +1,47 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build riscv64 && gc
#include "textflag.h"
//
// System calls for linux/riscv64.
//
// Where available, just jump to package syscall's implementation of
// these functions.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
CALL runtime·entersyscall(SB)
MOV a1+8(FP), A0
MOV a2+16(FP), A1
MOV a3+24(FP), A2
MOV trap+0(FP), A7 // syscall entry
ECALL
MOV A0, r1+32(FP) // r1
MOV A1, r2+40(FP) // r2
CALL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOV a1+8(FP), A0
MOV a2+16(FP), A1
MOV a3+24(FP), A2
MOV trap+0(FP), A7 // syscall entry
ECALL
MOV A0, r1+32(FP)
MOV A1, r2+40(FP)
RET

54
vendor/golang.org/x/sys/unix/asm_linux_s390x.s generated vendored

@@ -0,0 +1,54 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux && s390x && gc
#include "textflag.h"
//
// System calls for s390x, Linux
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
BR syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
BR syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
BL runtime·entersyscall(SB)
MOVD a1+8(FP), R2
MOVD a2+16(FP), R3
MOVD a3+24(FP), R4
MOVD $0, R5
MOVD $0, R6
MOVD $0, R7
MOVD trap+0(FP), R1 // syscall entry
SYSCALL
MOVD R2, r1+32(FP)
MOVD R3, r2+40(FP)
BL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
BR syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
BR syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOVD a1+8(FP), R2
MOVD a2+16(FP), R3
MOVD a3+24(FP), R4
MOVD $0, R5
MOVD $0, R6
MOVD $0, R7
MOVD trap+0(FP), R1 // syscall entry
SYSCALL
MOVD R2, r1+32(FP)
MOVD R3, r2+40(FP)
RET

29
vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s generated vendored

@@ -0,0 +1,29 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
#include "textflag.h"
//
// System call support for mips64, OpenBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)

Some files were not shown because too many files have changed in this diff.
