diff --git a/Dockerfile b/Dockerfile index b42aded..52b325d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18 +FROM golang:1.23 # Set app workdir WORKDIR /go/src/app diff --git a/go.mod b/go.mod index 7f0808f..319cb60 100644 --- a/go.mod +++ b/go.mod @@ -1,16 +1,16 @@ module gitea.russia9.dev/Russia9/chatwars-offers -go 1.18 +go 1.23 require ( - github.com/confluentinc/confluent-kafka-go v1.8.2 - github.com/rs/zerolog v1.26.1 + github.com/confluentinc/confluent-kafka-go v1.9.2 + github.com/rs/zerolog v1.33.0 gopkg.in/tucnak/telebot.v2 v2.5.0 ) require ( - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/stretchr/testify v1.7.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + golang.org/x/sys v0.24.0 // indirect ) diff --git a/go.sum b/go.sum index 4557be2..17bda03 100644 --- a/go.sum +++ b/go.sum @@ -1,52 +1,236 @@ -github.com/confluentinc/confluent-kafka-go v1.8.2 h1:PBdbvYpyOdFLehj8j+9ba7FL4c4Moxn79gy9cYKxG5E= -github.com/confluentinc/confluent-kafka-go v1.8.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/actgardner/gogen-avro/v10 v10.1.0/go.mod h1:o+ybmVjEa27AAr35FRqU98DJu1fXES56uXniYFv4yDA= +github.com/actgardner/gogen-avro/v10 v10.2.1/go.mod h1:QUhjeHPchheYmMDni/Nx7VB0RsT/ee8YIgGY/xpEQgQ= +github.com/actgardner/gogen-avro/v9 v9.1.0/go.mod h1:nyTj6wPqDJoxM3qdnjcLv+EnMDSDFqE0qDpva2QRmKc= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= 
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/confluentinc/confluent-kafka-go v1.9.2 h1:gV/GxhMBUb03tFWkN+7kdhg+zf+QUM+wVkI9zwh770Q= +github.com/confluentinc/confluent-kafka-go v1.9.2/go.mod h1:ptXNqsuDfYbAE/LBW6pnwWZElUoWxHoV8E43DCrliyo= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane 
v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod 
h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod 
h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hamba/avro v1.5.6/go.mod h1:3vNT0RLXXpFm2Tb/5KC71ZRJlOroggq1Rcitb6k4Fr8= +github.com/heetch/avro v0.3.1/go.mod h1:4xn38Oz/+hiEUTpbVfGVLfvOg0yKLlRP7Q9+gJJILgA= +github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= +github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= +github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= +github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= +github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= +github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM= +github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/linkedin/goavro/v2 v2.10.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/linkedin/goavro/v2 v2.11.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/nrwiersma/avro-benchmarks v0.0.0-20210913175520-21aec48c8f76/go.mod h1:iKyFMidsk/sVYONJRE372sJuX/QTRPacU7imPqqsu7g= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc= -github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/stretchr/testify v1.7.1 
h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20220503193339-ba3ae3f07e29/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/avro.v0 v0.0.0-20171217001914-a730b5802183/go.mod h1:FvqrFXt+jCsyQibeRv4xxEJBL5iG2DDW5aeJwzDiq4A= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g= gopkg.in/tucnak/telebot.v2 v2.5.0 h1:i+NynLo443Vp+Zn3Gv9JBjh3Z/PaiKAQwcnhNI7y6Po= gopkg.in/tucnak/telebot.v2 v2.5.0/go.mod h1:BgaIIx50PSRS9pG59JH+geT82cfvoJU/IaI5TJdN3v8= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go index 8c2c7b4..2b8f5a7 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go @@ -1,5 +1,3 @@ -package kafka - /** * Copyright 2016-2019 Confluent Inc. * @@ -16,6 +14,8 @@ package kafka * limitations under the License. */ +package kafka + import ( "fmt" ) @@ -29,19 +29,19 @@ import ( //defines and strings in sync. // -#define MIN_RD_KAFKA_VERSION 0x01060000 +#define MIN_RD_KAFKA_VERSION 0x01090000 #ifdef __APPLE__ -#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" #else -#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" +#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" #endif #if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION #ifdef __APPLE__ -#error "confluent-kafka-go requires librdkafka v1.6.0 or later. 
Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#error "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" #else -#error "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" +#error "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" #endif #endif */ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md index 98152e9..fcbb62b 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md @@ -62,23 +62,12 @@ these tags should be specified on the **application** build/get/install command. -## Generating HTML documentation - -To generate one-page HTML documentation run the mk/doc-gen.py script from the -top-level directory. This script requires the beautifulsoup4 Python package. - -``` -$ source .../your/virtualenv/bin/activate -$ pip install beautifulsoup4 -... -$ make -f mk/Makefile docs -``` - - ## Release process For each release candidate and final release, perform the following steps: +### Review the CHANGELOG + ### Update bundle to latest librdkafka See instructions in [kafka/librdkafka/README.md](kafka/librdkafka/README.md). @@ -87,8 +76,7 @@ See instructions in [kafka/librdkafka/README.md](kafka/librdkafka/README.md). ### Update librdkafka version requirement Update the minimum required librdkafka version in `kafka/00version.go` -and `README.md`. 
- +and `README.md` and the version in `examples/go.mod` and `mk/doc-gen.py`. ### Update error codes @@ -101,6 +89,19 @@ Update generated error codes: # Verify by building +## Generating HTML documentation + +To generate one-page HTML documentation run the mk/doc-gen.py script from the +top-level directory. This script requires the beautifulsoup4 Python package. + +``` +$ source .../your/virtualenv/bin/activate +$ pip install beautifulsoup4 +... +$ make -f mk/Makefile docs +``` + + ### Rebuild everything $ go clean -i ./... @@ -125,11 +126,6 @@ Manually verify that the examples/ applications work. Also make sure the examples in README.md work. -Convert any examples using `github.com/confluentinc/confluent-kafka-go/kafka` to use -`gopkg.in/confluentinc/confluent-kafka-go.v1/kafka` import path. - - $ find examples/ -type f -name *\.go -exec sed -i -e 's|github\.com/confluentinc/confluent-kafka-go/kafka|gopkg\.in/confluentinc/confluent-kafka-go\.v1/kafka|g' {} + - ### Commit any changes Make sure to push to github before creating the tag to have CI tests pass. @@ -143,3 +139,13 @@ Make sure to push to github before creating the tag to have CI tests pass. ### Create release notes page on github + +### Update version in Confluent docs + +Put the new version in settings.sh of these two repos + +https://github.com/confluentinc/docs + +https://github.com/confluentinc/docs-platform + +### Don't forget tweeting it! 
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go index e128129..e8e5bb4 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go @@ -48,6 +48,27 @@ ConfigEntry_by_idx (const rd_kafka_ConfigEntry_t **entries, size_t cnt, size_t i return NULL; return entries[idx]; } + +static const rd_kafka_acl_result_t * +acl_result_by_idx (const rd_kafka_acl_result_t **acl_results, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return acl_results[idx]; +} + +static const rd_kafka_DeleteAcls_result_response_t * +DeleteAcls_result_response_by_idx (const rd_kafka_DeleteAcls_result_response_t **delete_acls_result_responses, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return delete_acls_result_responses[idx]; +} + +static const rd_kafka_AclBinding_t * +AclBinding_by_idx (const rd_kafka_AclBinding_t **acl_bindings, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return acl_bindings[idx]; +} */ import "C" @@ -312,6 +333,225 @@ func (c ConfigResourceResult) String() string { return fmt.Sprintf("ResourceResult(%s, %s, %d config(s))", c.Type, c.Name, len(c.Config)) } +// ResourcePatternType enumerates the different types of Kafka resource patterns. +type ResourcePatternType int + +const ( + // ResourcePatternTypeUnknown is a resource pattern type not known or not set. + ResourcePatternTypeUnknown = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) + // ResourcePatternTypeAny matches any resource, used for lookups. 
+ ResourcePatternTypeAny = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_ANY) + // ResourcePatternTypeMatch will perform pattern matching + ResourcePatternTypeMatch = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_MATCH) + // ResourcePatternTypeLiteral matches a literal resource name + ResourcePatternTypeLiteral = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_LITERAL) + // ResourcePatternTypePrefixed matches a prefixed resource name + ResourcePatternTypePrefixed = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED) +) + +// String returns the human-readable representation of a ResourcePatternType +func (t ResourcePatternType) String() string { + return C.GoString(C.rd_kafka_ResourcePatternType_name(C.rd_kafka_ResourcePatternType_t(t))) +} + +// ResourcePatternTypeFromString translates a resource pattern type name to +// a ResourcePatternType value. +func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error) { + switch strings.ToUpper(patternTypeString) { + case "ANY": + return ResourcePatternTypeAny, nil + case "MATCH": + return ResourcePatternTypeMatch, nil + case "LITERAL": + return ResourcePatternTypeLiteral, nil + case "PREFIXED": + return ResourcePatternTypePrefixed, nil + default: + return ResourcePatternTypeUnknown, NewError(ErrInvalidArg, "Unknown resource pattern type", false) + } +} + +// ACLOperation enumerates the different types of ACL operation. 
+type ACLOperation int + +const ( + // ACLOperationUnknown represents an unknown or unset operation + ACLOperationUnknown = ACLOperation(C.RD_KAFKA_ACL_OPERATION_UNKNOWN) + // ACLOperationAny in a filter, matches any ACLOperation + ACLOperationAny = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ANY) + // ACLOperationAll represents all the operations + ACLOperationAll = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALL) + // ACLOperationRead a read operation + ACLOperationRead = ACLOperation(C.RD_KAFKA_ACL_OPERATION_READ) + // ACLOperationWrite represents a write operation + ACLOperationWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_WRITE) + // ACLOperationCreate represents a create operation + ACLOperationCreate = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CREATE) + // ACLOperationDelete represents a delete operation + ACLOperationDelete = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DELETE) + // ACLOperationAlter represents an alter operation + ACLOperationAlter = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER) + // ACLOperationDescribe represents a describe operation + ACLOperationDescribe = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE) + // ACLOperationClusterAction represents a cluster action operation + ACLOperationClusterAction = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION) + // ACLOperationDescribeConfigs represents a describe configs operation + ACLOperationDescribeConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS) + // ACLOperationAlterConfigs represents an alter configs operation + ACLOperationAlterConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS) + // ACLOperationIdempotentWrite represents an idempotent write operation + ACLOperationIdempotentWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE) +) + +// String returns the human-readable representation of an ACLOperation +func (o ACLOperation) String() string { + return C.GoString(C.rd_kafka_AclOperation_name(C.rd_kafka_AclOperation_t(o))) +} + +// ACLOperationFromString translates a ACL 
operation name to +// a ACLOperation value. +func ACLOperationFromString(aclOperationString string) (ACLOperation, error) { + switch strings.ToUpper(aclOperationString) { + case "ANY": + return ACLOperationAny, nil + case "ALL": + return ACLOperationAll, nil + case "READ": + return ACLOperationRead, nil + case "WRITE": + return ACLOperationWrite, nil + case "CREATE": + return ACLOperationCreate, nil + case "DELETE": + return ACLOperationDelete, nil + case "ALTER": + return ACLOperationAlter, nil + case "DESCRIBE": + return ACLOperationDescribe, nil + case "CLUSTER_ACTION": + return ACLOperationClusterAction, nil + case "DESCRIBE_CONFIGS": + return ACLOperationDescribeConfigs, nil + case "ALTER_CONFIGS": + return ACLOperationAlterConfigs, nil + case "IDEMPOTENT_WRITE": + return ACLOperationIdempotentWrite, nil + default: + return ACLOperationUnknown, NewError(ErrInvalidArg, "Unknown ACL operation", false) + } +} + +// ACLPermissionType enumerates the different types of ACL permission types. +type ACLPermissionType int + +const ( + // ACLPermissionTypeUnknown represents an unknown ACLPermissionType + ACLPermissionTypeUnknown = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN) + // ACLPermissionTypeAny in a filter, matches any ACLPermissionType + ACLPermissionTypeAny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY) + // ACLPermissionTypeDeny disallows access + ACLPermissionTypeDeny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY) + // ACLPermissionTypeAllow grants access + ACLPermissionTypeAllow = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW) +) + +// String returns the human-readable representation of an ACLPermissionType +func (o ACLPermissionType) String() string { + return C.GoString(C.rd_kafka_AclPermissionType_name(C.rd_kafka_AclPermissionType_t(o))) +} + +// ACLPermissionTypeFromString translates a ACL permission type name to +// a ACLPermissionType value. 
+func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error) { + switch strings.ToUpper(aclPermissionTypeString) { + case "ANY": + return ACLPermissionTypeAny, nil + case "DENY": + return ACLPermissionTypeDeny, nil + case "ALLOW": + return ACLPermissionTypeAllow, nil + default: + return ACLPermissionTypeUnknown, NewError(ErrInvalidArg, "Unknown ACL permission type", false) + } +} + +// ACLBinding specifies the operation and permission type for a specific principal +// over one or more resources of the same type. Used by `AdminClient.CreateACLs`, +// returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. +type ACLBinding struct { + Type ResourceType // The resource type. + // The resource name, which depends on the resource type. + // For ResourceBroker the resource name is the broker id. + Name string + ResourcePatternType ResourcePatternType // The resource pattern, relative to the name. + Principal string // The principal this ACLBinding refers to. + Host string // The host that the call is allowed to come from. + Operation ACLOperation // The operation/s specified by this binding. + PermissionType ACLPermissionType // The permission type for the specified operation. +} + +// ACLBindingFilter specifies a filter used to return a list of ACL bindings matching some or all of its attributes. +// Used by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. 
+type ACLBindingFilter = ACLBinding + +// ACLBindings is a slice of ACLBinding that also implements +// the sort interface +type ACLBindings []ACLBinding + +// ACLBindingFilters is a slice of ACLBindingFilter that also implements +// the sort interface +type ACLBindingFilters []ACLBindingFilter + +func (a ACLBindings) Len() int { + return len(a) +} + +func (a ACLBindings) Less(i, j int) bool { + if a[i].Type != a[j].Type { + return a[i].Type < a[j].Type + } + if a[i].Name != a[j].Name { + return a[i].Name < a[j].Name + } + if a[i].ResourcePatternType != a[j].ResourcePatternType { + return a[i].ResourcePatternType < a[j].ResourcePatternType + } + if a[i].Principal != a[j].Principal { + return a[i].Principal < a[j].Principal + } + if a[i].Host != a[j].Host { + return a[i].Host < a[j].Host + } + if a[i].Operation != a[j].Operation { + return a[i].Operation < a[j].Operation + } + if a[i].PermissionType != a[j].PermissionType { + return a[i].PermissionType < a[j].PermissionType + } + return true +} + +func (a ACLBindings) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +// CreateACLResult provides create ACL error information. +type CreateACLResult struct { + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error +} + +// DescribeACLsResult provides describe ACLs result or error information. +type DescribeACLsResult struct { + // Slice of ACL bindings matching the provided filter + ACLBindings ACLBindings + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error +} + +// DeleteACLsResult provides delete ACLs result or error information. +type DeleteACLsResult = DescribeACLsResult + // waitResult waits for a result event on cQueue or the ctx to be cancelled, whichever happens // first. // The returned result event is checked for errors its error is returned if set. 
@@ -950,6 +1190,355 @@ func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error { return a.handle.setOAuthBearerTokenFailure(errstr) } +// aclBindingToC converts a Go ACLBinding struct to a C rd_kafka_AclBinding_t +func (a *AdminClient) aclBindingToC(aclBinding *ACLBinding, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBinding_t, err error) { + var cName, cPrincipal, cHost *C.char + cName, cPrincipal, cHost = nil, nil, nil + if len(aclBinding.Name) > 0 { + cName = C.CString(aclBinding.Name) + defer C.free(unsafe.Pointer(cName)) + } + if len(aclBinding.Principal) > 0 { + cPrincipal = C.CString(aclBinding.Principal) + defer C.free(unsafe.Pointer(cPrincipal)) + } + if len(aclBinding.Host) > 0 { + cHost = C.CString(aclBinding.Host) + defer C.free(unsafe.Pointer(cHost)) + } + + result = C.rd_kafka_AclBinding_new( + C.rd_kafka_ResourceType_t(aclBinding.Type), + cName, + C.rd_kafka_ResourcePatternType_t(aclBinding.ResourcePatternType), + cPrincipal, + cHost, + C.rd_kafka_AclOperation_t(aclBinding.Operation), + C.rd_kafka_AclPermissionType_t(aclBinding.PermissionType), + cErrstr, + cErrstrSize, + ) + if result == nil { + err = newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for ACL binding %v: %v", aclBinding, C.GoString(cErrstr))) + } + return +} + +// aclBindingFilterToC converts a Go ACLBindingFilter struct to a C rd_kafka_AclBindingFilter_t +func (a *AdminClient) aclBindingFilterToC(aclBindingFilter *ACLBindingFilter, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBindingFilter_t, err error) { + var cName, cPrincipal, cHost *C.char + cName, cPrincipal, cHost = nil, nil, nil + if len(aclBindingFilter.Name) > 0 { + cName = C.CString(aclBindingFilter.Name) + defer C.free(unsafe.Pointer(cName)) + } + if len(aclBindingFilter.Principal) > 0 { + cPrincipal = C.CString(aclBindingFilter.Principal) + defer C.free(unsafe.Pointer(cPrincipal)) + } + if len(aclBindingFilter.Host) > 0 { + cHost = 
C.CString(aclBindingFilter.Host) + defer C.free(unsafe.Pointer(cHost)) + } + + result = C.rd_kafka_AclBindingFilter_new( + C.rd_kafka_ResourceType_t(aclBindingFilter.Type), + cName, + C.rd_kafka_ResourcePatternType_t(aclBindingFilter.ResourcePatternType), + cPrincipal, + cHost, + C.rd_kafka_AclOperation_t(aclBindingFilter.Operation), + C.rd_kafka_AclPermissionType_t(aclBindingFilter.PermissionType), + cErrstr, + cErrstrSize, + ) + if result == nil { + err = newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for ACL binding filter %v: %v", aclBindingFilter, C.GoString(cErrstr))) + } + return +} + +// cToACLBinding converts a C rd_kafka_AclBinding_t to Go ACLBinding +func (a *AdminClient) cToACLBinding(cACLBinding *C.rd_kafka_AclBinding_t) ACLBinding { + return ACLBinding{ + ResourceType(C.rd_kafka_AclBinding_restype(cACLBinding)), + C.GoString(C.rd_kafka_AclBinding_name(cACLBinding)), + ResourcePatternType(C.rd_kafka_AclBinding_resource_pattern_type(cACLBinding)), + C.GoString(C.rd_kafka_AclBinding_principal(cACLBinding)), + C.GoString(C.rd_kafka_AclBinding_host(cACLBinding)), + ACLOperation(C.rd_kafka_AclBinding_operation(cACLBinding)), + ACLPermissionType(C.rd_kafka_AclBinding_permission_type(cACLBinding)), + } +} + +// cToACLBindings converts a C rd_kafka_AclBinding_t list to Go ACLBindings +func (a *AdminClient) cToACLBindings(cACLBindings **C.rd_kafka_AclBinding_t, aclCnt C.size_t) (result ACLBindings) { + result = make(ACLBindings, aclCnt) + for i := uint(0); i < uint(aclCnt); i++ { + cACLBinding := C.AclBinding_by_idx(cACLBindings, aclCnt, C.size_t(i)) + if cACLBinding == nil { + panic("AclBinding_by_idx must not return nil") + } + result[i] = a.cToACLBinding(cACLBinding) + } + return +} + +// cToCreateACLResults converts a C acl_result_t array to Go CreateACLResult list. 
+func (a *AdminClient) cToCreateACLResults(cCreateAclsRes **C.rd_kafka_acl_result_t, aclCnt C.size_t) (result []CreateACLResult, err error) { + result = make([]CreateACLResult, uint(aclCnt)) + + for i := uint(0); i < uint(aclCnt); i++ { + cCreateACLRes := C.acl_result_by_idx(cCreateAclsRes, aclCnt, C.size_t(i)) + if cCreateACLRes != nil { + cCreateACLError := C.rd_kafka_acl_result_error(cCreateACLRes) + result[i].Error = newErrorFromCError(cCreateACLError) + } + } + + return result, nil +} + +// cToDescribeACLsResult converts a C rd_kafka_event_t to a Go DescribeAclsResult struct. +func (a *AdminClient) cToDescribeACLsResult(rkev *C.rd_kafka_event_t) (result *DescribeACLsResult) { + result = &DescribeACLsResult{} + err := C.rd_kafka_event_error(rkev) + errCode := ErrorCode(err) + errStr := C.rd_kafka_event_error_string(rkev) + + var cResultACLsCount C.size_t + cResult := C.rd_kafka_event_DescribeAcls_result(rkev) + cResultACLs := C.rd_kafka_DescribeAcls_result_acls(cResult, &cResultACLsCount) + if errCode != ErrNoError { + result.Error = newErrorFromCString(err, errStr) + } + result.ACLBindings = a.cToACLBindings(cResultACLs, cResultACLsCount) + return +} + +// cToDeleteACLsResults converts a C rd_kafka_DeleteAcls_result_response_t array to Go DeleteAclsResult slice. 
+func (a *AdminClient) cToDeleteACLsResults(cDeleteACLsResResponse **C.rd_kafka_DeleteAcls_result_response_t, resResponseCnt C.size_t) (result []DeleteACLsResult) { + result = make([]DeleteACLsResult, uint(resResponseCnt)) + + for i := uint(0); i < uint(resResponseCnt); i++ { + cDeleteACLsResResponse := C.DeleteAcls_result_response_by_idx(cDeleteACLsResResponse, resResponseCnt, C.size_t(i)) + if cDeleteACLsResResponse == nil { + panic("DeleteAcls_result_response_by_idx must not return nil") + } + + cDeleteACLsError := C.rd_kafka_DeleteAcls_result_response_error(cDeleteACLsResResponse) + result[i].Error = newErrorFromCError(cDeleteACLsError) + + var cMatchingACLsCount C.size_t + cMatchingACLs := C.rd_kafka_DeleteAcls_result_response_matching_acls( + cDeleteACLsResResponse, &cMatchingACLsCount) + + result[i].ACLBindings = a.cToACLBindings(cMatchingACLs, cMatchingACLsCount) + } + return +} + +// CreateACLs creates one or more ACL bindings. +// +// Parameters: +// * `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// * `aclBindings` - A slice of ACL binding specifications to create. 
+// * `options` - Create ACLs options +// +// Returns a slice of CreateACLResult with a ErrNoError ErrorCode when the operation was successful +// plus an error that is not nil for client level errors +func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error) { + if aclBindings == nil { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-nil slice of ACLBinding structs") + } + if len(aclBindings) == 0 { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-empty slice of ACLBinding structs") + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cACLBindings := make([]*C.rd_kafka_AclBinding_t, len(aclBindings)) + + for i, aclBinding := range aclBindings { + cACLBindings[i], err = a.aclBindingToC(&aclBinding, cErrstr, cErrstrSize) + if err != nil { + return + } + defer C.rd_kafka_AclBinding_destroy(cACLBindings[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATEACLS, genericOptions) + if err != nil { + return nil, err + } + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_CreateAcls( + a.handle.rk, + (**C.rd_kafka_AclBinding_t)(&cACLBindings[0]), + C.size_t(len(cACLBindings)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATEACLS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + var cResultCnt C.size_t + cResult := C.rd_kafka_event_CreateAcls_result(rkev) + aclResults := C.rd_kafka_CreateAcls_result_acls(cResult, &cResultCnt) + 
result, err = a.cToCreateACLResults(aclResults, cResultCnt) + return +} + +// DescribeACLs matches ACL bindings by filter. +// +// Parameters: +// * `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// * `aclBindingFilter` - A filter with attributes that must match. +// string attributes match exact values or any string if set to empty string. +// Enum attributes match exact values or any value if ending with `Any`. +// If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns ACL bindings with: +// - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name +// - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name +// - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name +// * `options` - Describe ACLs options +// +// Returns a slice of ACLBindings when the operation was successful +// plus an error that is not `nil` for client level errors +func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error) { + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cACLBindingFilter, err := a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize) + if err != nil { + return + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBEACLS, genericOptions) + if err != nil { + return nil, err + } + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DescribeAcls( + a.handle.rk, + cACLBindingFilter, + 
cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBEACLS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + result = a.cToDescribeACLsResult(rkev) + return +} + +// DeleteACLs deletes ACL bindings matching one or more ACL binding filters. +// +// Parameters: +// * `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// * `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete. +// string attributes match exact values or any string if set to empty string. +// Enum attributes match exact values or any value if ending with `Any`. +// If `ResourcePatternType` is set to `ResourcePatternTypeMatch` deletes ACL bindings with: +// - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name +// - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name +// - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name +// * `options` - Delete ACLs options +// +// Returns a slice of ACLBinding for each filter when the operation was successful +// plus an error that is not `nil` for client level errors +func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error) { + if aclBindingFilters == nil { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-nil slice of ACLBindingFilter structs") + } + if len(aclBindingFilters) == 0 { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-empty slice of ACLBindingFilter structs") + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cACLBindingFilters := make([]*C.rd_kafka_AclBindingFilter_t, len(aclBindingFilters)) + + for i, aclBindingFilter := 
range aclBindingFilters { + cACLBindingFilters[i], err = a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize) + if err != nil { + return + } + defer C.rd_kafka_AclBinding_destroy(cACLBindingFilters[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DELETEACLS, genericOptions) + if err != nil { + return nil, err + } + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DeleteAcls( + a.handle.rk, + (**C.rd_kafka_AclBindingFilter_t)(&cACLBindingFilters[0]), + C.size_t(len(cACLBindingFilters)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETEACLS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + var cResultResponsesCount C.size_t + cResult := C.rd_kafka_event_DeleteAcls_result(rkev) + cResultResponses := C.rd_kafka_DeleteAcls_result_responses(cResult, &cResultResponsesCount) + result = a.cToDeleteACLsResults(cResultResponses, cResultResponsesCount) + return +} + // Close an AdminClient instance. 
func (a *AdminClient) Close() { if a.isDerived { diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go index 842631b..db55d2d 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go @@ -166,6 +166,15 @@ func (ao AdminOptionValidateOnly) supportsCreatePartitions() { func (ao AdminOptionValidateOnly) supportsAlterConfigs() { } +func (ao AdminOptionRequestTimeout) supportsCreateACLs() { +} + +func (ao AdminOptionRequestTimeout) supportsDescribeACLs() { +} + +func (ao AdminOptionRequestTimeout) supportsDeleteACLs() { +} + func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error { if !ao.isSet { return nil @@ -240,6 +249,30 @@ type DescribeConfigsAdminOption interface { apply(cOptions *C.rd_kafka_AdminOptions_t) error } +// CreateACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type CreateACLsAdminOption interface { + supportsCreateACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type DescribeACLsAdminOption interface { + supportsDescribeACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DeleteACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type DeleteACLsAdminOption interface { + supportsDeleteACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + // AdminOption is a generic type not to be used directly. // // See CreateTopicsAdminOption et.al. diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html index 8c495b3..d38ca2e 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html @@ -7,16 +7,16 @@
- + 00version.go - + adminapi.go - + adminoptions.go - + build_glibc_linux.go - + config.go - + consumer.go - + context.go - + error.go - + error_gen.go - + event.go - + generated_errors.go - + handle.go - + header.go - + kafka.go - + log.go - + message.go - + metadata.go - + misc.go - + + mockcluster.go + + offset.go - + producer.go - + testhelpers.go - + time.go @@ -1282,37 +1435,87 @@ possible complications with blocking Poll() calls.
const ( // ResourceUnknown - Unknown - ResourceUnknown = ResourceType(C.RD_KAFKA_RESOURCE_UNKNOWN) + ResourceUnknown = ResourceType(C.RD_KAFKA_RESOURCE_UNKNOWN) // ResourceAny - match any resource type (DescribeConfigs) - ResourceAny = ResourceType(C.RD_KAFKA_RESOURCE_ANY) + ResourceAny = ResourceType(C.RD_KAFKA_RESOURCE_ANY) // ResourceTopic - Topic - ResourceTopic = ResourceType(C.RD_KAFKA_RESOURCE_TOPIC) + ResourceTopic = ResourceType(C.RD_KAFKA_RESOURCE_TOPIC) // ResourceGroup - Group - ResourceGroup = ResourceType(C.RD_KAFKA_RESOURCE_GROUP) + ResourceGroup = ResourceType(C.RD_KAFKA_RESOURCE_GROUP) // ResourceBroker - Broker - ResourceBroker = ResourceType(C.RD_KAFKA_RESOURCE_BROKER) + ResourceBroker = ResourceType(C.RD_KAFKA_RESOURCE_BROKER) )
const ( // ConfigSourceUnknown is the default value - ConfigSourceUnknown = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG) + ConfigSourceUnknown = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG) // ConfigSourceDynamicTopic is dynamic topic config that is configured for a specific topic - ConfigSourceDynamicTopic = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG) + ConfigSourceDynamicTopic = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG) // ConfigSourceDynamicBroker is dynamic broker config that is configured for a specific broker - ConfigSourceDynamicBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG) + ConfigSourceDynamicBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG) // ConfigSourceDynamicDefaultBroker is dynamic broker config that is configured as default for all brokers in the cluster - ConfigSourceDynamicDefaultBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG) + ConfigSourceDynamicDefaultBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG) // ConfigSourceStaticBroker is static broker config provided as broker properties at startup (e.g. from server.properties file) - ConfigSourceStaticBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG) + ConfigSourceStaticBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG) // ConfigSourceDefault is built-in default configuration for configs that have a default value - ConfigSourceDefault = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG) + ConfigSourceDefault = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG) +)+
const ( + // ResourcePatternTypeUnknown is a resource pattern type not known or not set. + ResourcePatternTypeUnknown = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) + // ResourcePatternTypeAny matches any resource, used for lookups. + ResourcePatternTypeAny = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_ANY) + // ResourcePatternTypeMatch will perform pattern matching + ResourcePatternTypeMatch = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_MATCH) + // ResourcePatternTypeLiteral matches a literal resource name + ResourcePatternTypeLiteral = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_LITERAL) + // ResourcePatternTypePrefixed matches a prefixed resource name + ResourcePatternTypePrefixed = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED) +)+
const ( + // ACLOperationUnknown represents an unknown or unset operation + ACLOperationUnknown = ACLOperation(C.RD_KAFKA_ACL_OPERATION_UNKNOWN) + // ACLOperationAny in a filter, matches any ACLOperation + ACLOperationAny = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ANY) + // ACLOperationAll represents all the operations + ACLOperationAll = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALL) + // ACLOperationRead a read operation + ACLOperationRead = ACLOperation(C.RD_KAFKA_ACL_OPERATION_READ) + // ACLOperationWrite represents a write operation + ACLOperationWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_WRITE) + // ACLOperationCreate represents a create operation + ACLOperationCreate = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CREATE) + // ACLOperationDelete represents a delete operation + ACLOperationDelete = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DELETE) + // ACLOperationAlter represents an alter operation + ACLOperationAlter = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER) + // ACLOperationDescribe represents a describe operation + ACLOperationDescribe = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE) + // ACLOperationClusterAction represents a cluster action operation + ACLOperationClusterAction = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION) + // ACLOperationDescribeConfigs represents a describe configs operation + ACLOperationDescribeConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS) + // ACLOperationAlterConfigs represents an alter configs operation + ACLOperationAlterConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS) + // ACLOperationIdempotentWrite represents an idempotent write operation + ACLOperationIdempotentWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE) +)+
const ( + // ACLPermissionTypeUnknown represents an unknown ACLPermissionType + ACLPermissionTypeUnknown = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN) + // ACLPermissionTypeAny in a filter, matches any ACLPermissionType + ACLPermissionTypeAny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY) + // ACLPermissionTypeDeny disallows access + ACLPermissionTypeDeny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY) + // ACLPermissionTypeAllow grants access + ACLPermissionTypeAllow = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW) )
const ( // TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support - TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) + TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) // TimestampCreateTime indicates timestamp set by producer (source time) - TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME) + TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME) // TimestampLogAppendTime indicates timestamp set set by broker (store time) - TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) + TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) )
const ( // PurgeInFlight purges messages in-flight to or from the broker. @@ -1320,75 +1523,253 @@ possible complications with blocking Poll() calls. // broker, making it impossible for the application to know if these // messages were successfully delivered or not. // Retrying these messages may lead to duplicates. - PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT) + PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT) // PurgeQueue Purge messages in internal queues. - PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE) + PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE) // PurgeNonBlocking Don't wait for background thread queue purging to finish. - PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING) + PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING) )
const ( // AlterOperationSet sets/overwrites the configuration setting. - AlterOperationSet = iota + AlterOperationSet = iota )
LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
-const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.8.2.tgz"
+ const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.9.2.tgz"
OffsetBeginning represents the earliest offset (logical)
-const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)
+ const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)
OffsetEnd represents the latest offset (logical)
-const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)
+ const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)
OffsetInvalid represents an invalid/unspecified offset
-const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)
+ const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)
OffsetStored represents a stored offset
-const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)
+ const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)
PartitionAny represents any partition (for partitioning), or unspecified value (for all other cases)
-const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)
+ const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)
func LibraryVersion() (int, string)+
func LibraryVersion() (int, string)
LibraryVersion returns the underlying librdkafka library version as a (version_int, version_str) tuple.
func WriteErrorCodes(f *os.File)+
func WriteErrorCodes(f *os.File)
WriteErrorCodes writes Go error code constants to file from the librdkafka error codes. This function is not intended for public use.
++ ACLBinding specifies the operation and permission type for a specific principal +over one or more resources of the same type. Used by `AdminClient.CreateACLs`, +returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. +
+type ACLBinding struct { + Type ResourceType // The resource type. + // The resource name, which depends on the resource type. + // For ResourceBroker the resource name is the broker id. + Name string + ResourcePatternType ResourcePatternType // The resource pattern, relative to the name. + Principal string // The principal this ACLBinding refers to. + Host string // The host that the call is allowed to come from. + Operation ACLOperation // The operation/s specified by this binding. + PermissionType ACLPermissionType // The permission type for the specified operation. +} ++
+ ACLBindingFilter specifies a filter used to return a list of ACL bindings matching some or all of its attributes. +Used by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. +
+type ACLBindingFilter = ACLBinding+
+ ACLBindingFilters is a slice of ACLBindingFilter that also implements +the sort interface +
+type ACLBindingFilters []ACLBindingFilter+
+ ACLBindings is a slice of ACLBinding that also implements +the sort interface +
+type ACLBindings []ACLBinding+
func (a ACLBindings) Len() int+
func (a ACLBindings) Less(i, j int) bool+
func (a ACLBindings) Swap(i, j int)+
+ ACLOperation enumerates the different types of ACL operation. +
+type ACLOperation int+
func ACLOperationFromString(aclOperationString string) (ACLOperation, error)+
+ ACLOperationFromString translates an ACL operation name to +an ACLOperation value. +
+func (o ACLOperation) String() string+
+ String returns the human-readable representation of an ACLOperation +
++ ACLPermissionType enumerates the different types of ACL permission types. +
+type ACLPermissionType int+
func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error)+
+ ACLPermissionTypeFromString translates an ACL permission type name to +an ACLPermissionType value. +
+func (o ACLPermissionType) String() string+
+ String returns the human-readable representation of an ACLPermissionType +
func NewAdminClient(conf *ConfigMap) (*AdminClient, error)+
func NewAdminClient(conf *ConfigMap) (*AdminClient, error)
NewAdminClient creates a new AdminClient instance with a new underlying client instance
func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error)+
func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error)
NewAdminClientFromConsumer derives a new AdminClient from an existing Consumer instance. The AdminClient will use the same configuration and connections as the parent instance.
func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error)+
func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error)
NewAdminClientFromProducer derives a new AdminClient from an existing Producer instance. The AdminClient will use the same configuration and connections as the parent instance.
func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)+
func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)
AlterConfigs alters/updates cluster resource configuration.
@@ -1478,7 +1859,7 @@ resource requests must be sent to the broker specified in the resource.func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error)+
func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error)
ClusterID returns the cluster ID as reported in broker metadata.
@@ -1512,14 +1893,14 @@ cancelling the context will block until the C function call returns.func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error)+
func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error)
ControllerID returns the broker ID of the current controller as reported in broker metadata. @@ -1532,29 +1913,53 @@ cancelling the context will block until the C function call returns.
Requires broker version >= 0.10.0.
+func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error)+
+ CreateACLs creates one or more ACL bindings. +
++ Parameters: +
+* `ctx` - context with the maximum amount of time to block, or nil for indefinite. +* `aclBindings` - A slice of ACL binding specifications to create. +* `options` - Create ACLs options ++
+ Returns a slice of CreateACLResult with a ErrNoError ErrorCode when the operation was successful +plus an error that is not nil for client level errors +
func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error)+
func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error)
CreatePartitions creates additional partitions for topics.
func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error)+
func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error)
CreateTopics creates topics in cluster.
@@ -1568,16 +1973,46 @@ make sure to check the result for topic-specific errors.Note: TopicSpecification is analogous to NewTopic in the Java Topic Admin API.
+func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error)+
+ DeleteACLs deletes ACL bindings matching one or more ACL binding filters. +
++ Parameters: +
+* `ctx` - context with the maximum amount of time to block, or nil for indefinite. +* `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete. + string attributes match exact values or any string if set to empty string. + Enum attributes match exact values or any value if ending with `Any`. + If `ResourcePatternType` is set to `ResourcePatternTypeMatch` deletes ACL bindings with: + - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name + - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name + - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name +* `options` - Delete ACLs options ++
+ Returns a slice of ACLBinding for each filter when the operation was successful +plus an error that is not `nil` for client level errors +
func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error)+
func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error)
DeleteTopics deletes a batch of topics.
@@ -1591,16 +2026,46 @@ topic metadata and configuration may continue to return information about deleted topics. Requires broker version >= 0.10.1.0
+func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error)+
+ DescribeACLs matches ACL bindings by filter. +
++ Parameters: +
+* `ctx` - context with the maximum amount of time to block, or nil for indefinite. +* `aclBindingFilter` - A filter with attributes that must match. + string attributes match exact values or any string if set to empty string. + Enum attributes match exact values or any value if ending with `Any`. + If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns ACL bindings with: + - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name + - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name + - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name +* `options` - Describe ACLs options ++
+ Returns a slice of ACLBindings when the operation was successful +plus an error that is not `nil` for client level errors +
func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error)+
func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error)
DescribeConfigs retrieves configuration for cluster resources.
@@ -1633,14 +2098,14 @@ in the resource.func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)+
func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
GetMetadata queries broker for cluster and topic metadata. If topic is non-nil only information about that topic is returned, else if @@ -1650,14 +2115,14 @@ GetMetadata is equivalent to listTopics, describeTopics and describeCluster in t
func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error+
func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
SetOAuthBearerToken sets the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil @@ -1675,14 +2140,14 @@ authentication mechanism.
func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error+
func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error
SetOAuthBearerTokenFailure sets the error message describing why token retrieval/setting failed; it also schedules a new token refresh event for 10 @@ -1694,20 +2159,20 @@ authentication mechanism.
func (a *AdminClient) String() string+
func (a *AdminClient) String() string
String returns a human readable name for an AdminClient instance
func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout)+
func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout)
SetAdminOperationTimeout sets the broker's operation timeout, such as the timeout for CreateTopics to complete the creation of topics on the controller @@ -1782,7 +2247,7 @@ in cluster.
func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout)+
func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout)
SetAdminRequestTimeout sets the overall request timeout, including broker lookup, request transmission, operation time on broker, and response. @@ -1825,7 +2290,7 @@ lookup, request transmission, operation time on broker, and response.
func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly)+
func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly)
SetAdminValidateOnly tells the broker to only validate the request, without performing the requested operation (create topics, etc). @@ -1868,7 +2333,7 @@ without performing the requested operation (create topics, etc).
type AlterOperation int+
type AlterOperation int
func (o AlterOperation) String() string+
func (o AlterOperation) String() string
String returns the human-readable representation of an AlterOperation
func (e AssignedPartitions) String() string+
func (e AssignedPartitions) String() string
type BrokerMetadata struct { - ID int32 - Host string - Port int + ID int32 + Host string + Port int }
type ConfigEntry struct { // Name of configuration entry, e.g., topic configuration property name. - Name string + Name string // Value of configuration entry. - Value string + Value string // Operation to perform on the entry. Operation AlterOperation }
func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry+
func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry
StringMapToConfigEntries creates a new map of ConfigEntry objects from the provided string map. The AlterOperation is set on each created entry.
func (c ConfigEntry) String() string+
func (c ConfigEntry) String() string
String returns a human-readable representation of a ConfigEntry.
type ConfigEntryResult struct { // Name of configuration entry, e.g., topic configuration property name. - Name string + Name string // Value of configuration entry. - Value string + Value string // Source indicates the configuration source. Source ConfigSource // IsReadOnly indicates whether the configuration entry can be altered. - IsReadOnly bool + IsReadOnly bool // IsSensitive indicates whether the configuration entry contains sensitive information, in which case the value will be unset. - IsSensitive bool + IsSensitive bool // IsSynonym indicates whether the configuration entry is a synonym for another configuration property. - IsSynonym bool + IsSynonym bool // Synonyms contains a map of configuration entries that are synonyms to this configuration entry. - Synonyms map[string]ConfigEntryResult + Synonyms map[string]ConfigEntryResult }
func (c ConfigEntryResult) String() string+
func (c ConfigEntryResult) String() string
String returns a human-readable representation of a ConfigEntryResult.
type ConfigMap map[string]ConfigValue+
type ConfigMap map[string]ConfigValue
func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error)+
func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error)
Get finds the given key in the ConfigMap and returns its value. If the key is not found `defval` is returned. @@ -2090,28 +2555,28 @@ an ErrInvalidArg error is returned.
func (m ConfigMap) Set(kv string) error+
func (m ConfigMap) Set(kv string) error
Set implements flag.Set (command line argument parser) as a convenience for `-X key=value` config.
func (m ConfigMap) SetKey(key string, value ConfigValue) error+
func (m ConfigMap) SetKey(key string, value ConfigValue) error
SetKey sets configuration property key to value.
@@ -2121,7 +2586,7 @@ set on the "default.topic.config" sub-map, this use is deprecated.func (c ConfigResource) String() string+
func (c ConfigResource) String() string
String returns a human-readable representation of a ConfigResource
func (c ConfigResourceResult) String() string+
func (c ConfigResourceResult) String() string
String returns a human-readable representation of a ConfigResourceResult.
ConfigSource represents an Apache Kafka config source
-type ConfigSource int+
type ConfigSource int
func (t ConfigSource) String() string+
func (t ConfigSource) String() string
String returns the human-readable representation of a ConfigSource type
type ConfigValue interface{}
func NewConsumer(conf *ConfigMap) (*Consumer, error)+
func NewConsumer(conf *ConfigMap) (*Consumer, error)
NewConsumer creates a new high-level Consumer instance.
@@ -2288,14 +2753,14 @@ event or message may be outdated.func (c *Consumer) Assign(partitions []TopicPartition) (err error)+
func (c *Consumer) Assign(partitions []TopicPartition) (err error)
Assign an atomic set of partitions to consume.
@@ -2311,27 +2776,27 @@ use the committed offset as a start position, with a fallback tofunc (c *Consumer) Assignment() (partitions []TopicPartition, err error)+
func (c *Consumer) Assignment() (partitions []TopicPartition, err error)
Assignment returns the current partition assignments
func (c *Consumer) AssignmentLost() bool+
func (c *Consumer) AssignmentLost() bool
AssignmentLost returns true if current partition assignment has been lost. This method is only applicable for use with a subscribing consumer when @@ -2341,28 +2806,28 @@ group and therefore committing offsets, for example, may fail.
func (c *Consumer) Close() (err error)+
func (c *Consumer) Close() (err error)
Close Consumer instance. The object is no longer usable after this call.
func (c *Consumer) Commit() ([]TopicPartition, error)+
func (c *Consumer) Commit() ([]TopicPartition, error)
Commit offsets for currently assigned partitions This is a blocking call. @@ -2370,14 +2835,14 @@ Returns the committed offsets on success.
func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error)+
func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error)
CommitMessage commits offset based on the provided message. This is a blocking call. @@ -2385,14 +2850,14 @@ Returns the committed offsets on success.
func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error)+
func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error)
CommitOffsets commits the provided list of offsets This is a blocking call. @@ -2400,20 +2865,20 @@ Returns the committed offsets on success.
func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)+
func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
Committed retrieves committed offsets for the given set of partitions
func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error)+
func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error)
GetConsumerGroupMetadata returns the consumer's current group metadata. This object should be passed to the transactional producer's @@ -2441,14 +2906,14 @@ SendOffsetsToTransaction() API.
func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)+
func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
GetMetadata queries broker for cluster and topic metadata. If topic is non-nil only information about that topic is returned, else if @@ -2458,14 +2923,14 @@ GetMetadata is equivalent to listTopics, describeTopics and describeCluster in t
func (c *Consumer) GetRebalanceProtocol() string+
func (c *Consumer) GetRebalanceProtocol() string
GetRebalanceProtocol returns the current consumer group rebalance protocol, which is either "EAGER" or "COOPERATIVE". @@ -2475,14 +2940,14 @@ Should typically only be called during rebalancing.
func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error)+
func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error)
GetWatermarkOffsets returns the cached low and high offsets for the given topic and partition. The high offset is populated on every fetch response or via calling QueryWatermarkOffsets. @@ -2491,14 +2956,14 @@ OffsetInvalid will be returned if there is no cached offset for either value.
func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error)+
func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error)
IncrementalAssign adds the specified partitions to the current set of partitions to consume. @@ -2515,14 +2980,14 @@ use the committed offset as a start position, with a fallback to
func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error)+
func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error)
IncrementalUnassign removes the specified partitions from the current set of partitions to consume. @@ -2535,7 +3000,7 @@ partitions to consume.
func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)+
func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
OffsetsForTimes looks up offsets by timestamp for the given partitions.
@@ -2579,14 +3044,14 @@ Per-partition errors may be returned in the `.Error` field.func (c *Consumer) Pause(partitions []TopicPartition) (err error)+
func (c *Consumer) Pause(partitions []TopicPartition) (err error)
Pause consumption for the provided list of partitions
@@ -2597,14 +3062,14 @@ this call, set `go.events.channel.size` accordingly.func (c *Consumer) Poll(timeoutMs int) (event Event)+
func (c *Consumer) Poll(timeoutMs int) (event Event)
Poll the consumer for messages or events.
@@ -2621,14 +3086,14 @@ this call, set `go.events.channel.size` accordingly.func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error)+
func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error)
Position returns the current consume position for the given partitions. Typical use is to call Assignment() to get the partition list @@ -2639,27 +3104,27 @@ i.e., the offset of the last message seen by the application + 1.
func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)+
func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
QueryWatermarkOffsets queries the broker for the low and high offsets for the given topic and partition.
func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error)+
func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error)
ReadMessage polls the consumer for a message.
@@ -2686,27 +3151,27 @@ msg.TopicPartition provides partition-specific information (such as topic, partifunc (c *Consumer) Resume(partitions []TopicPartition) (err error)+
func (c *Consumer) Resume(partitions []TopicPartition) (err error)
Resume consumption for the provided list of partitions
func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error+
func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error
Seek seeks the given topic partitions using the offset from the TopicPartition.
@@ -2728,14 +3193,14 @@ a starting offset for each partition.func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error+
func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
SetOAuthBearerToken sets the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil @@ -2753,14 +3218,14 @@ authentication mechanism.
func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error+
func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error
SetOAuthBearerTokenFailure sets the error message describing why token retrieval/setting failed; it also schedules a new token refresh event for 10 @@ -2769,17 +3234,31 @@ success, otherwise an error if: 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 2) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism. +
+func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error)+
+ StoreMessage stores offset based on the provided message. +This is a convenience method that uses StoreOffsets to do the actual work.
func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error)+
func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error)
StoreOffsets stores the provided list of offsets that will be committed to the offset store according to `auto.commit.interval.ms` or manual @@ -2792,87 +3271,87 @@ specific errors via its `.Error` member.
func (c *Consumer) String() string+
func (c *Consumer) String() string
String returns a human readable name for a Consumer instance
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error+
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
Subscribe to a single topic This replaces the current subscription
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error)+
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error)
SubscribeTopics subscribes to the provided list of topics. This replaces the current subscription.
func (c *Consumer) Subscription() (topics []string, err error)+
func (c *Consumer) Subscription() (topics []string, err error)
Subscription returns the current subscription as set by Subscribe()
func (c *Consumer) Unassign() (err error)+
func (c *Consumer) Unassign() (err error)
Unassign the current set of partitions to consume.
func (c *Consumer) Unsubscribe() (err error)+
func (c *Consumer) Unsubscribe() (err error)
Unsubscribe from the current subscription, if any.
func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error)+
func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error)
NewTestConsumerGroupMetadata creates a new consumer group metadata instance mainly for testing use. Use GetConsumerGroupMetadata() to retrieve the real metadata.
++ CreateACLResult provides create ACL error information. +
+type CreateACLResult struct {
+ // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+ Error Error
+}
+
+ + CreateACLsAdminOption - see setter. +
++ See SetAdminRequestTimeout +
+type CreateACLsAdminOption interface {
+ // contains filtered or unexported methods
+}
type CreateTopicsAdminOption interface {
// contains filtered or unexported methods
}
+ + DeleteACLsAdminOption - see setter. +
++ See SetAdminRequestTimeout +
+type DeleteACLsAdminOption interface {
+ // contains filtered or unexported methods
+}
+ + DeleteACLsResult provides delete ACLs result or error information. +
+type DeleteACLsResult = DescribeACLsResult
type DeleteTopicsAdminOption interface {
// contains filtered or unexported methods
}
+ + DescribeACLsAdminOption - see setter. +
++ See SetAdminRequestTimeout +
+type DescribeACLsAdminOption interface {
+ // contains filtered or unexported methods
+}
+ + DescribeACLsResult provides describe ACLs result or error information. +
+type DescribeACLsResult struct { + // Slice of ACL bindings matching the provided filter + ACLBindings ACLBindings + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error +} +
func NewError(code ErrorCode, str string, fatal bool) (err Error)+
func NewError(code ErrorCode, str string, fatal bool) (err Error)
NewError creates a new Error.
func (e Error) Error() string+
func (e Error) Error() string
Error returns a human readable representation of an Error Same as Error.String()
func (e Error) IsFatal() bool+
func (e Error) IsFatal() bool
IsFatal returns true if the error is a fatal error. A fatal error indicates the client instance is no longer operable and @@ -3047,14 +3629,14 @@ idempotent producer errors.
func (e Error) IsRetriable() bool+
func (e Error) IsRetriable() bool
IsRetriable returns true if the operation that caused this error may be retried. @@ -3062,27 +3644,27 @@ This flag is currently only set by the Transactional producer API.
func (e Error) String() string+
func (e Error) String() string
String returns a human readable representation of an Error
func (e Error) TxnRequiresAbort() bool+
func (e Error) TxnRequiresAbort() bool
TxnRequiresAbort returns true if the error is an abortable transaction error that requires the application to abort the current transaction with @@ -3092,7 +3674,7 @@ This flag is only set by the Transactional producer API.
ErrorCode is the integer representation of local and broker error codes
-type ErrorCode int+
type ErrorCode int
const ( // ErrBadMsg Local: Bad message format - ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG) + ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG) // ErrBadCompression Local: Invalid compressed data - ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION) + ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION) // ErrDestroy Local: Broker handle destroyed - ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY) + ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY) // ErrFail Local: Communication failure with broker - ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL) + ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL) // ErrTransport Local: Broker transport failure - ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT) + ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT) // ErrCritSysResource Local: Critical system resource failure - ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE) + ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE) // ErrResolve Local: Host resolution failure - ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE) + ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE) // ErrMsgTimedOut Local: Message timed out - ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT) + ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT) // ErrPartitionEOF Broker: No more messages - ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF) + ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF) // ErrUnknownPartition Local: Unknown partition - ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) // ErrFs Local: File or filesystem error - ErrFs ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR__FS) + ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS) // ErrUnknownTopic Local: Unknown topic - ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) // ErrAllBrokersDown Local: All broker connections are down - ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) + ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) // ErrInvalidArg Local: Invalid argument or configuration - ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG) + ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG) // ErrTimedOut Local: Timed out - ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT) + ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT) // ErrQueueFull Local: Queue full - ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL) + ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL) // ErrIsrInsuff Local: ISR count insufficient - ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF) + ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF) // ErrNodeUpdate Local: Broker node update - ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE) + ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE) // ErrSsl Local: SSL error - ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL) + ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL) // ErrWaitCoord Local: Waiting for coordinator - ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD) + ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD) // ErrUnknownGroup Local: Unknown group - ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP) + ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP) // ErrInProgress Local: Operation in progress - 
ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS) + ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS) // ErrPrevInProgress Local: Previous operation in progress - ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS) + ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS) // ErrExistingSubscription Local: Existing subscription - ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION) + ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION) // ErrAssignPartitions Local: Assign partitions - ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) + ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) // ErrRevokePartitions Local: Revoke partitions - ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) + ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) // ErrConflict Local: Conflicting use - ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT) + ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT) // ErrState Local: Erroneous state - ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE) + ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE) // ErrUnknownProtocol Local: Unknown protocol - ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL) + ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL) // ErrNotImplemented Local: Not implemented - ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) + ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) // ErrAuthentication Local: Authentication failure - ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION) + ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION) // 
ErrNoOffset Local: No offset stored - ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET) + ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET) // ErrOutdated Local: Outdated - ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED) + ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED) // ErrTimedOutQueue Local: Timed out in queue - ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE) + ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE) // ErrUnsupportedFeature Local: Required feature not supported by broker - ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE) + ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE) // ErrWaitCache Local: Awaiting cache update - ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE) + ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE) // ErrIntr Local: Operation interrupted - ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR) + ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR) // ErrKeySerialization Local: Key serialization error - ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION) + ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION) // ErrValueSerialization Local: Value serialization error - ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION) + ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION) // ErrKeyDeserialization Local: Key deserialization error - ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION) + ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION) // ErrValueDeserialization Local: Value deserialization error - ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION) + 
ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION) // ErrPartial Local: Partial response - ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL) + ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL) // ErrReadOnly Local: Read-only object - ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY) + ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY) // ErrNoent Local: No such entry - ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT) + ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT) // ErrUnderflow Local: Read underflow - ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW) + ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW) // ErrInvalidType Local: Invalid type - ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE) + ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE) // ErrRetry Local: Retry operation - ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY) + ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY) // ErrPurgeQueue Local: Purged in queue - ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE) + ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE) // ErrPurgeInflight Local: Purged in flight - ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT) + ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT) // ErrFatal Local: Fatal error - ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL) + ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL) // ErrInconsistent Local: Inconsistent state - ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT) + ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT) // ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding - ErrGaplessGuarantee ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE) + ErrGaplessGuarantee ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE) // ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded - ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) + ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) // ErrUnknownBroker Local: Unknown broker - ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER) + ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER) // ErrNotConfigured Local: Functionality not configured - ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED) + ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED) // ErrFenced Local: This instance has been fenced by a newer instance - ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED) + ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED) // ErrApplication Local: Application generated error - ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION) + ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION) // ErrAssignmentLost Local: Group partition assignment lost - ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST) + ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST) // ErrNoop Local: No operation performed - ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP) + ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP) // ErrAutoOffsetReset Local: No offset to automatically reset to - ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) + ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) // ErrUnknown Unknown broker error - ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN) + ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN) // ErrNoError Success - 
ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR) + ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR) // ErrOffsetOutOfRange Broker: Offset out of range - ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE) + ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE) // ErrInvalidMsg Broker: Invalid message - ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG) + ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG) // ErrUnknownTopicOrPart Broker: Unknown topic or partition - ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) + ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) // ErrInvalidMsgSize Broker: Invalid message size - ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE) + ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE) // ErrLeaderNotAvailable Broker: Leader not available - ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) + ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) // ErrNotLeaderForPartition Broker: Not leader for partition - ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION) + ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION) // ErrRequestTimedOut Broker: Request timed out - ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT) + ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT) // ErrBrokerNotAvailable Broker: Broker not available - ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE) + ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE) // ErrReplicaNotAvailable Broker: Replica not available - ErrReplicaNotAvailable ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE) + ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE) // ErrMsgSizeTooLarge Broker: Message size too large - ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE) + ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE) // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode - ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH) + ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH) // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large - ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE) + ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE) // ErrNetworkException Broker: Broker disconnected before response received - ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION) + ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION) // ErrCoordinatorLoadInProgress Broker: Coordinator load in progress - ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS) + ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS) // ErrCoordinatorNotAvailable Broker: Coordinator not available - ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE) + ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE) // ErrNotCoordinator Broker: Not coordinator - ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR) + ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR) // ErrTopicException Broker: Invalid topic - ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION) + ErrTopicException ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION) // ErrRecordListTooLarge Broker: Message batch larger than configured server segment size - ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE) + ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE) // ErrNotEnoughReplicas Broker: Not enough in-sync replicas - ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS) + ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS) // ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas - ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND) + ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND) // ErrInvalidRequiredAcks Broker: Invalid required acks value - ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS) + ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS) // ErrIllegalGeneration Broker: Specified group generation id is not valid - ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION) + ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION) // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol - ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL) + ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL) // ErrInvalidGroupID Broker: Invalid group.id - ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID) + ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID) // ErrUnknownMemberID Broker: Unknown member - ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID) + ErrUnknownMemberID ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID) // ErrInvalidSessionTimeout Broker: Invalid session timeout - ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT) + ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT) // ErrRebalanceInProgress Broker: Group rebalance in progress - ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS) + ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS) // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid - ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE) + ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE) // ErrTopicAuthorizationFailed Broker: Topic authorization failed - ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) + ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) // ErrGroupAuthorizationFailed Broker: Group authorization failed - ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED) + ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED) // ErrClusterAuthorizationFailed Broker: Cluster authorization failed - ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED) + ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED) // ErrInvalidTimestamp Broker: Invalid timestamp - ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP) + ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP) // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism - ErrUnsupportedSaslMechanism ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM) + ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM) // ErrIllegalSaslState Broker: Request not valid in current SASL state - ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE) + ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE) // ErrUnsupportedVersion Broker: API version not supported - ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION) + ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION) // ErrTopicAlreadyExists Broker: Topic already exists - ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS) + ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS) // ErrInvalidPartitions Broker: Invalid number of partitions - ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS) + ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS) // ErrInvalidReplicationFactor Broker: Invalid replication factor - ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR) + ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR) // ErrInvalidReplicaAssignment Broker: Invalid replica assignment - ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT) + ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT) // ErrInvalidConfig Broker: Configuration is invalid - ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG) + ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG) // ErrNotController Broker: Not controller for cluster - ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER) + ErrNotController ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER) // ErrInvalidRequest Broker: Invalid request - ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST) + ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST) // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request - ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT) + ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT) // ErrPolicyViolation Broker: Policy violation - ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION) + ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION) // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number - ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER) + ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER) // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number - ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER) + ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER) // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch - ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH) + ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH) // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state - ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE) + ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE) // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id - 
ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING) + ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING) // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms - ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT) + ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT) // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing - ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS) + ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS) // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer - ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED) + ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED) // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed - ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED) + ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED) // ErrSecurityDisabled Broker: Security features are disabled - ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED) + ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED) // ErrOperationNotAttempted Broker: Operation not attempted - ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED) + 
ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED) // ErrKafkaStorageError Broker: Disk error when trying to access log file on disk - ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR) + ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR) // ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config - ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND) + ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND) // ErrSaslAuthenticationFailed Broker: SASL Authentication failed - ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED) + ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED) // ErrUnknownProducerID Broker: Unknown Producer Id - ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID) + ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID) // ErrReassignmentInProgress Broker: Partition reassignment is in progress - ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS) + ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS) // ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled - ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED) + ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED) // ErrDelegationTokenNotFound Broker: Delegation Token is not found on server - ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND) + ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND) // ErrDelegationTokenOwnerMismatch Broker: Specified Principal is 
not valid Owner/Renewer - ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH) + ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH) // ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection - ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED) + ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED) // ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed - ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED) + ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED) // ErrDelegationTokenExpired Broker: Delegation Token is expired - ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED) + ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED) // ErrInvalidPrincipalType Broker: Supplied principalType is not supported - ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE) + ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE) // ErrNonEmptyGroup Broker: The group is not empty - ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP) + ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP) // ErrGroupIDNotFound Broker: The group id does not exist - ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND) + ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND) // ErrFetchSessionIDNotFound Broker: The fetch session ID was not found - ErrFetchSessionIDNotFound ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND) + ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND) // ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid - ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH) + ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH) // ErrListenerNotFound Broker: No matching listener - ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND) + ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND) // ErrTopicDeletionDisabled Broker: Topic deletion is disabled - ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED) + ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED) // ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch - ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH) + ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH) // ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch - ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH) + ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH) // ErrUnsupportedCompressionType Broker: Unsupported compression type - ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE) + ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE) // ErrStaleBrokerEpoch Broker: Broker epoch has changed - ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH) + ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH) // ErrOffsetNotAvailable Broker: Leader high watermark is not caught up - ErrOffsetNotAvailable 
ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE) + ErrOffsetNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE) // ErrMemberIDRequired Broker: Group member needs a valid member ID - ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED) + ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED) // ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available - ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE) + ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE) // ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size - ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED) + ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED) // ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id - ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID) + ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID) // ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available - ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE) + ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE) // ErrElectionNotNeeded Broker: Leader election not needed for topic partition - ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED) + ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED) // ErrNoReassignmentInProgress Broker: No partition reassignment is in progress - ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS) + ErrNoReassignmentInProgress ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS) // ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it - ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC) + ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC) // ErrInvalidRecord Broker: Broker failed to validate record - ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD) + ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD) // ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared - ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) + ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) // ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded - ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED) + ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED) // ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one - ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED) + ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED) // ErrResourceNotFound Broker: Request illegally referred to resource that does not exist - ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND) + ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND) // ErrDuplicateResource Broker: Request illegally referred to the same resource twice - ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE) + ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE) // ErrUnacceptableCredential Broker: Requested credential would not meet criteria for 
acceptability - ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL) + ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL) // ErrInconsistentVoterSet Broker: Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters - ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET) + ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET) // ErrInvalidUpdateVersion Broker: Invalid update version - ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION) + ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION) // ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error - ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED) + ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED) // ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding - ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE) + ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE) )
func (c ErrorCode) String() string+
func (c ErrorCode) String() string
String returns a human readable representation of an error code
type Event interface {
// String returns a human-readable representation of the event
- String() string
+ String() string
}
type Header struct { - Key string // Header name (utf-8 string) - Value []byte // Header value (nil, empty, or binary) + Key string // Header name (utf-8 string) + Value []byte // Header value (nil, empty, or binary) }
func (h Header) String() string+
func (h Header) String() string
String returns the Header Key and data in a human representable possibly truncated form suitable for displaying to the user.
type LogEvent struct { - Name string // Name of client instance - Tag string // Log tag that provides context to the log Message (e.g., "METADATA" or "GRPCOORD") - Message string // Log message - Level int // Log syslog level, lower is more critical. - Timestamp time.Time // Log timestamp + Name string // Name of client instance + Tag string // Log tag that provides context to the log Message (e.g., "METADATA" or "GRPCOORD") + Message string // Log message + Level int // Log syslog level, lower is more critical. + Timestamp time.Time // Log timestamp }
func (logEvent LogEvent) String() string+
func (logEvent LogEvent) String() string
type Message struct { TopicPartition TopicPartition - Value []byte - Key []byte - Timestamp time.Time + Value []byte + Key []byte + Timestamp time.Time TimestampType TimestampType Opaque interface{} Headers []Header @@ -3586,21 +4168,21 @@ suitable for displaying to the user.
func (m *Message) String() string+
func (m *Message) String() string
String returns a human readable representation of a Message. Key and payload are not represented.
type Metadata struct { Brokers []BrokerMetadata - Topics map[string]TopicMetadata + Topics map[string]TopicMetadata OriginatingBroker BrokerMetadata }+
+ MockCluster represents a Kafka mock cluster instance which can be used +for testing. +
+type MockCluster struct {
+ // contains filtered or unexported fields
+}
+
+ func NewMockCluster(brokerCount int) (*MockCluster, error)+
+ NewMockCluster provides a mock Kafka cluster with a configurable +number of brokers that support a reasonable subset of Kafka protocol +operations, error injection, etc. +
++ Mock clusters provide localhost listeners that can be used as the bootstrap +servers by multiple Kafka client instances. +
++ Currently supported functionality: +- Producer +- Idempotent Producer +- Transactional Producer +- Low-level consumer +- High-level balanced consumer groups with offset commits +- Topic Metadata and auto creation +
++ Warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL. +
+func (mc *MockCluster) BootstrapServers() string+
+ BootstrapServers returns the bootstrap.servers property for this MockCluster +
+func (mc *MockCluster) Close()+
+ Close and destroy the MockCluster +
type OAuthBearerTokenRefresh struct {
// Config is the value of the sasl.oauthbearer.config property
- Config string
+ Config string
}
func (o OAuthBearerTokenRefresh) String() string+
func (o OAuthBearerTokenRefresh) String() string
Offset type (int64) with support for canonical names
-type Offset int64+
type Offset int64
func NewOffset(offset interface{}) (Offset, error)+
func NewOffset(offset interface{}) (Offset, error)
NewOffset creates a new Offset using the provided logical string, or an absolute int64 offset value. @@ -3706,7 +4362,7 @@ Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "
func (o *Offset) Set(offset interface{}) error+
func (o *Offset) Set(offset interface{}) error
Set offset value, see NewOffset()
func (o Offset) String() string+
func (o Offset) String() string
type OffsetsCommitted struct { - Error error + Error error Offsets []TopicPartition }
func (o OffsetsCommitted) String() string+
func (o OffsetsCommitted) String() string
type PartitionEOF TopicPartition
func (p PartitionEOF) String() string+
func (p PartitionEOF) String() string
type PartitionMetadata struct { - ID int32 + ID int32 Error Error - Leader int32 - Replicas []int32 - Isrs []int32 + Leader int32 + Replicas []int32 + Isrs []int32 }
type PartitionsSpecification struct { // Topic to create more partitions for. - Topic string + Topic string // New partition count for topic, must be higher than current partition count. - IncreaseTo int + IncreaseTo int // (Optional) Explicit replica assignment. The outer array is // indexed by the new partition index (i.e., 0 for the first added // partition), while the inner per-partition array // contains the replica broker ids. The first broker in each // broker id list will be the preferred replica. - ReplicaAssignment [][]int32 + ReplicaAssignment [][]int32 }
func NewProducer(conf *ConfigMap) (*Producer, error)+
func NewProducer(conf *ConfigMap) (*Producer, error)
NewProducer creates a new high-level Producer instance.
@@ -3888,14 +4544,14 @@ go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-providefunc (p *Producer) AbortTransaction(ctx context.Context) error+
func (p *Producer) AbortTransaction(ctx context.Context) error
AbortTransaction aborts the ongoing transaction.
@@ -3933,14 +4589,14 @@ has been raised by calling `err.(kafka.Error).IsFatal()`.func (p *Producer) BeginTransaction() error+
func (p *Producer) BeginTransaction() error
BeginTransaction starts a new transaction.
@@ -3970,7 +4626,7 @@ transaction, will fail.func (p *Producer) CommitTransaction(ctx context.Context) error+
func (p *Producer) CommitTransaction(ctx context.Context) error
CommitTransaction commits the current transaction.
@@ -4034,7 +4690,7 @@ respectively.func (p *Producer) Flush(timeoutMs int) int+
func (p *Producer) Flush(timeoutMs int) int
Flush and wait for outstanding messages and requests to complete delivery. Includes messages on ProduceChannel. @@ -4063,27 +4719,27 @@ Returns the number of outstanding events still un-flushed.
func (p *Producer) GetFatalError() error+
func (p *Producer) GetFatalError() error
GetFatalError returns an Error object if the client instance has raised a fatal error, else nil.
func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)+
func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
GetMetadata queries broker for cluster and topic metadata. If topic is non-nil only information about that topic is returned, else if @@ -4093,14 +4749,14 @@ GetMetadata is equivalent to listTopics, describeTopics and describeCluster in t
func (p *Producer) InitTransactions(ctx context.Context) error+
func (p *Producer) InitTransactions(ctx context.Context) error
InitTransactions Initializes transactions for the producer instance.
@@ -4148,14 +4804,14 @@ error has been raised by calling `err.(kafka.Error).IsFatal()`.func (p *Producer) Len() int+
func (p *Producer) Len() int
Len returns the number of messages and requests waiting to be transmitted to the broker as well as delivery reports queued for the application. @@ -4163,7 +4819,7 @@ Includes messages on ProduceChannel.
func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)+
func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
OffsetsForTimes looks up offsets by timestamp for the given partitions.
@@ -4207,14 +4863,14 @@ Per-partition errors may be returned in the `.Error` field.func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error+
func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error
Produce single message. This is an asynchronous call that enqueues the message on the internal @@ -4229,7 +4885,7 @@ Returns an error if message could not be enqueued.
func (p *Producer) Purge(flags int) error+
func (p *Producer) Purge(flags int) error
Purge messages currently handled by this producer instance.
@@ -4283,28 +4939,28 @@ queues are purged.func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)+
func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
QueryWatermarkOffsets returns the broker's low and high offsets for the given topic and partition.
func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error+
func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error
SendOffsetsToTransaction sends a list of topic partition offsets to the consumer group coordinator for `consumerMetadata`, and marks the offsets @@ -4352,14 +5008,14 @@ respectively.
func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error+
func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
SetOAuthBearerToken sets the the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil @@ -4377,14 +5033,14 @@ authentication mechanism.
func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error+
func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error
SetOAuthBearerTokenFailure sets the error message describing why token retrieval/setting failed; it also schedules a new token refresh event for 10 @@ -4396,34 +5052,34 @@ authentication mechanism.
func (p *Producer) String() string+
func (p *Producer) String() string
String returns a human readable name for a Producer instance
func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode+
func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode
TestFatalError triggers a fatal error in the underlying client. This is to be used strictly for testing purposes.
type RebalanceCb func(*Consumer, Event) error+
type RebalanceCb func(*Consumer, Event) error+
+ ResourcePatternType enumerates the different types of Kafka resource patterns. +
+type ResourcePatternType int+
func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error)+
+ ResourcePatternTypeFromString translates a resource pattern type name to +a ResourcePatternType value. +
+func (t ResourcePatternType) String() string+
+ String returns the human-readable representation of a ResourcePatternType +
ResourceType represents an Apache Kafka resource type
-type ResourceType int+
type ResourceType int
func ResourceTypeFromString(typeString string) (ResourceType, error)+
func ResourceTypeFromString(typeString string) (ResourceType, error)
ResourceTypeFromString translates a resource type name/string to a ResourceType value.
func (t ResourceType) String() string+
func (t ResourceType) String() string
String returns the human-readable representation of a ResourceType
func (e RevokedPartitions) String() string+
func (e RevokedPartitions) String() string
func (e Stats) String() string+
func (e Stats) String() string
TimestampType is a the Message timestamp type or source
-type TimestampType int+
type TimestampType int
func (t TimestampType) String() string+
func (t TimestampType) String() string
type TopicMetadata struct { - Topic string + Topic string Partitions []PartitionMetadata Error Error }
type TopicPartition struct { - Topic *string - Partition int32 + Topic *string + Partition int32 Offset Offset - Metadata *string - Error error + Metadata *string + Error error }
func (p TopicPartition) String() string+
func (p TopicPartition) String() string
type TopicPartitions []TopicPartition
func (tps TopicPartitions) Len() int+
func (tps TopicPartitions) Len() int
func (tps TopicPartitions) Less(i, j int) bool+
func (tps TopicPartitions) Less(i, j int) bool
func (tps TopicPartitions) Swap(i, j int)+
func (tps TopicPartitions) Swap(i, j int)
type TopicResult struct { // Topic name - Topic string + Topic string // Error, if any, of result. Check with `Error.Code() != ErrNoError`. Error Error }
func (t TopicResult) String() string+
func (t TopicResult) String() string
String returns a human-readable representation of a TopicResult.
type TopicSpecification struct { // Topic name to create. - Topic string + Topic string // Number of partitions in topic. - NumPartitions int + NumPartitions int // Default replication factor for the topic's partitions, or zero // if an explicit ReplicaAssignment is set. - ReplicationFactor int + ReplicationFactor int // (Optional) Explicit replica assignment. The outer array is // indexed by the partition number, while the inner per-partition array // contains the replica broker ids. The first broker in each // broker id list will be the preferred replica. - ReplicaAssignment [][]int32 + ReplicaAssignment [][]int32 // Topic configuration. - Config map[string]string + Config map[string]string }diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_amd64.go similarity index 52% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin.go rename to vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_amd64.go index 5b6812f..96ba817 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_amd64.go @@ -6,8 +6,8 @@ package kafka // #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB -// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin.a -lm -lsasl2 -ldl -lpthread +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin_amd64.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static darwin from librdkafka-static-bundle-v1.8.2.tgz" +const LibrdkafkaLinkInfo = "static darwin_amd64 from librdkafka-static-bundle-v1.9.2.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_arm64.go 
b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_arm64.go new file mode 100644 index 0000000..d4d35c9 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_arm64.go @@ -0,0 +1,13 @@ +// +build !dynamic + + +// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT. + +package kafka + +// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin_arm64.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "static darwin_arm64 from librdkafka-static-bundle-v1.9.2.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_glibc_linux.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_glibc_linux.go index a69638d..c30a134 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_glibc_linux.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_glibc_linux.go @@ -10,4 +10,4 @@ package kafka import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.8.2.tgz" +const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.9.2.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_musl_linux.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_musl_linux.go index ed2bee5..44569e0 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_musl_linux.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_musl_linux.go @@ -10,4 +10,4 @@ package kafka import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static musl_linux from librdkafka-static-bundle-v1.8.2.tgz" 
+const LibrdkafkaLinkInfo = "static musl_linux from librdkafka-static-bundle-v1.9.2.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_windows.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_windows.go index 1c15c55..0727887 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_windows.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_windows.go @@ -10,4 +10,4 @@ package kafka import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v1.8.2.tgz" +const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v1.9.2.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go index f0c8ecf..a6dfb8d 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go @@ -1,5 +1,3 @@ -package kafka - /** * Copyright 2016 Confluent Inc. * @@ -16,6 +14,8 @@ package kafka * limitations under the License. */ +package kafka + import ( "fmt" "reflect" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go index ef48330..2e1c9da 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go @@ -1,5 +1,3 @@ -package kafka - /** * Copyright 2016-2020 Confluent Inc. * @@ -16,6 +14,8 @@ package kafka * limitations under the License. 
*/ +package kafka + import ( "fmt" "math" @@ -46,7 +46,7 @@ type Consumer struct { readerTermChan chan bool rebalanceCb RebalanceCb appReassigned bool - appRebalanceEnable bool // Config setting + appRebalanceEnable bool // SerializerConfig setting } // Strings returns a human readable name for a Consumer instance @@ -424,30 +424,16 @@ func (c *Consumer) Close() (err error) { close(c.events) } - // librdkafka's rd_kafka_consumer_close() will block - // and trigger the rebalance_cb() if one is set, if not, which is the - // case with the Go client since it registers EVENTs rather than callbacks, - // librdkafka will shortcut the rebalance_cb and do a forced unassign. - // But we can't have that since the application might need the final RevokePartitions - // before shutting down. So we trigger an Unsubscribe() first, wait for that to - // propagate (in the Poll loop below), and then close the consumer. - c.Unsubscribe() + C.rd_kafka_consumer_close_queue(c.handle.rk, c.handle.rkq) - // Poll for rebalance events - for { - c.Poll(10 * 1000) - if int(C.rd_kafka_queue_length(c.handle.rkq)) == 0 { - break - } + for C.rd_kafka_consumer_closed(c.handle.rk) != 1 { + c.Poll(100) } // Destroy our queue C.rd_kafka_queue_destroy(c.handle.rkq) c.handle.rkq = nil - // Close the consumer - C.rd_kafka_consumer_close(c.handle.rk) - c.handle.cleanup() C.rd_kafka_destroy(c.handle.rk) diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go index 1827f43..e14c1ca 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go @@ -1,5 +1,3 @@ -package kafka - /** * Copyright 2016 Confluent Inc. * @@ -16,6 +14,8 @@ package kafka * limitations under the License. 
*/ +package kafka + // Automatically generate error codes from librdkafka // See README for instructions //go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go @@ -67,10 +67,8 @@ func newCErrorFromString(code C.rd_kafka_resp_err_t, str string) (err Error) { return newErrorFromString(ErrorCode(code), str) } -// newErrorFromCError creates a new Error instance and destroys -// the passed cError. -func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error { - defer C.rd_kafka_error_destroy(cError) +// newErrorFromCError creates a new Error instance +func newErrorFromCError(cError *C.rd_kafka_error_t) Error { return Error{ code: ErrorCode(C.rd_kafka_error_code(cError)), str: C.GoString(C.rd_kafka_error_string(cError)), @@ -80,6 +78,13 @@ func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error { } } +// newErrorFromCErrorDestroy creates a new Error instance and destroys +// the passed cError. +func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error { + defer C.rd_kafka_error_destroy(cError) + return newErrorFromCError(cError) +} + // Error returns a human readable representation of an Error // Same as Error.String() func (e Error) Error() string { diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error_gen.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error_gen.go index 4ceecb3..ccfaba2 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error_gen.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error_gen.go @@ -1,5 +1,3 @@ -package kafka - /** * Copyright 2020 Confluent Inc. * @@ -16,6 +14,8 @@ package kafka * limitations under the License. 
*/ +package kafka + // Automatically generate error codes from librdkafka // See README for instructions //go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go index 6357ad8..aefa87b 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go @@ -1,5 +1,3 @@ -package kafka - /** * Copyright 2016 Confluent Inc. * @@ -16,6 +14,8 @@ package kafka * limitations under the License. */ +package kafka + import ( "fmt" "os" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go index fb828f9..d15e8f5 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go @@ -1,6 +1,6 @@ package kafka -// Copyright 2016-2021 Confluent Inc. -// AUTOMATICALLY GENERATED ON 2021-12-08 12:44:39.243338672 +0100 CET m=+0.000248284 USING librdkafka 1.8.2 +// Copyright 2016-2022 Confluent Inc. +// AUTOMATICALLY GENERATED ON 2022-08-01 22:56:19.86222475 +0200 CEST m=+0.000294735 USING librdkafka 1.9.2 /* #include "select_rdkafka.h" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go index 161a395..dc1c171 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go @@ -1,5 +1,3 @@ -package kafka - /** * Copyright 2016 Confluent Inc. * @@ -16,6 +14,8 @@ package kafka * limitations under the License. 
*/ +package kafka + import ( "fmt" "strings" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go index f7a73a4..6baa381 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go @@ -1,5 +1,3 @@ -package kafka - /** * Copyright 2018 Confluent Inc. * @@ -16,6 +14,8 @@ package kafka * limitations under the License. */ +package kafka + import ( "fmt" "strconv" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go index 20dc30a..254edbd 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go @@ -20,7 +20,7 @@ // // High-level Consumer // -// * Decide if you want to read messages and events by calling `.Poll()` or +// * Decide if you want to read messages and events by calling `.Poll()` or // the deprecated option of using the `.Events()` channel. (If you want to use // `.Events()` channel then set `"go.events.channel.enable": true`). // diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/LICENSES.txt b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/LICENSES.txt index f2aa57d..1ab8a1d 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/LICENSES.txt +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/LICENSES.txt @@ -27,6 +27,32 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+LICENSE.cjson +-------------------------------------------------------------- +For cJSON.c and cJSON.h: + +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + LICENSE.crc32c -------------------------------------------------------------- # For src/crc32c.c copied (with modifications) from diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/bundle-import.sh b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/bundle-import.sh index 2f7aeda..9ad738f 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/bundle-import.sh +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/bundle-import.sh @@ -14,28 +14,30 @@ usage() { } +# Parse dynamic libraries from linker command line. +# Will print a list matching -lfoo and -framework X.. 
parse_dynlibs() { - # Parse dynamic libraries from pkg-config file, - # both the ones specified with Libs: but also through Requires: - local pc=$1 local libs= - local req= - local n= - for req in $(grep ^Requires: $pc | sed -e 's/^Requires://'); do - n=$(pkg-config --libs $req) - if [[ $n == -l* ]]; then - libs="${libs} $n" - fi - done - for n in $(grep ^Libs: $pc); do - if [[ $n == -l* ]]; then - libs="${libs} $n" + while [[ $# -gt 0 ]]; do + if [[ $1 == -l* ]]; then + libs="${libs} $1" + elif [[ $1 == -framework ]]; then + libs="${libs} $1 $2" + shift # remove one (extra) arg fi + shift # remove one arg done echo "$libs" } +# Parse dynamic library dependecies from pkg-config file and print +# them to stdout. +parse_pc_dynlibs() { + local pc=$1 + parse_dynlibs $(sed -n 's/^Libs: \(..*\)/\1/p' "$pc") +} + setup_build() { # Copies static library from the temp directory into final location, # extracts dynamic lib list from the pkg-config file, @@ -54,7 +56,7 @@ setup_build() { build_tag="// +build musl" fi - local dynlibs=$(parse_dynlibs $pc) + local dynlibs=$(parse_pc_dynlibs $pc) echo "Copying $apath to $dpath" cp "$apath" "$dpath" @@ -99,7 +101,7 @@ for f in rdkafka.h LICENSES.txt ; do done -for btype in glibc_linux musl_linux darwin windows ; do +for btype in glibc_linux musl_linux darwin_amd64 darwin_arm64 windows ; do lib=$bdir/librdkafka_${btype}.a pc=${lib/%.a/.pc} [[ -f $lib ]] || (echo "Expected file $lib missing" ; exit 1) diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/import.sh b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/import.sh index 7021169..7f920f3 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/import.sh +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/import.sh @@ -51,12 +51,15 @@ fi curr_branch=$(git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3-) uncommitted=$(git status --untracked-files=no --porcelain) 
-if [[ $devel != 1 ]] && ( [[ $curr_branch != master ]] || [[ ! -z $uncommitted ]] ); then +if [[ ! -z $uncommitted ]]; then + echo "Error: This script must be run on a clean branch with no uncommitted changes" + echo "Uncommitted files:" + echo "$uncommitted" + exit 1 +fi + +if [[ $devel != 1 ]] && [[ $curr_branch != master ]] ; then echo "Error: This script must be run on an up-to-date, clean, master branch" - if [[ ! -z $uncommitted ]]; then - echo "Uncommitted files:" - echo "$uncommitted" - fi exit 1 fi diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin.a b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a similarity index 75% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin.a rename to vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a index 6294180..a49b5fa 100644 Binary files a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin.a and b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a new file mode 100644 index 0000000..85aa548 Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_glibc_linux.a b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_glibc_linux.a index c736b84..2df7900 100644 Binary files a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_glibc_linux.a and 
b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_glibc_linux.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_musl_linux.a b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_musl_linux.a index f876505..eee1fb3 100644 Binary files a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_musl_linux.a and b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_musl_linux.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_windows.a b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_windows.a index 7c5909f..4761207 100644 Binary files a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_windows.a and b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_windows.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka.h b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka.h index b85ba90..b424b21 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka.h +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka.h @@ -1,7 +1,7 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2020 Magnus Edenhill + * Copyright (c) 2012-2022 Magnus Edenhill * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -60,13 +60,13 @@ extern "C" { #ifndef WIN32_MEAN_AND_LEAN #define WIN32_MEAN_AND_LEAN #endif -#include