Andrey Kovalev · 3 years ago · 122 changed files with 33687 additions and 2 deletions
@@ -0,0 +1,37 @@
kind: pipeline
type: docker
name: build

steps:
  - name: build
    image: plugins/docker
    settings:
      registry: cr.selcloud.ru
      username:
        from_secret: docker_username
      password:
        from_secret: docker_password
      repo: cr.selcloud.ru/russia9/${DRONE_REPO_NAME}/${DRONE_COMMIT_BRANCH}
      tags:
        - latest
        - ${DRONE_COMMIT_SHA}
      cache_from:
        - cr.selcloud.ru/russia9/${DRONE_REPO_NAME}/${DRONE_COMMIT_BRANCH}:latest

#  - name: deploy
#    image: appleboy/drone-ssh
#    settings:
#      host:
#        from_secret: ssh_address
#      username:
#        from_secret: ssh_username
#      key:
#        from_secret: ssh_key
#      port: 22
#      script_stop: true
#      script:
#        - docker-compose -f /srv/cw3-offers/docker-compose.yml pull
#        - docker-compose -f /srv/cw3-offers/docker-compose.yml up -d
#    when:
#      branch:
#        - master
@@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -0,0 +1,2 @@
testconf.json
go_rdkafka_generr/go_rdkafka_generr
@@ -0,0 +1,58 @@
package kafka

/**
 * Copyright 2016-2019 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
    "fmt"
)

/*
#include "select_rdkafka.h"

// Minimum required librdkafka version. This is checked both during
// build-time and runtime.
// Make sure to keep the MIN_RD_KAFKA_VERSION, MIN_VER_ERRSTR and #error
// defines and strings in sync.
//

#define MIN_RD_KAFKA_VERSION 0x01060000

#ifdef __APPLE__
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#else
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#endif

#if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION
#ifdef __APPLE__
#error "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#else
#error "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#endif
#endif
*/
import "C"

func versionCheck() error {
    ver, verstr := LibraryVersion()
    if ver < C.MIN_RD_KAFKA_VERSION {
        return newErrorFromString(ErrNotImplemented,
            fmt.Sprintf("%s: librdkafka version %s (0x%x) detected",
                C.MIN_VER_ERRSTR, verstr, ver))
    }
    return nil
}
@@ -0,0 +1,145 @@
# Information for confluent-kafka-go developers

Whenever librdkafka error codes are updated make sure to run generate
before building:

```
$ make -f mk/Makefile generr
$ go build ./...
```


## Testing

Some of the tests included in this directory, the benchmark and integration tests in particular,
require an existing Kafka cluster and a testconf.json configuration file to
provide tests with bootstrap brokers, topic name, etc.

The format of testconf.json is a JSON object:
```
{
  "Brokers": "<bootstrap-brokers>",
  "Topic": "<test-topic-name>"
}
```

See testconf-example.json for an example and full set of available options.

To run unit-tests:
```
$ go test
```

To run benchmark tests:
```
$ go test -bench .
```

For the code coverage:
```
$ go test -coverprofile=coverage.out -bench=.
$ go tool cover -func=coverage.out
```


## Build tags

Different build types are supported through Go build tags (`-tags ..`);
these tags should be specified on the **application** build/get/install command.

 * By default the bundled platform-specific static build of librdkafka will
   be used. This works out of the box on Mac OSX and glibc-based Linux distros,
   such as Ubuntu and CentOS.
 * `-tags musl` - must be specified when building on/for musl-based Linux
   distros, such as Alpine. Will use the bundled static musl build of
   librdkafka.
 * `-tags dynamic` - link librdkafka dynamically. A shared librdkafka library
   must be installed manually through other means (apt-get, yum, build from
   source, etc).


## Generating HTML documentation

To generate one-page HTML documentation run the mk/doc-gen.py script from the
top-level directory. This script requires the beautifulsoup4 Python package.

```
$ source .../your/virtualenv/bin/activate
$ pip install beautifulsoup4
...
$ make -f mk/Makefile docs
```


## Release process

For each release candidate and final release, perform the following steps:

### Update bundle to latest librdkafka

See instructions in [kafka/librdkafka/README.md](kafka/librdkafka/README.md).

### Update librdkafka version requirement

Update the minimum required librdkafka version in `kafka/00version.go`
and `README.md`.

### Update error codes

Error codes can be automatically generated from the current librdkafka version.

Update generated error codes:

    $ make -f mk/Makefile generr
    # Verify by building

### Rebuild everything

    $ go clean -i ./...
    $ go build ./...

### Run full test suite

Set up a test cluster using whatever mechanism you typically use
(docker, trivup, ccloud, ..).

Make sure to update `kafka/testconf.json` as needed (broker list, $BROKERS).

Run test suite:

    $ go test ./...

### Verify examples

Manually verify that the examples/ applications work.

Also make sure the examples in README.md work.

Convert any examples using `github.com/confluentinc/confluent-kafka-go/kafka` to use the
`gopkg.in/confluentinc/confluent-kafka-go.v1/kafka` import path:

    $ find examples/ -type f -name *\.go -exec sed -i -e 's|github\.com/confluentinc/confluent-kafka-go/kafka|gopkg\.in/confluentinc/confluent-kafka-go\.v1/kafka|g' {} +

### Commit any changes

Make sure to push to github before creating the tag to have CI tests pass.

### Create and push tag

    $ git tag v1.3.0
    $ git push --dry-run origin v1.3.0
    # Remove --dry-run and re-execute if it looks ok.

### Create release notes page on github
File diff suppressed because it is too large
@@ -0,0 +1,264 @@
/**
 * Copyright 2018 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
    "fmt"
    "time"
    "unsafe"
)

/*
#include "select_rdkafka.h"
#include <stdlib.h>
*/
import "C"

// AdminOptionOperationTimeout sets the broker's operation timeout, such as the
// timeout for CreateTopics to complete the creation of topics on the controller
// before returning a result to the application.
//
// CreateTopics, DeleteTopics, CreatePartitions:
// a value 0 will return immediately after triggering topic
// creation, while > 0 will wait this long for topic creation to propagate
// in cluster.
//
// Default: 0 (return immediately).
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions.
type AdminOptionOperationTimeout struct {
    isSet bool
    val   time.Duration
}

func (ao AdminOptionOperationTimeout) supportsCreateTopics() {
}
func (ao AdminOptionOperationTimeout) supportsDeleteTopics() {
}
func (ao AdminOptionOperationTimeout) supportsCreatePartitions() {
}

func (ao AdminOptionOperationTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
    if !ao.isSet {
        return nil
    }

    cErrstrSize := C.size_t(512)
    cErrstr := (*C.char)(C.malloc(cErrstrSize))
    defer C.free(unsafe.Pointer(cErrstr))

    cErr := C.rd_kafka_AdminOptions_set_operation_timeout(
        cOptions, C.int(durationToMilliseconds(ao.val)),
        cErrstr, cErrstrSize)
    if cErr != 0 {
        C.rd_kafka_AdminOptions_destroy(cOptions)
        return newCErrorFromString(cErr,
            fmt.Sprintf("Failed to set operation timeout: %s", C.GoString(cErrstr)))
    }

    return nil
}

// SetAdminOperationTimeout sets the broker's operation timeout, such as the
// timeout for CreateTopics to complete the creation of topics on the controller
// before returning a result to the application.
//
// CreateTopics, DeleteTopics, CreatePartitions:
// a value 0 will return immediately after triggering topic
// creation, while > 0 will wait this long for topic creation to propagate
// in cluster.
//
// Default: 0 (return immediately).
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions.
func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout) {
    ao.isSet = true
    ao.val = t
    return ao
}

// AdminOptionRequestTimeout sets the overall request timeout, including broker
// lookup, request transmission, operation time on broker, and response.
//
// Default: `socket.timeout.ms`.
//
// Valid for all Admin API methods.
type AdminOptionRequestTimeout struct {
    isSet bool
    val   time.Duration
}

func (ao AdminOptionRequestTimeout) supportsCreateTopics() {
}
func (ao AdminOptionRequestTimeout) supportsDeleteTopics() {
}
func (ao AdminOptionRequestTimeout) supportsCreatePartitions() {
}
func (ao AdminOptionRequestTimeout) supportsAlterConfigs() {
}
func (ao AdminOptionRequestTimeout) supportsDescribeConfigs() {
}

func (ao AdminOptionRequestTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
    if !ao.isSet {
        return nil
    }

    cErrstrSize := C.size_t(512)
    cErrstr := (*C.char)(C.malloc(cErrstrSize))
    defer C.free(unsafe.Pointer(cErrstr))

    cErr := C.rd_kafka_AdminOptions_set_request_timeout(
        cOptions, C.int(durationToMilliseconds(ao.val)),
        cErrstr, cErrstrSize)
    if cErr != 0 {
        C.rd_kafka_AdminOptions_destroy(cOptions)
        return newCErrorFromString(cErr,
            fmt.Sprintf("%s", C.GoString(cErrstr)))
    }

    return nil
}

// SetAdminRequestTimeout sets the overall request timeout, including broker
// lookup, request transmission, operation time on broker, and response.
//
// Default: `socket.timeout.ms`.
//
// Valid for all Admin API methods.
func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout) {
    ao.isSet = true
    ao.val = t
    return ao
}

// AdminOptionValidateOnly tells the broker to only validate the request,
// without performing the requested operation (create topics, etc).
//
// Default: false.
//
// Valid for CreateTopics, CreatePartitions, AlterConfigs
type AdminOptionValidateOnly struct {
    isSet bool
    val   bool
}

func (ao AdminOptionValidateOnly) supportsCreateTopics() {
}
func (ao AdminOptionValidateOnly) supportsCreatePartitions() {
}
func (ao AdminOptionValidateOnly) supportsAlterConfigs() {
}

func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
    if !ao.isSet {
        return nil
    }

    cErrstrSize := C.size_t(512)
    cErrstr := (*C.char)(C.malloc(cErrstrSize))
    defer C.free(unsafe.Pointer(cErrstr))

    cErr := C.rd_kafka_AdminOptions_set_validate_only(
        cOptions, bool2cint(ao.val),
        cErrstr, cErrstrSize)
    if cErr != 0 {
        C.rd_kafka_AdminOptions_destroy(cOptions)
        return newCErrorFromString(cErr,
            fmt.Sprintf("%s", C.GoString(cErrstr)))
    }

    return nil
}

// SetAdminValidateOnly tells the broker to only validate the request,
// without performing the requested operation (create topics, etc).
//
// Default: false.
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions, AlterConfigs
func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly) {
    ao.isSet = true
    ao.val = validateOnly
    return ao
}

// CreateTopicsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.
type CreateTopicsAdminOption interface {
    supportsCreateTopics()
    apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DeleteTopicsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout.
type DeleteTopicsAdminOption interface {
    supportsDeleteTopics()
    apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// CreatePartitionsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.
type CreatePartitionsAdminOption interface {
    supportsCreatePartitions()
    apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// AlterConfigsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminValidateOnly, SetAdminIncremental.
type AlterConfigsAdminOption interface {
    supportsAlterConfigs()
    apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DescribeConfigsAdminOption - see setters.
//
// See SetAdminRequestTimeout.
type DescribeConfigsAdminOption interface {
    supportsDescribeConfigs()
    apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// AdminOption is a generic type not to be used directly.
//
// See CreateTopicsAdminOption et.al.
type AdminOption interface {
    apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

func adminOptionsSetup(h *handle, opType C.rd_kafka_admin_op_t, options []AdminOption) (*C.rd_kafka_AdminOptions_t, error) {

    cOptions := C.rd_kafka_AdminOptions_new(h.rk, opType)
    for _, opt := range options {
        if opt == nil {
            continue
        }
        err := opt.apply(cOptions)
        if err != nil {
            return nil, err
        }
    }

    return cOptions, nil
}
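For orientation, the setters above are consumed variadically by the Admin API calls: any option implementing, say, `CreateTopicsAdminOption` can be passed to `CreateTopics`. A minimal hedged sketch, assuming the `NewAdminClient` constructor and `TopicSpecification` type defined in other files of this changeset (not shown here):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	// NewAdminClient and TopicSpecification come from other files in this
	// commit; this sketch only illustrates the AdminOption setters above.
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		panic(err)
	}
	defer a.Close()

	results, err := a.CreateTopics(
		context.Background(),
		[]kafka.TopicSpecification{{Topic: "example-topic", NumPartitions: 3, ReplicationFactor: 1}},
		// AdminOptionOperationTimeout: wait up to 60s for topic creation
		// to propagate in the cluster (default is to return immediately).
		kafka.SetAdminOperationTimeout(60*time.Second),
		// AdminOptionRequestTimeout: cap the overall request, including
		// broker lookup and response (default is socket.timeout.ms).
		kafka.SetAdminRequestTimeout(30*time.Second))
	if err != nil {
		panic(err)
	}
	for _, result := range results {
		fmt.Println(result)
	}
}
```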
File diff suppressed because it is too large
@@ -0,0 +1,13 @@
// +build !dynamic


// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin.a -lm -lsasl2 -ldl -lpthread
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static darwin from librdkafka-static-bundle-v1.8.2.tgz"
@@ -0,0 +1,9 @@
// +build dynamic

package kafka

// #cgo pkg-config: rdkafka
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "dynamically linked to librdkafka"
@@ -0,0 +1,13 @@
// +build !dynamic
// +build !musl

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux.a -lm -ldl -lpthread -lrt
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.8.2.tgz"
@@ -0,0 +1,13 @@
// +build !dynamic
// +build musl

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux.a -lm -ldl -lpthread -lrt -lpthread -lrt
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static musl_linux from librdkafka-static-bundle-v1.8.2.tgz"
@@ -0,0 +1,13 @@
// +build !dynamic


// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_windows.a -lws2_32 -lsecur32 -lcrypt32
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v1.8.2.tgz"
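Exactly one of the five `build_*.go` files above is compiled in, selected by the `dynamic` and `musl` build tags, so the shared `LibrdkafkaLinkInfo` constant records which linkage was used. A small sketch of inspecting it at runtime, together with `LibraryVersion` (the package function referenced in `kafka/00version.go` above):

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	// Prints e.g. "static glibc_linux from librdkafka-static-bundle-v1.8.2.tgz"
	// or "dynamically linked to librdkafka", depending on build tags.
	fmt.Println("link info:", kafka.LibrdkafkaLinkInfo)

	// LibraryVersion reports the runtime librdkafka version as an int and a string.
	ver, verstr := kafka.LibraryVersion()
	fmt.Printf("librdkafka %s (0x%x)\n", verstr, ver)
}
```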
@@ -0,0 +1,280 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
    "fmt"
    "reflect"
    "strings"
    "unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"
*/
import "C"

// ConfigValue supports the following types:
// bool, int, string, any type with the standard String() interface
type ConfigValue interface{}

// ConfigMap is a map containing standard librdkafka configuration properties as documented in:
// https://github.com/edenhill/librdkafka/tree/master/CONFIGURATION.md
//
// The special property "default.topic.config" (optional) is a ConfigMap
// containing default topic configuration properties.
//
// The use of "default.topic.config" is deprecated;
// topic configuration properties shall be specified in the standard ConfigMap.
// For backwards compatibility, "default.topic.config" (if supplied)
// takes precedence.
type ConfigMap map[string]ConfigValue

// SetKey sets configuration property key to value.
//
// For user convenience a key prefixed with {topic}. will be
// set on the "default.topic.config" sub-map; this use is deprecated.
func (m ConfigMap) SetKey(key string, value ConfigValue) error {
    if strings.HasPrefix(key, "{topic}.") {
        _, found := m["default.topic.config"]
        if !found {
            m["default.topic.config"] = ConfigMap{}
        }
        m["default.topic.config"].(ConfigMap)[strings.TrimPrefix(key, "{topic}.")] = value
    } else {
        m[key] = value
    }

    return nil
}

// Set implements flag.Set (command line argument parser) as a convenience
// for `-X key=value` config.
func (m ConfigMap) Set(kv string) error {
    i := strings.Index(kv, "=")
    if i == -1 {
        return newErrorFromString(ErrInvalidArg, "Expected key=value")
    }

    k := kv[:i]
    v := kv[i+1:]

    return m.SetKey(k, v)
}

func value2string(v ConfigValue) (ret string, errstr string) {

    switch x := v.(type) {
    case bool:
        if x {
            ret = "true"
        } else {
            ret = "false"
        }
    case int:
        ret = fmt.Sprintf("%d", x)
    case string:
        ret = x
    case fmt.Stringer:
        ret = x.String()
    default:
        return "", fmt.Sprintf("Invalid value type %T", v)
    }

    return ret, ""
}

// rdkAnyconf abstracts rd_kafka_conf_t and rd_kafka_topic_conf_t
// into a common interface.
type rdkAnyconf interface {
    set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t
}

func anyconfSet(anyconf rdkAnyconf, key string, val ConfigValue) (err error) {
    value, errstr := value2string(val)
    if errstr != "" {
        return newErrorFromString(ErrInvalidArg, fmt.Sprintf("%s for key %s (expected string,bool,int,ConfigMap)", errstr, key))
    }
    cKey := C.CString(key)
    defer C.free(unsafe.Pointer(cKey))
    cVal := C.CString(value)
    defer C.free(unsafe.Pointer(cVal))
    cErrstr := (*C.char)(C.malloc(C.size_t(128)))
    defer C.free(unsafe.Pointer(cErrstr))

    if anyconf.set(cKey, cVal, cErrstr, 128) != C.RD_KAFKA_CONF_OK {
        return newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
    }

    return nil
}

// we need these typedefs to workaround a crash in golint
// when parsing the set() methods below
type rdkConf C.rd_kafka_conf_t
type rdkTopicConf C.rd_kafka_topic_conf_t

func (cConf *rdkConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t {
    return C.rd_kafka_conf_set((*C.rd_kafka_conf_t)(cConf), cKey, cVal, cErrstr, C.size_t(errstrSize))
}

func (ctopicConf *rdkTopicConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t {
    return C.rd_kafka_topic_conf_set((*C.rd_kafka_topic_conf_t)(ctopicConf), cKey, cVal, cErrstr, C.size_t(errstrSize))
}

func configConvertAnyconf(m ConfigMap, anyconf rdkAnyconf) (err error) {
    // set plugins first, any plugin-specific configuration depends on
    // the plugin to have already been set
    pluginPaths, ok := m["plugin.library.paths"]
    if ok {
        err = anyconfSet(anyconf, "plugin.library.paths", pluginPaths)
        if err != nil {
            return err
        }
    }
    for k, v := range m {
        if k == "plugin.library.paths" {
            continue
        }
        switch v.(type) {
        case ConfigMap:
            /* Special sub-ConfigMap, only used for default.topic.config */

            if k != "default.topic.config" {
                return newErrorFromString(ErrInvalidArg, fmt.Sprintf("Invalid type for key %s", k))
            }

            var cTopicConf = C.rd_kafka_topic_conf_new()

            err = configConvertAnyconf(v.(ConfigMap),
                (*rdkTopicConf)(cTopicConf))
            if err != nil {
                C.rd_kafka_topic_conf_destroy(cTopicConf)
                return err
            }

            C.rd_kafka_conf_set_default_topic_conf(
                (*C.rd_kafka_conf_t)(anyconf.(*rdkConf)),
                (*C.rd_kafka_topic_conf_t)((*rdkTopicConf)(cTopicConf)))

        default:
            err = anyconfSet(anyconf, k, v)
            if err != nil {
                return err
            }
        }
    }

    return nil
}

// convert ConfigMap to C rd_kafka_conf_t *
func (m ConfigMap) convert() (cConf *C.rd_kafka_conf_t, err error) {
    cConf = C.rd_kafka_conf_new()

    // Set the client.software.name and .version (use librdkafka version).
    _, librdkafkaVersion := LibraryVersion()
    anyconfSet((*rdkConf)(cConf), "client.software.name", "confluent-kafka-go")
    anyconfSet((*rdkConf)(cConf), "client.software.version", librdkafkaVersion)

    err = configConvertAnyconf(m, (*rdkConf)(cConf))
    if err != nil {
        C.rd_kafka_conf_destroy(cConf)
        return nil, err
    }
    return cConf, nil
}

// get finds key in the configmap and returns its value.
// If the key is not found defval is returned.
// If the key is found but the type is mismatched an error is returned.
func (m ConfigMap) get(key string, defval ConfigValue) (ConfigValue, error) {
    if strings.HasPrefix(key, "{topic}.") {
        defconfCv, found := m["default.topic.config"]
        if !found {
            return defval, nil
        }
        return defconfCv.(ConfigMap).get(strings.TrimPrefix(key, "{topic}."), defval)
    }

    v, ok := m[key]
    if !ok {
        return defval, nil
    }

    if defval != nil && reflect.TypeOf(defval) != reflect.TypeOf(v) {
        return nil, newErrorFromString(ErrInvalidArg, fmt.Sprintf("%s expects type %T, not %T", key, defval, v))
    }

    return v, nil
}

// extract performs a get() and if found deletes the key.
func (m ConfigMap) extract(key string, defval ConfigValue) (ConfigValue, error) {

    v, err := m.get(key, defval)
    if err != nil {
        return nil, err
    }

    delete(m, key)

    return v, nil
}

// extractLogConfig extracts generic go.logs.* configuration properties.
func (m ConfigMap) extractLogConfig() (logsChanEnable bool, logsChan chan LogEvent, err error) {
    v, err := m.extract("go.logs.channel.enable", false)
    if err != nil {
        return
    }

    logsChanEnable = v.(bool)

    v, err = m.extract("go.logs.channel", nil)
    if err != nil {
        return
    }

    if v != nil {
        logsChan = v.(chan LogEvent)
    }

    if logsChanEnable {
        // Tell librdkafka to forward logs to the log queue
        m.Set("log.queue=true")
    }

    return
}

func (m ConfigMap) clone() ConfigMap {
    m2 := make(ConfigMap)
    for k, v := range m {
        m2[k] = v
    }
    return m2
}

// Get finds the given key in the ConfigMap and returns its value.
// If the key is not found `defval` is returned.
// If the key is found but the type does not match that of `defval` (unless nil)
// an ErrInvalidArg error is returned.
func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error) {
    return m.get(key, defval)
}
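To make the ConfigMap semantics above concrete, a minimal sketch of its exported surface: `SetKey` with the deprecated `{topic}.` prefix, the flag-style `Set`, and typed `Get` (all defined in the file above; the key names are ordinary librdkafka properties used for illustration):

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	m := kafka.ConfigMap{"bootstrap.servers": "localhost:9092"}

	// Plain key: stored directly in the map.
	m.SetKey("session.timeout.ms", 6000)

	// Deprecated {topic}. prefix: routed into the "default.topic.config" sub-map.
	m.SetKey("{topic}.auto.offset.reset", "earliest")

	// flag.Set-style "key=value" form, as used for -X command line options.
	if err := m.Set("enable.auto.commit=true"); err != nil {
		fmt.Println(err)
	}

	// Get returns defval ("" here) when the key is absent; a type mismatch
	// between defval and the stored value yields an ErrInvalidArg error.
	v, err := m.Get("group.id", "")
	fmt.Println(v, err)
}
```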
@ -0,0 +1,923 @@ |
|||||||
|
package kafka |
||||||
|
|
||||||
|
/** |
||||||
|
* Copyright 2016-2020 Confluent Inc. |
||||||
|
* |
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
* you may not use this file except in compliance with the License. |
||||||
|
* You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"math" |
||||||
|
"time" |
||||||
|
"unsafe" |
||||||
|
) |
||||||
|
|
||||||
|
/* |
||||||
|
#include <stdlib.h> |
||||||
|
#include "select_rdkafka.h" |
||||||
|
|
||||||
|
|
||||||
|
static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) { |
||||||
|
return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL; |
||||||
|
} |
||||||
|
*/ |
||||||
|
import "C" |
||||||
|
|
||||||
|
// RebalanceCb provides a per-Subscribe*() rebalance event callback.
|
||||||
|
// The passed Event will be either AssignedPartitions or RevokedPartitions
|
||||||
|
type RebalanceCb func(*Consumer, Event) error |
||||||
|
|
||||||
|
// Consumer implements a High-level Apache Kafka Consumer instance
|
||||||
|
type Consumer struct { |
||||||
|
events chan Event |
||||||
|
handle handle |
||||||
|
eventsChanEnable bool |
||||||
|
readerTermChan chan bool |
||||||
|
rebalanceCb RebalanceCb |
||||||
|
appReassigned bool |
||||||
|
appRebalanceEnable bool // Config setting
|
||||||
|
} |
||||||
|
|
||||||
|
// Strings returns a human readable name for a Consumer instance
|
||||||
|
func (c *Consumer) String() string { |
||||||
|
return c.handle.String() |
||||||
|
} |
||||||
|
|
||||||
|
// getHandle implements the Handle interface
|
||||||
|
func (c *Consumer) gethandle() *handle { |
||||||
|
return &c.handle |
||||||
|
} |
||||||
|
|
||||||
|
// Subscribe to a single topic
|
||||||
|
// This replaces the current subscription
|
||||||
|
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error { |
||||||
|
return c.SubscribeTopics([]string{topic}, rebalanceCb) |
||||||
|
} |
||||||
|
|
||||||
|
// SubscribeTopics subscribes to the provided list of topics.
|
||||||
|
// This replaces the current subscription.
|
||||||
|
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error) { |
||||||
|
ctopics := C.rd_kafka_topic_partition_list_new(C.int(len(topics))) |
||||||
|
defer C.rd_kafka_topic_partition_list_destroy(ctopics) |
||||||
|
|
||||||
|
for _, topic := range topics { |
||||||
|
ctopic := C.CString(topic) |
||||||
|
defer C.free(unsafe.Pointer(ctopic)) |
||||||
|
C.rd_kafka_topic_partition_list_add(ctopics, ctopic, C.RD_KAFKA_PARTITION_UA) |
||||||
|
} |
||||||
|
|
||||||
|
e := C.rd_kafka_subscribe(c.handle.rk, ctopics) |
||||||
|
if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return newError(e) |
||||||
|
} |
||||||
|
|
||||||
|
c.rebalanceCb = rebalanceCb |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// Unsubscribe from the current subscription, if any.
|
||||||
|
func (c *Consumer) Unsubscribe() (err error) { |
||||||
|
C.rd_kafka_unsubscribe(c.handle.rk) |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// Assign an atomic set of partitions to consume.
|
||||||
|
//
|
||||||
|
// The .Offset field of each TopicPartition must either be set to an absolute
|
||||||
|
// starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc),
|
||||||
|
// but should typically be set to `kafka.OffsetStored` to have the consumer
|
||||||
|
// use the committed offset as a start position, with a fallback to
|
||||||
|
// `auto.offset.reset` if there is no committed offset.
|
||||||
|
//
|
||||||
|
// This replaces the current assignment.
|
||||||
|
func (c *Consumer) Assign(partitions []TopicPartition) (err error) { |
||||||
|
c.appReassigned = true |
||||||
|
|
||||||
|
cparts := newCPartsFromTopicPartitions(partitions) |
||||||
|
defer C.rd_kafka_topic_partition_list_destroy(cparts) |
||||||
|
|
||||||
|
e := C.rd_kafka_assign(c.handle.rk, cparts) |
||||||
|
if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return newError(e) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// Unassign the current set of partitions to consume.
|
||||||
|
func (c *Consumer) Unassign() (err error) { |
||||||
|
c.appReassigned = true |
||||||
|
|
||||||
|
e := C.rd_kafka_assign(c.handle.rk, nil) |
||||||
|
if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return newError(e) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// IncrementalAssign adds the specified partitions to the current set of
|
||||||
|
// partitions to consume.
|
||||||
|
//
|
||||||
|
// The .Offset field of each TopicPartition must either be set to an absolute
|
||||||
|
// starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc),
|
||||||
|
// but should typically be set to `kafka.OffsetStored` to have the consumer
|
||||||
|
// use the committed offset as a start position, with a fallback to
|
||||||
|
// `auto.offset.reset` if there is no committed offset.
|
||||||
|
//
|
||||||
|
// The new partitions must not be part of the current assignment.
|
||||||
|
func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error) { |
||||||
|
c.appReassigned = true |
||||||
|
|
||||||
|
cparts := newCPartsFromTopicPartitions(partitions) |
||||||
|
defer C.rd_kafka_topic_partition_list_destroy(cparts) |
||||||
|
|
||||||
|
cError := C.rd_kafka_incremental_assign(c.handle.rk, cparts) |
||||||
|
if cError != nil { |
||||||
|
return newErrorFromCErrorDestroy(cError) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// IncrementalUnassign removes the specified partitions from the current set of
|
||||||
|
// partitions to consume.
|
||||||
|
//
|
||||||
|
// The .Offset field of the TopicPartition is ignored.
|
||||||
|
//
|
||||||
|
// The removed partitions must be part of the current assignment.
|
||||||
|
func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error) { |
||||||
|
c.appReassigned = true |
||||||
|
|
||||||
|
cparts := newCPartsFromTopicPartitions(partitions) |
||||||
|
defer C.rd_kafka_topic_partition_list_destroy(cparts) |
||||||
|
|
||||||
|
cError := C.rd_kafka_incremental_unassign(c.handle.rk, cparts) |
||||||
|
if cError != nil { |
||||||
|
return newErrorFromCErrorDestroy(cError) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// GetRebalanceProtocol returns the current consumer group rebalance protocol,
|
||||||
|
// which is either "EAGER" or "COOPERATIVE".
|
||||||
|
// If the rebalance protocol is not known in the current state an empty string
|
||||||
|
// is returned.
|
||||||
|
// Should typically only be called during rebalancing.
|
||||||
|
func (c *Consumer) GetRebalanceProtocol() string { |
||||||
|
cStr := C.rd_kafka_rebalance_protocol(c.handle.rk) |
||||||
|
if cStr == nil { |
||||||
|
return "" |
||||||
|
} |
||||||
|
|
||||||
|
return C.GoString(cStr) |
||||||
|
} |
||||||
|
|
||||||
|
// AssignmentLost returns true if current partition assignment has been lost.
|
||||||
|
// This method is only applicable for use with a subscribing consumer when
|
||||||
|
// handling a rebalance event or callback.
|
||||||
|
// Partitions that have been lost may already be owned by other members in the
|
||||||
|
// group and therefore commiting offsets, for example, may fail.
|
||||||
|
func (c *Consumer) AssignmentLost() bool { |
||||||
|
return cint2bool(C.rd_kafka_assignment_lost(c.handle.rk)) |
||||||
|
} |
||||||
|
|
||||||
|
// commit offsets for specified offsets.
|
||||||
|
// If offsets is nil the currently assigned partitions' offsets are committed.
|
||||||
|
// This is a blocking call, caller will need to wrap in go-routine to
|
||||||
|
// get async or throw-away behaviour.
|
||||||
|
func (c *Consumer) commit(offsets []TopicPartition) (committedOffsets []TopicPartition, err error) { |
||||||
|
var rkqu *C.rd_kafka_queue_t |
||||||
|
|
||||||
|
rkqu = C.rd_kafka_queue_new(c.handle.rk) |
||||||
|
defer C.rd_kafka_queue_destroy(rkqu) |
||||||
|
|
||||||
|
var coffsets *C.rd_kafka_topic_partition_list_t |
||||||
|
if offsets != nil { |
||||||
|
coffsets = newCPartsFromTopicPartitions(offsets) |
||||||
|
defer C.rd_kafka_topic_partition_list_destroy(coffsets) |
||||||
|
} |
||||||
|
|
||||||
|
cErr := C.rd_kafka_commit_queue(c.handle.rk, coffsets, rkqu, nil, nil) |
||||||
|
if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return nil, newError(cErr) |
||||||
|
} |
||||||
|
|
||||||
|
rkev := C.rd_kafka_queue_poll(rkqu, C.int(-1)) |
||||||
|
if rkev == nil { |
||||||
|
// shouldn't happen
|
||||||
|
return nil, newError(C.RD_KAFKA_RESP_ERR__DESTROY) |
||||||
|
} |
||||||
|
defer C.rd_kafka_event_destroy(rkev) |
||||||
|
|
||||||
|
if C.rd_kafka_event_type(rkev) != C.RD_KAFKA_EVENT_OFFSET_COMMIT { |
||||||
|
panic(fmt.Sprintf("Expected OFFSET_COMMIT, got %s", |
||||||
|
C.GoString(C.rd_kafka_event_name(rkev)))) |
||||||
|
} |
||||||
|
|
||||||
|
cErr = C.rd_kafka_event_error(rkev) |
||||||
|
if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return nil, newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)) |
||||||
|
} |
||||||
|
|
||||||
|
cRetoffsets := C.rd_kafka_event_topic_partition_list(rkev) |
||||||
|
if cRetoffsets == nil { |
||||||
|
// no offsets, no error
|
||||||
|
return nil, nil |
||||||
|
} |
||||||
|
committedOffsets = newTopicPartitionsFromCparts(cRetoffsets) |
||||||
|
|
||||||
|
return committedOffsets, nil |
||||||
|
} |
||||||
|
|
||||||
|
// Commit offsets for currently assigned partitions
|
||||||
|
// This is a blocking call.
|
||||||
|
// Returns the committed offsets on success.
|
||||||
|
func (c *Consumer) Commit() ([]TopicPartition, error) { |
||||||
|
return c.commit(nil) |
||||||
|
} |
||||||
|
|
||||||
|
// CommitMessage commits offset based on the provided message.
|
||||||
|
// This is a blocking call.
|
||||||
|
// Returns the committed offsets on success.
|
||||||
|
func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error) { |
||||||
|
if m.TopicPartition.Error != nil { |
||||||
|
return nil, newErrorFromString(ErrInvalidArg, "Can't commit errored message") |
||||||
|
} |
||||||
|
offsets := []TopicPartition{m.TopicPartition} |
||||||
|
offsets[0].Offset++ |
||||||
|
return c.commit(offsets) |
||||||
|
} |
||||||
|
|
||||||
|
// CommitOffsets commits the provided list of offsets
|
||||||
|
// This is a blocking call.
|
||||||
|
// Returns the committed offsets on success.
|
||||||
|
func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error) { |
||||||
|
return c.commit(offsets) |
||||||
|
} |

// StoreOffsets stores the provided list of offsets that will be committed
// to the offset store according to `auto.commit.interval.ms` or manual
// offset-less Commit().
//
// Returns the stored offsets on success. If at least one offset couldn't be stored,
// an error and a list of offsets is returned. Each offset can be checked for
// specific errors via its `.Error` member.
func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error) {
	coffsets := newCPartsFromTopicPartitions(offsets)
	defer C.rd_kafka_topic_partition_list_destroy(coffsets)

	cErr := C.rd_kafka_offsets_store(c.handle.rk, coffsets)

	// coffsets might be annotated with an error
	storedOffsets = newTopicPartitionsFromCparts(coffsets)

	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return storedOffsets, newError(cErr)
	}

	return storedOffsets, nil
}

// StoreMessage stores offset based on the provided message.
// This is a convenience method that uses StoreOffsets to do the actual work.
func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error) {
	if m.TopicPartition.Error != nil {
		return nil, newErrorFromString(ErrInvalidArg, "Can't store errored message")
	}
	if m.TopicPartition.Offset < 0 {
		return nil, newErrorFromString(ErrInvalidArg, "Can't store message with offset less than 0")
	}
	offsets := []TopicPartition{m.TopicPartition}
	offsets[0].Offset++
	return c.StoreOffsets(offsets)
}
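
// Editor's note: StoreOffsets/StoreMessage are typically combined with
// `enable.auto.offset.store=false` (while leaving auto-commit enabled) so
// offsets are only stored after a message has actually been processed. A
// sketch under that assumed configuration:
//
//	// in the consume loop, after processing succeeded:
//	if _, serr := c.StoreMessage(msg); serr != nil {
//		// offset could not be stored; check serr and the
//		// per-partition .Error fields of the returned offsets
//	}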

// Seek seeks the given topic partitions using the offset from the TopicPartition.
//
// If timeoutMs is not 0 the call will wait this long for the
// seek to be performed. If the timeout is reached the internal state
// will be unknown and this function returns ErrTimedOut.
// If timeoutMs is 0 it will initiate the seek but return
// immediately without any error reporting (i.e., async).
//
// Seek() may only be used for partitions already being consumed
// (through Assign() or implicitly through a self-rebalanced Subscribe()).
// To set the starting offset it is preferred to use Assign() and provide
// a starting offset for each partition.
//
// Returns an error on failure or nil otherwise.
func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error {
	rkt := c.handle.getRkt(*partition.Topic)
	cErr := C.rd_kafka_seek(rkt,
		C.int32_t(partition.Partition),
		C.int64_t(partition.Offset),
		C.int(timeoutMs))
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cErr)
	}
	return nil
}
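
// Editor's note: example of rewinding an already-assigned partition to its
// beginning. The topic name is an assumption for illustration; Seek only
// works for partitions currently being consumed:
//
//	topic := "mytopic" // assumed name
//	err := c.Seek(TopicPartition{
//		Topic:     &topic,
//		Partition: 0,
//		Offset:    OffsetBeginning,
//	}, 0 /* async: no error reporting */)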

// Poll the consumer for messages or events.
//
// Will block for at most timeoutMs milliseconds.
//
// The following callbacks may be triggered:
//   Subscribe()'s rebalanceCb
//
// Returns nil on timeout, else an Event.
func (c *Consumer) Poll(timeoutMs int) (event Event) {
	ev, _ := c.handle.eventPoll(nil, timeoutMs, 1, nil)
	return ev
}
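
// Editor's note: a typical Poll() event loop distinguishes event types with
// a type switch. This sketch assumes the consumer is already subscribed and
// that process() is application-defined:
//
//	run := true
//	for run {
//		switch e := c.Poll(100).(type) {
//		case *Message:
//			process(e)
//		case Error:
//			// client-level error; stop on fatal errors
//			run = !e.IsFatal()
//		case nil:
//			// poll timed out, nothing to do
//		}
//	}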

// Events returns the Events channel (if enabled)
func (c *Consumer) Events() chan Event {
	return c.events
}

// Logs returns the log channel if enabled, or nil otherwise.
func (c *Consumer) Logs() chan LogEvent {
	return c.handle.logs
}

// ReadMessage polls the consumer for a message.
//
// This is a convenience API that wraps Poll() and only returns
// messages or errors. All other event types are discarded.
//
// The call will block for at most `timeout` waiting for
// a new message or error. `timeout` may be set to -1 for
// indefinite wait.
//
// Timeout is returned as (nil, err) where err satisfies
// `err.(kafka.Error).Code() == kafka.ErrTimedOut`.
//
// Messages are returned as (msg, nil),
// while general errors are returned as (nil, err),
// and partition-specific errors are returned as (msg, err) where
// msg.TopicPartition provides partition-specific information (such as topic, partition and offset).
//
// All other event types, such as PartitionEOF, AssignedPartitions, etc., are silently discarded.
func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error) {

	var absTimeout time.Time
	var timeoutMs int

	if timeout > 0 {
		absTimeout = time.Now().Add(timeout)
		timeoutMs = (int)(timeout.Seconds() * 1000.0)
	} else {
		timeoutMs = (int)(timeout)
	}

	for {
		ev := c.Poll(timeoutMs)

		switch e := ev.(type) {
		case *Message:
			if e.TopicPartition.Error != nil {
				return e, e.TopicPartition.Error
			}
			return e, nil
		case Error:
			return nil, e
		default:
			// Ignore other event types
		}

		if timeout > 0 {
			// Calculate remaining time
			timeoutMs = int(math.Max(0.0, absTimeout.Sub(time.Now()).Seconds()*1000.0))
		}

		if timeoutMs == 0 && ev == nil {
			return nil, newError(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
		}
	}
}
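
// Editor's note: the usual ReadMessage() loop treats timeouts as a normal
// condition. A sketch; the 100ms poll interval is an arbitrary assumption:
//
//	for {
//		msg, err := c.ReadMessage(100 * time.Millisecond)
//		if err != nil {
//			if err.(Error).Code() == ErrTimedOut {
//				continue // no message within the timeout
//			}
//			break // a real error
//		}
//		fmt.Printf("%s: %s\n", msg.TopicPartition, string(msg.Value))
//	}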

// Close Consumer instance.
// The object is no longer usable after this call.
func (c *Consumer) Close() (err error) {

	// Wait for consumerReader() or pollLogEvents to terminate (by closing readerTermChan)
	close(c.readerTermChan)
	c.handle.waitGroup.Wait()
	if c.eventsChanEnable {
		close(c.events)
	}

	// librdkafka's rd_kafka_consumer_close() will block and trigger the
	// rebalance_cb(), if one is set. If not (which is the case with the Go
	// client, since it registers EVENTs rather than callbacks), librdkafka
	// will shortcut the rebalance_cb and do a forced unassign. But we can't
	// have that, since the application might need the final RevokePartitions
	// before shutting down. So we trigger an Unsubscribe() first, wait for
	// that to propagate (in the Poll loop below), and then close the consumer.
	c.Unsubscribe()

	// Poll for rebalance events
	for {
		c.Poll(10 * 1000)
		if int(C.rd_kafka_queue_length(c.handle.rkq)) == 0 {
			break
		}
	}

	// Destroy our queue
	C.rd_kafka_queue_destroy(c.handle.rkq)
	c.handle.rkq = nil

	// Close the consumer
	C.rd_kafka_consumer_close(c.handle.rk)

	c.handle.cleanup()

	C.rd_kafka_destroy(c.handle.rk)

	return nil
}

// NewConsumer creates a new high-level Consumer instance.
//
// conf is a *ConfigMap with standard librdkafka configuration properties.
//
// Supported special configuration properties:
//   go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel.
//                                        If set to true the app must handle the AssignedPartitions and
//                                        RevokedPartitions events and call Assign() and Unassign()
//                                        respectively.
//   go.events.channel.enable (bool, false) - [deprecated] Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled.
//   go.events.channel.size (int, 1000) - Events() channel size
//   go.logs.channel.enable (bool, false) - Forward log to Logs() channel.
//   go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.
//
// WARNING: Due to the buffering nature of channels (and queues in general) the
// use of the events channel risks receiving outdated events and
// messages. Minimizing go.events.channel.size reduces the risk
// and number of outdated events and messages but does not eliminate
// the factor completely. With a channel size of 1 at most one
// event or message may be outdated.
func NewConsumer(conf *ConfigMap) (*Consumer, error) {

	err := versionCheck()
	if err != nil {
		return nil, err
	}

	// Before we do anything with the configuration, create a copy such that
	// the original is not mutated.
	confCopy := conf.clone()

	groupid, _ := confCopy.get("group.id", nil)
	if groupid == nil {
		// Without a group.id the underlying cgrp subsystem in librdkafka
		// won't get started, and without it there is no way to consume
		// assigned partitions.
		// So for now we require group.id; this might change in the future.
		return nil, newErrorFromString(ErrInvalidArg, "Required property group.id not set")
	}

	c := &Consumer{}

	v, err := confCopy.extract("go.application.rebalance.enable", false)
	if err != nil {
		return nil, err
	}
	c.appRebalanceEnable = v.(bool)

	v, err = confCopy.extract("go.events.channel.enable", false)
	if err != nil {
		return nil, err
	}
	c.eventsChanEnable = v.(bool)

	v, err = confCopy.extract("go.events.channel.size", 1000)
	if err != nil {
		return nil, err
	}
	eventsChanSize := v.(int)

	logsChanEnable, logsChan, err := confCopy.extractLogConfig()
	if err != nil {
		return nil, err
	}

	cConf, err := confCopy.convert()
	if err != nil {
		return nil, err
	}
	cErrstr := (*C.char)(C.malloc(C.size_t(256)))
	defer C.free(unsafe.Pointer(cErrstr))

	C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_REBALANCE|C.RD_KAFKA_EVENT_OFFSET_COMMIT|C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR|C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH)

	c.handle.rk = C.rd_kafka_new(C.RD_KAFKA_CONSUMER, cConf, cErrstr, 256)
	if c.handle.rk == nil {
		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
	}

	C.rd_kafka_poll_set_consumer(c.handle.rk)

	c.handle.c = c
	c.handle.setup()
	c.readerTermChan = make(chan bool)
	c.handle.rkq = C.rd_kafka_queue_get_consumer(c.handle.rk)
	if c.handle.rkq == nil {
		// no cgrp (no group.id configured), revert to main queue.
		c.handle.rkq = C.rd_kafka_queue_get_main(c.handle.rk)
	}

	if logsChanEnable {
		c.handle.setupLogQueue(logsChan, c.readerTermChan)
	}

	if c.eventsChanEnable {
		c.events = make(chan Event, eventsChanSize)
		/* Start rdkafka consumer queue reader -> events writer goroutine */
		c.handle.waitGroup.Add(1)
		go func() {
			consumerReader(c, c.readerTermChan)
			c.handle.waitGroup.Done()
		}()
	}

	return c, nil
}
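
// Editor's note: a minimal consumer construction tying the properties above
// together. The broker address and group name are placeholders:
//
//	c, err := NewConsumer(&ConfigMap{
//		"bootstrap.servers": "localhost:9092",
//		"group.id":          "mygroup", // required, see above
//		"auto.offset.reset": "earliest",
//	})
//	if err != nil {
//		// configuration error, e.g. missing group.id
//	}
//	defer c.Close()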

// consumerReader reads messages and events from the librdkafka consumer queue
// and posts them on the consumer channel.
// Runs until termChan closes
func consumerReader(c *Consumer, termChan chan bool) {
	for {
		select {
		case <-termChan:
			return
		default:
			_, term := c.handle.eventPoll(c.events, 100, 1000, termChan)
			if term {
				return
			}
		}
	}
}

// GetMetadata queries broker for cluster and topic metadata.
// If topic is non-nil only information about that topic is returned, else if
// allTopics is false only information about locally used topics is returned,
// else information about all topics is returned.
// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.
func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
	return getMetadata(c, topic, allTopics, timeoutMs)
}

// QueryWatermarkOffsets queries the broker for the low and high offsets for the given topic and partition.
func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) {
	return queryWatermarkOffsets(c, topic, partition, timeoutMs)
}

// GetWatermarkOffsets returns the cached low and high offsets for the given topic
// and partition. The high offset is populated on every fetch response or via calling QueryWatermarkOffsets.
// The low offset is populated every statistics.interval.ms if that value is set.
// OffsetInvalid will be returned if there is no cached offset for either value.
func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error) {
	return getWatermarkOffsets(c, topic, partition)
}
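
// Editor's note: a common use of the watermark APIs is estimating consumer
// lag for a partition as (high watermark - current position). A sketch with
// assumed topic/partition values:
//
//	topic := "mytopic" // assumed
//	low, high, err := c.QueryWatermarkOffsets(topic, 0, 5000)
//	if err == nil {
//		_ = low // first available offset
//		pos, _ := c.Position([]TopicPartition{{Topic: &topic, Partition: 0}})
//		lag := high - int64(pos[0].Offset)
//		fmt.Printf("lag: %d\n", lag)
//	}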

// OffsetsForTimes looks up offsets by timestamp for the given partitions.
//
// The returned offset for each partition is the earliest offset whose
// timestamp is greater than or equal to the given timestamp in the
// corresponding partition. If the provided timestamp exceeds that of the
// last message in the partition, a value of -1 will be returned.
//
// The timestamps to query are represented as `.Offset` in the `times`
// argument and the looked up offsets are represented as `.Offset` in the returned
// `offsets` list.
//
// The function will block for at most timeoutMs milliseconds.
//
// Duplicate Topic+Partitions are not supported.
// Per-partition errors may be returned in the `.Error` field.
func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
	return offsetsForTimes(c, times, timeoutMs)
}
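
// Editor's note: example of finding the offsets for "one hour ago" and
// seeking there. The timestamp goes in as .Offset (milliseconds since the
// epoch) and the resolved offset comes back in the same field; the topic
// name is an illustrative assumption:
//
//	topic := "mytopic"
//	ts := time.Now().Add(-time.Hour).UnixMilli()
//	times := []TopicPartition{{Topic: &topic, Partition: 0, Offset: Offset(ts)}}
//	offsets, err := c.OffsetsForTimes(times, 5000)
//	if err == nil && offsets[0].Error == nil {
//		c.Seek(offsets[0], 0)
//	}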

// Subscription returns the current subscription as set by Subscribe()
func (c *Consumer) Subscription() (topics []string, err error) {
	var cTopics *C.rd_kafka_topic_partition_list_t

	cErr := C.rd_kafka_subscription(c.handle.rk, &cTopics)
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cErr)
	}
	defer C.rd_kafka_topic_partition_list_destroy(cTopics)

	topicCnt := int(cTopics.cnt)
	topics = make([]string, topicCnt)
	for i := 0; i < topicCnt; i++ {
		crktpar := C._c_rdkafka_topic_partition_list_entry(cTopics,
			C.int(i))
		topics[i] = C.GoString(crktpar.topic)
	}

	return topics, nil
}

// Assignment returns the current partition assignments
func (c *Consumer) Assignment() (partitions []TopicPartition, err error) {
	var cParts *C.rd_kafka_topic_partition_list_t

	cErr := C.rd_kafka_assignment(c.handle.rk, &cParts)
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cErr)
	}
	defer C.rd_kafka_topic_partition_list_destroy(cParts)

	partitions = newTopicPartitionsFromCparts(cParts)

	return partitions, nil
}

// Committed retrieves committed offsets for the given set of partitions
func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)
	cerr := C.rd_kafka_committed(c.handle.rk, cparts, C.int(timeoutMs))
	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cerr)
	}

	return newTopicPartitionsFromCparts(cparts), nil
}

// Position returns the current consume position for the given partitions.
// Typical use is to call Assignment() to get the partition list
// and then pass it to Position() to get the current consume position for
// each of the assigned partitions.
// The consume position is the next message to read from the partition,
// i.e., the offset of the last message seen by the application + 1.
func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error) {
	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)
	cerr := C.rd_kafka_position(c.handle.rk, cparts)
	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cerr)
	}

	return newTopicPartitionsFromCparts(cparts), nil
}
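
// Editor's note: as the Position() comment suggests, the two calls compose
// naturally (sketch):
//
//	parts, err := c.Assignment()
//	if err == nil {
//		positions, _ := c.Position(parts)
//		for _, tp := range positions {
//			fmt.Printf("%s[%d]: next offset %v\n", *tp.Topic, tp.Partition, tp.Offset)
//		}
//	}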

// Pause consumption for the provided list of partitions
//
// Note that messages already enqueued on the consumer's Event channel
// (if `go.events.channel.enable` has been set) will NOT be purged by
// this call, set `go.events.channel.size` accordingly.
func (c *Consumer) Pause(partitions []TopicPartition) (err error) {
	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)
	cerr := C.rd_kafka_pause_partitions(c.handle.rk, cparts)
	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cerr)
	}
	return nil
}

// Resume consumption for the provided list of partitions
func (c *Consumer) Resume(partitions []TopicPartition) (err error) {
	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)
	cerr := C.rd_kafka_resume_partitions(c.handle.rk, cparts)
	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cerr)
	}
	return nil
}

// SetOAuthBearerToken sets the data to be transmitted
// to a broker during SASL/OAUTHBEARER authentication. It will return nil
// on success, otherwise an error if:
// 1) the token data is invalid (meaning an expiration time in the past
// or either a token value or an extension key or value that does not meet
// the regular expression requirements as per
// https://tools.ietf.org/html/rfc7628#section-3.1);
// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
// authentication mechanism.
func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error {
	return c.handle.setOAuthBearerToken(oauthBearerToken)
}

// SetOAuthBearerTokenFailure sets the error message describing why token
// retrieval/setting failed; it also schedules a new token refresh event for 10
// seconds later so the attempt may be retried. It will return nil on
// success, otherwise an error if:
// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
// 2) SASL/OAUTHBEARER is supported but is not configured as the client's
// authentication mechanism.
func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error {
	return c.handle.setOAuthBearerTokenFailure(errstr)
}

// ConsumerGroupMetadata reflects the current consumer group member metadata.
type ConsumerGroupMetadata struct {
	serialized []byte
}

// serializeConsumerGroupMetadata converts a C metadata object to its
// binary representation so we don't have to hold on to the C object,
// which would require an explicit .Close().
func serializeConsumerGroupMetadata(cgmd *C.rd_kafka_consumer_group_metadata_t) ([]byte, error) {
	var cBuffer *C.void
	var cSize C.size_t
	cError := C.rd_kafka_consumer_group_metadata_write(cgmd,
		(*unsafe.Pointer)(unsafe.Pointer(&cBuffer)), &cSize)
	if cError != nil {
		return nil, newErrorFromCErrorDestroy(cError)
	}
	defer C.rd_kafka_mem_free(nil, unsafe.Pointer(cBuffer))

	return C.GoBytes(unsafe.Pointer(cBuffer), C.int(cSize)), nil
}

// deserializeConsumerGroupMetadata converts a serialized metadata object
// back to a C object.
func deserializeConsumerGroupMetadata(serialized []byte) (*C.rd_kafka_consumer_group_metadata_t, error) {
	var cgmd *C.rd_kafka_consumer_group_metadata_t

	cSerialized := C.CBytes(serialized)
	defer C.free(cSerialized)

	cError := C.rd_kafka_consumer_group_metadata_read(
		&cgmd, cSerialized, C.size_t(len(serialized)))
	if cError != nil {
		return nil, newErrorFromCErrorDestroy(cError)
	}

	return cgmd, nil
}

// GetConsumerGroupMetadata returns the consumer's current group metadata.
// This object should be passed to the transactional producer's
// SendOffsetsToTransaction() API.
func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error) {
	cgmd := C.rd_kafka_consumer_group_metadata(c.handle.rk)
	if cgmd == nil {
		return nil, NewError(ErrState, "Consumer group metadata not available", false)
	}
	defer C.rd_kafka_consumer_group_metadata_destroy(cgmd)

	serialized, err := serializeConsumerGroupMetadata(cgmd)
	if err != nil {
		return nil, err
	}

	return &ConsumerGroupMetadata{serialized}, nil
}
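
// Editor's note: in an exactly-once pipeline the metadata is handed to the
// transactional producer. A sketch assuming a producer p with an open
// transaction and an offsets slice built from processed messages:
//
//	cgmd, err := c.GetConsumerGroupMetadata()
//	if err == nil {
//		err = p.SendOffsetsToTransaction(nil, offsets, cgmd)
//	}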

// NewTestConsumerGroupMetadata creates a new consumer group metadata instance
// mainly for testing use.
// Use GetConsumerGroupMetadata() to retrieve the real metadata.
func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error) {
	cGroupID := C.CString(groupID)
	defer C.free(unsafe.Pointer(cGroupID))

	cgmd := C.rd_kafka_consumer_group_metadata_new(cGroupID)
	if cgmd == nil {
		return nil, NewError(ErrInvalidArg, "Failed to create metadata object", false)
	}

	defer C.rd_kafka_consumer_group_metadata_destroy(cgmd)
	serialized, err := serializeConsumerGroupMetadata(cgmd)
	if err != nil {
		return nil, err
	}

	return &ConsumerGroupMetadata{serialized}, nil
}

// cEventToRebalanceEvent returns an Event (AssignedPartitions or RevokedPartitions)
// based on the specified rkev.
func cEventToRebalanceEvent(rkev *C.rd_kafka_event_t) Event {
	if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS {
		var ev AssignedPartitions
		ev.Partitions = newTopicPartitionsFromCparts(C.rd_kafka_event_topic_partition_list(rkev))
		return ev
	} else if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS {
		var ev RevokedPartitions
		ev.Partitions = newTopicPartitionsFromCparts(C.rd_kafka_event_topic_partition_list(rkev))
		return ev
	} else {
		panic(fmt.Sprintf("Unable to create rebalance event from C type %s",
			C.GoString(C.rd_kafka_err2name(C.rd_kafka_event_error(rkev)))))
	}
}

// handleRebalanceEvent handles an assign/revoke rebalance event.
//
// If the app provided a RebalanceCb to Subscribe*() or
// has go.application.rebalance.enable=true, we create an event
// and forward it to the application through the RebalanceCb or the
// Events channel, respectively.
// Since librdkafka requires the rebalance event to be "acked" by
// the application (by calling *assign()) to synchronize state, we keep track
// of whether the application performed *Assign() or *Unassign(), but this only
// works for the non-channel case. For the channel case we assume the
// application calls *Assign() or *Unassign().
// Failure to do so will "hang" the consumer, e.g., it won't start consuming
// and it won't close cleanly, so this error case should be visible
// immediately to the application developer.
//
// In the polling case (not channel based consumer) the rebalance event
// is returned in retval, else nil is returned.
func (c *Consumer) handleRebalanceEvent(channel chan Event, rkev *C.rd_kafka_event_t) (retval Event) {

	var ev Event

	if c.rebalanceCb != nil || c.appRebalanceEnable {
		// Application has a rebalance callback or has enabled
		// rebalances on the events channel, create the appropriate Event.
		ev = cEventToRebalanceEvent(rkev)
	}

	if channel != nil && c.appRebalanceEnable && c.rebalanceCb == nil {
		// Channel-based consumer with rebalancing enabled,
		// return the rebalance event and rely on the application
		// to call *Assign() / *Unassign().
		return ev
	}

	// Call the application's rebalance callback, if any.
	if c.rebalanceCb != nil {
		// Mark .appReassigned as false to keep track of whether the
		// application called *Assign() / *Unassign().
		c.appReassigned = false

		c.rebalanceCb(c, ev)

		if c.appReassigned {
			// Rebalance event handled by application.
			return nil
		}
	}

	// Either there was no rebalance callback, or the application
	// did not call *Assign / *Unassign, so we need to do it.

	isCooperative := c.GetRebalanceProtocol() == "COOPERATIVE"
	var cError *C.rd_kafka_error_t
	var cErr C.rd_kafka_resp_err_t

	if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS {
		// Assign partitions
		if isCooperative {
			cError = C.rd_kafka_incremental_assign(
				c.handle.rk,
				C.rd_kafka_event_topic_partition_list(rkev))
		} else {
			cErr = C.rd_kafka_assign(
				c.handle.rk,
				C.rd_kafka_event_topic_partition_list(rkev))
		}
	} else {
		// Revoke partitions

		if isCooperative {
			cError = C.rd_kafka_incremental_unassign(
				c.handle.rk,
				C.rd_kafka_event_topic_partition_list(rkev))
		} else {
			cErr = C.rd_kafka_assign(c.handle.rk, nil)
		}
	}

	// If the *assign() call returned an error, forward it to the
	// consumer's Events() channel for visibility.
	if cError != nil {
		c.events <- newErrorFromCErrorDestroy(cError)
	} else if cErr != 0 {
		c.events <- newError(cErr)
	}

	return nil
}

@ -0,0 +1,31 @@
/**
 * Copyright 2019 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"context"
	"time"
)

// timeout returns the remaining time after which work done on behalf of this
// context should be canceled, or ok==false if no deadline/timeout is set.
func timeout(ctx context.Context) (timeout time.Duration, ok bool) {
	if deadline, ok := ctx.Deadline(); ok {
		return deadline.Sub(time.Now()), true
	}
	return 0, false
}
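
// Editor's note: a sketch of how a helper like this can bridge a context
// deadline to librdkafka's millisecond timeouts (the surrounding function is
// illustrative, not part of this file):
//
//	func callWithCtx(ctx context.Context) {
//		timeoutMs := -1 // infinite by default
//		if t, ok := timeout(ctx); ok {
//			timeoutMs = int(t.Milliseconds())
//		}
//		_ = timeoutMs // pass to a C-level call
//	}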

@ -0,0 +1,154 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Automatically generate error codes from librdkafka
// See README for instructions
//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go

/*
#include <stdlib.h>
#include "select_rdkafka.h"
*/
import "C"

import (
	"fmt"
	"unsafe"
)

// Error provides a Kafka-specific error container
type Error struct {
	code             ErrorCode
	str              string
	fatal            bool
	retriable        bool
	txnRequiresAbort bool
}

func newError(code C.rd_kafka_resp_err_t) (err Error) {
	return Error{code: ErrorCode(code)}
}

// NewError creates a new Error.
func NewError(code ErrorCode, str string, fatal bool) (err Error) {
	return Error{code: code, str: str, fatal: fatal}
}

func newErrorFromString(code ErrorCode, str string) (err Error) {
	return Error{code: code, str: str}
}

func newErrorFromCString(code C.rd_kafka_resp_err_t, cstr *C.char) (err Error) {
	var str string
	if cstr != nil {
		str = C.GoString(cstr)
	} else {
		str = ""
	}
	return Error{code: ErrorCode(code), str: str}
}

func newCErrorFromString(code C.rd_kafka_resp_err_t, str string) (err Error) {
	return newErrorFromString(ErrorCode(code), str)
}

// newErrorFromCErrorDestroy creates a new Error instance and destroys
// the passed cError.
func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error {
	defer C.rd_kafka_error_destroy(cError)
	return Error{
		code:             ErrorCode(C.rd_kafka_error_code(cError)),
		str:              C.GoString(C.rd_kafka_error_string(cError)),
		fatal:            cint2bool(C.rd_kafka_error_is_fatal(cError)),
		retriable:        cint2bool(C.rd_kafka_error_is_retriable(cError)),
		txnRequiresAbort: cint2bool(C.rd_kafka_error_txn_requires_abort(cError)),
	}
}

// Error returns a human readable representation of an Error.
// Same as Error.String()
func (e Error) Error() string {
	return e.String()
}

// String returns a human readable representation of an Error
func (e Error) String() string {
	var errstr string
	if len(e.str) > 0 {
		errstr = e.str
	} else {
		errstr = e.code.String()
	}

	if e.IsFatal() {
		return fmt.Sprintf("Fatal error: %s", errstr)
	}

	return errstr
}

// Code returns the ErrorCode of an Error
func (e Error) Code() ErrorCode {
	return e.code
}

// IsFatal returns true if the error is a fatal error.
// A fatal error indicates the client instance is no longer operable and
// should be terminated. Typical causes include non-recoverable
// idempotent producer errors.
func (e Error) IsFatal() bool {
	return e.fatal
}

// IsRetriable returns true if the operation that caused this error
// may be retried.
// This flag is currently only set by the Transactional producer API.
func (e Error) IsRetriable() bool {
	return e.retriable
}

// TxnRequiresAbort returns true if the error is an abortable transaction error
// that requires the application to abort the current transaction with
// AbortTransaction() and start a new transaction with BeginTransaction()
// if it wishes to proceed with transactional operations.
// This flag is only set by the Transactional producer API.
func (e Error) TxnRequiresAbort() bool {
	return e.txnRequiresAbort
}
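
// Editor's note: callers typically recover these Kafka-specific details with
// a type assertion. A sketch of transactional-style error handling, where
// doKafkaOp stands in for any client call:
//
//	if err := doKafkaOp(); err != nil {
//		if kerr, ok := err.(Error); ok {
//			switch {
//			case kerr.TxnRequiresAbort():
//				// abort and retry the transaction
//			case kerr.IsRetriable():
//				// retry the same operation
//			case kerr.IsFatal():
//				// tear down the client
//			}
//		}
//	}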

// getFatalError returns an Error object if the client instance has raised a fatal error, else nil.
func getFatalError(H Handle) error {
	cErrstr := (*C.char)(C.malloc(C.size_t(512)))
	defer C.free(unsafe.Pointer(cErrstr))

	cErr := C.rd_kafka_fatal_error(H.gethandle().rk, cErrstr, 512)
	if int(cErr) == 0 {
		return nil
	}

	err := newErrorFromCString(cErr, cErrstr)
	err.fatal = true

	return err
}

// testFatalError triggers a fatal error in the underlying client.
// This is to be used strictly for testing purposes.
func testFatalError(H Handle, code ErrorCode, str string) ErrorCode {
	return ErrorCode(C.rd_kafka_test_fatal_error(H.gethandle().rk, C.rd_kafka_resp_err_t(code), C.CString(str)))
}

@ -0,0 +1,112 @@
package kafka

/**
 * Copyright 2020 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Automatically generate error codes from librdkafka
// See README for instructions
//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go

/*
#include <stdlib.h>
#include "select_rdkafka.h"

static const char *errdesc_to_string (const struct rd_kafka_err_desc *ed, int idx) {
   return ed[idx].name;
}

static const char *errdesc_to_desc (const struct rd_kafka_err_desc *ed, int idx) {
   return ed[idx].desc;
}
*/
import "C"

import (
	"fmt"
	"os"
	"strings"
	"time"
)

// camelCase transforms a snake_case string to camelCase.
func camelCase(s string) string {
	ret := ""
	for _, v := range strings.Split(s, "_") {
		if len(v) == 0 {
			continue
		}
		ret += strings.ToUpper((string)(v[0])) + strings.ToLower(v[1:])
	}
	return ret
}

// WriteErrorCodes writes Go error code constants to file from the
// librdkafka error codes.
// This function is not intended for public use.
func WriteErrorCodes(f *os.File) {
	f.WriteString("package kafka\n")
	now := time.Now()
	f.WriteString(fmt.Sprintf("// Copyright 2016-%d Confluent Inc.\n", now.Year()))
	f.WriteString(fmt.Sprintf("// AUTOMATICALLY GENERATED ON %v USING librdkafka %s\n",
		now, C.GoString(C.rd_kafka_version_str())))

	var errdescs *C.struct_rd_kafka_err_desc
	var csize C.size_t
	C.rd_kafka_get_err_descs(&errdescs, &csize)

	f.WriteString(`
/*
#include "select_rdkafka.h"
*/
import "C"

// ErrorCode is the integer representation of local and broker error codes
type ErrorCode int

// String returns a human readable representation of an error code
func (c ErrorCode) String() string {
	return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c)))
}

const (
`)

	for i := 0; i < int(csize); i++ {
		orig := C.GoString(C.errdesc_to_string(errdescs, C.int(i)))
		if len(orig) == 0 {
			continue
		}
		desc := C.GoString(C.errdesc_to_desc(errdescs, C.int(i)))
		if len(desc) == 0 {
			continue
		}

		errname := "Err" + camelCase(orig)

		// Special handling to please golint
		// Eof -> EOF
		// Id -> ID
		errname = strings.Replace(errname, "Eof", "EOF", -1)
		errname = strings.Replace(errname, "Id", "ID", -1)

		f.WriteString(fmt.Sprintf("	// %s %s\n", errname, desc))
		f.WriteString(fmt.Sprintf("	%s ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_%s)\n",
			errname, orig))
	}

	f.WriteString(")\n")
}

@ -0,0 +1,316 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"fmt"
	"os"
	"unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"


void chdrs_to_tmphdrs (glue_msg_t *gMsg) {
   size_t i = 0;
   const char *name;
   const void *val;
   size_t size;
   rd_kafka_headers_t *chdrs;

   if (rd_kafka_message_headers(gMsg->msg, &chdrs)) {
     gMsg->tmphdrs = NULL;
     gMsg->tmphdrsCnt = 0;
     return;
   }

   gMsg->tmphdrsCnt = rd_kafka_header_cnt(chdrs);
   gMsg->tmphdrs = malloc(sizeof(*gMsg->tmphdrs) * gMsg->tmphdrsCnt);

   while (!rd_kafka_header_get_all(chdrs, i,
                                   &gMsg->tmphdrs[i].key,
                                   &gMsg->tmphdrs[i].val,
                                   (size_t *)&gMsg->tmphdrs[i].size))
     i++;
}

rd_kafka_event_t *_rk_queue_poll (rd_kafka_queue_t *rkq, int timeoutMs,
                                  rd_kafka_event_type_t *evtype,
                                  glue_msg_t *gMsg,
                                  rd_kafka_event_t *prev_rkev) {
    rd_kafka_event_t *rkev;

    if (prev_rkev)
      rd_kafka_event_destroy(prev_rkev);

    rkev = rd_kafka_queue_poll(rkq, timeoutMs);
    *evtype = rd_kafka_event_type(rkev);

    if (*evtype == RD_KAFKA_EVENT_FETCH) {
      gMsg->msg = (rd_kafka_message_t *)rd_kafka_event_message_next(rkev);
      gMsg->ts = rd_kafka_message_timestamp(gMsg->msg, &gMsg->tstype);

      if (gMsg->want_hdrs)
        chdrs_to_tmphdrs(gMsg);
    }

    return rkev;
}
*/
import "C"

func chdrsToTmphdrs(gMsg *C.glue_msg_t) {
	C.chdrs_to_tmphdrs(gMsg)
}

// Event generic interface
type Event interface {
	// String returns a human-readable representation of the event
	String() string
}

// Specific event types

// Stats statistics event
type Stats struct {
	statsJSON string
}

func (e Stats) String() string {
	return e.statsJSON
}

// AssignedPartitions consumer group rebalance event: assigned partition set
type AssignedPartitions struct {
	Partitions []TopicPartition
}

func (e AssignedPartitions) String() string {
	return fmt.Sprintf("AssignedPartitions: %v", e.Partitions)
}

// RevokedPartitions consumer group rebalance event: revoked partition set
type RevokedPartitions struct {
	Partitions []TopicPartition
}

func (e RevokedPartitions) String() string {
	return fmt.Sprintf("RevokedPartitions: %v", e.Partitions)
}

// PartitionEOF consumer reached end of partition
// Needs to be explicitly enabled by setting the `enable.partition.eof`
// configuration property to true.
type PartitionEOF TopicPartition

func (p PartitionEOF) String() string {
	return fmt.Sprintf("EOF at %s", TopicPartition(p))
}

// OffsetsCommitted reports committed offsets
type OffsetsCommitted struct {
	Error   error
	Offsets []TopicPartition
}

func (o OffsetsCommitted) String() string {
	return fmt.Sprintf("OffsetsCommitted (%v, %v)", o.Error, o.Offsets)
}

// OAuthBearerTokenRefresh indicates token refresh is required
type OAuthBearerTokenRefresh struct {
	// Config is the value of the sasl.oauthbearer.config property
	Config string
}

func (o OAuthBearerTokenRefresh) String() string {
	return "OAuthBearerTokenRefresh"
}

// eventPoll polls an event from the handler's C rd_kafka_queue_t,
// translates it into an Event type and then sends it on `channel` if non-nil, else returns the Event.
// termChan is an optional channel to monitor along with producing to channel
// to indicate that `channel` is being terminated.
// Returns an (event Event, terminate bool) tuple, where terminate indicates
// whether termChan received a termination event.
func (h *handle) eventPoll(channel chan Event, timeoutMs int, maxEvents int, termChan chan bool) (Event, bool) {

	var prevRkev *C.rd_kafka_event_t
	term := false

	var retval Event

	if channel == nil {
		maxEvents = 1
	}
out:
	for evcnt := 0; evcnt < maxEvents; evcnt++ {
		var evtype C.rd_kafka_event_type_t
		var gMsg C.glue_msg_t
		gMsg.want_hdrs = C.int8_t(bool2cint(h.msgFields.Headers))
		rkev := C._rk_queue_poll(h.rkq, C.int(timeoutMs), &evtype, &gMsg, prevRkev)
		prevRkev = rkev
		timeoutMs = 0

		retval = nil

		switch evtype {
		case C.RD_KAFKA_EVENT_FETCH:
			// Consumer fetch event, new message.
			// Extracted into temporary gMsg for optimization
			retval = h.newMessageFromGlueMsg(&gMsg)

		case C.RD_KAFKA_EVENT_REBALANCE:
			// Consumer rebalance event
			retval = h.c.handleRebalanceEvent(channel, rkev)

		case C.RD_KAFKA_EVENT_ERROR:
			// Error event
			cErr := C.rd_kafka_event_error(rkev)
			if cErr == C.RD_KAFKA_RESP_ERR__PARTITION_EOF {
				crktpar := C.rd_kafka_event_topic_partition(rkev)
				if crktpar == nil {
					break
				}

				defer C.rd_kafka_topic_partition_destroy(crktpar)
				var peof PartitionEOF
				setupTopicPartitionFromCrktpar((*TopicPartition)(&peof), crktpar)

				retval = peof

			} else if int(C.rd_kafka_event_error_is_fatal(rkev)) != 0 {
				// A fatal error has been raised.
				// Extract the actual error from the client
				// instance and return a new Error with
				// fatal set to true.
				cFatalErrstrSize := C.size_t(512)
				cFatalErrstr := (*C.char)(C.malloc(cFatalErrstrSize))
				defer C.free(unsafe.Pointer(cFatalErrstr))
				cFatalErr := C.rd_kafka_fatal_error(h.rk, cFatalErrstr, cFatalErrstrSize)
				fatalErr := newErrorFromCString(cFatalErr, cFatalErrstr)
				fatalErr.fatal = true
				retval = fatalErr

			} else {
				retval = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev))
			}

		case C.RD_KAFKA_EVENT_STATS:
			retval = &Stats{C.GoString(C.rd_kafka_event_stats(rkev))}

		case C.RD_KAFKA_EVENT_DR:
			// Producer Delivery Report event.
			// Each such event contains delivery reports for all
			// messages in the produced batch.
			// Forward delivery reports to per-message's response channel
			// or to the global Producer.Events channel, or none.
			rkmessages := make([]*C.rd_kafka_message_t, int(C.rd_kafka_event_message_count(rkev)))

			cnt := int(C.rd_kafka_event_message_array(rkev, (**C.rd_kafka_message_t)(unsafe.Pointer(&rkmessages[0])), C.size_t(len(rkmessages))))

			for _, rkmessage := range rkmessages[:cnt] {
				msg := h.newMessageFromC(rkmessage)
				var ch *chan Event

				if rkmessage._private != nil {
					// Find cgoif by id
					cg, found := h.cgoGet((int)((uintptr)(rkmessage._private)))
					if found {
						cdr := cg.(cgoDr)

						if cdr.deliveryChan != nil {
							ch = &cdr.deliveryChan
						}
						msg.Opaque = cdr.opaque
					}
				}

				if ch == nil && h.fwdDr {
					ch = &channel
				}

				if ch != nil {
					select {
					case *ch <- msg:
					case <-termChan:
						retval = nil
						term = true
						break out
					}

				} else {
					retval = msg
					break out
				}
			}

		case C.RD_KAFKA_EVENT_OFFSET_COMMIT:
			// Offsets committed
			cErr := C.rd_kafka_event_error(rkev)
			coffsets := C.rd_kafka_event_topic_partition_list(rkev)
			var offsets []TopicPartition
			if coffsets != nil {
				offsets = newTopicPartitionsFromCparts(coffsets)
			}

			if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
				retval = OffsetsCommitted{newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)), offsets}
			} else {
				retval = OffsetsCommitted{nil, offsets}
			}

		case C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
			ev := OAuthBearerTokenRefresh{C.GoString(C.rd_kafka_event_config_string(rkev))}
			retval = ev

		case C.RD_KAFKA_EVENT_NONE:
			// poll timed out: no events available
			break out

		default:
			if rkev != nil {
				fmt.Fprintf(os.Stderr, "Ignored event %s\n",
					C.GoString(C.rd_kafka_event_name(rkev)))
			}
		}

		if retval != nil {
			if channel != nil {
				select {
				case channel <- retval:
				case <-termChan:
					retval = nil
					term = true
					break out
				}
			} else {
				break out
			}
		}
	}

	if prevRkev != nil {
		C.rd_kafka_event_destroy(prevRkev)
	}

	return retval, term
}

@ -0,0 +1,337 @@
package kafka

// Copyright 2016-2021 Confluent Inc.
// AUTOMATICALLY GENERATED ON 2021-12-08 12:44:39.243338672 +0100 CET m=+0.000248284 USING librdkafka 1.8.2

/*
#include "select_rdkafka.h"
*/
import "C"

// ErrorCode is the integer representation of local and broker error codes
type ErrorCode int

// String returns a human readable representation of an error code
func (c ErrorCode) String() string {
	return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c)))
}

const (
	// ErrBadMsg Local: Bad message format
	ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
	// ErrBadCompression Local: Invalid compressed data
	ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION)
	// ErrDestroy Local: Broker handle destroyed
	ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY)
	// ErrFail Local: Communication failure with broker
	ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL)
	// ErrTransport Local: Broker transport failure
	ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT)
	// ErrCritSysResource Local: Critical system resource failure
	ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE)
	// ErrResolve Local: Host resolution failure
	ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE)
	// ErrMsgTimedOut Local: Message timed out
	ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
	// ErrPartitionEOF Broker: No more messages
	ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF)
	// ErrUnknownPartition Local: Unknown partition
	ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
	// ErrFs Local: File or filesystem error
	ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS)
	// ErrUnknownTopic Local: Unknown topic
	ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
	// ErrAllBrokersDown Local: All broker connections are down
	ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
	// ErrInvalidArg Local: Invalid argument or configuration
	ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG)
	// ErrTimedOut Local: Timed out
	ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
	// ErrQueueFull Local: Queue full
	ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL)
	// ErrIsrInsuff Local: ISR count insufficient
	ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF)
	// ErrNodeUpdate Local: Broker node update
	ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE)
	// ErrSsl Local: SSL error
	ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL)
	// ErrWaitCoord Local: Waiting for coordinator
	ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD)
	// ErrUnknownGroup Local: Unknown group
	ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP)
	// ErrInProgress Local: Operation in progress
	ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS)
	// ErrPrevInProgress Local: Previous operation in progress
	ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS)
	// ErrExistingSubscription Local: Existing subscription
	ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION)
	// ErrAssignPartitions Local: Assign partitions
	ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
	// ErrRevokePartitions Local: Revoke partitions
	ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
	// ErrConflict Local: Conflicting use
	ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT)
	// ErrState Local: Erroneous state
	ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE)
	// ErrUnknownProtocol Local: Unknown protocol
	ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL)
	// ErrNotImplemented Local: Not implemented
	ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED)
	// ErrAuthentication Local: Authentication failure
	ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION)
	// ErrNoOffset Local: No offset stored
	ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET)
	// ErrOutdated Local: Outdated
	ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED)
	// ErrTimedOutQueue Local: Timed out in queue
	ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE)
	// ErrUnsupportedFeature Local: Required feature not supported by broker
	ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE)
	// ErrWaitCache Local: Awaiting cache update
	ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE)
	// ErrIntr Local: Operation interrupted
	ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR)
	// ErrKeySerialization Local: Key serialization error
	ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION)
	// ErrValueSerialization Local: Value serialization error
	ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION)
	// ErrKeyDeserialization Local: Key deserialization error
	ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION)
	// ErrValueDeserialization Local: Value deserialization error
	ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION)
	// ErrPartial Local: Partial response
	ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL)
	// ErrReadOnly Local: Read-only object
	ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY)
	// ErrNoent Local: No such entry
	ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT)
	// ErrUnderflow Local: Read underflow
	ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW)
	// ErrInvalidType Local: Invalid type
	ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE)
	// ErrRetry Local: Retry operation
	ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY)
	// ErrPurgeQueue Local: Purged in queue
	ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE)
	// ErrPurgeInflight Local: Purged in flight
	ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT)
	// ErrFatal Local: Fatal error
	ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL)
	// ErrInconsistent Local: Inconsistent state
	ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT)
	// ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding
	ErrGaplessGuarantee ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE)
	// ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded
	ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
	// ErrUnknownBroker Local: Unknown broker
	ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER)
	// ErrNotConfigured Local: Functionality not configured
	ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED)
	// ErrFenced Local: This instance has been fenced by a newer instance
	ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED)
	// ErrApplication Local: Application generated error
	ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION)
	// ErrAssignmentLost Local: Group partition assignment lost
	ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST)
	// ErrNoop Local: No operation performed
	ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP)
	// ErrAutoOffsetReset Local: No offset to automatically reset to
	ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET)
	// ErrUnknown Unknown broker error
	ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
	// ErrNoError Success
	ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR)
	// ErrOffsetOutOfRange Broker: Offset out of range
	ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE)
||||||
|
// ErrInvalidMsg Broker: Invalid message
|
||||||
|
ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG) |
||||||
|
// ErrUnknownTopicOrPart Broker: Unknown topic or partition
|
||||||
|
ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) |
||||||
|
// ErrInvalidMsgSize Broker: Invalid message size
|
||||||
|
ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE) |
||||||
|
// ErrLeaderNotAvailable Broker: Leader not available
|
||||||
|
ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) |
||||||
|
// ErrNotLeaderForPartition Broker: Not leader for partition
|
||||||
|
ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION) |
||||||
|
// ErrRequestTimedOut Broker: Request timed out
|
||||||
|
ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT) |
||||||
|
// ErrBrokerNotAvailable Broker: Broker not available
|
||||||
|
ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE) |
||||||
|
// ErrReplicaNotAvailable Broker: Replica not available
|
||||||
|
ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE) |
||||||
|
// ErrMsgSizeTooLarge Broker: Message size too large
|
||||||
|
ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE) |
||||||
|
// ErrStaleCtrlEpoch Broker: StaleControllerEpochCode
|
||||||
|
ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH) |
||||||
|
// ErrOffsetMetadataTooLarge Broker: Offset metadata string too large
|
||||||
|
ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE) |
||||||
|
// ErrNetworkException Broker: Broker disconnected before response received
|
||||||
|
ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION) |
||||||
|
// ErrCoordinatorLoadInProgress Broker: Coordinator load in progress
|
||||||
|
ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS) |
||||||
|
// ErrCoordinatorNotAvailable Broker: Coordinator not available
|
||||||
|
ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE) |
||||||
|
// ErrNotCoordinator Broker: Not coordinator
|
||||||
|
ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR) |
||||||
|
// ErrTopicException Broker: Invalid topic
|
||||||
|
ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION) |
||||||
|
// ErrRecordListTooLarge Broker: Message batch larger than configured server segment size
|
||||||
|
ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE) |
||||||
|
// ErrNotEnoughReplicas Broker: Not enough in-sync replicas
|
||||||
|
ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS) |
||||||
|
// ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas
|
||||||
|
ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND) |
||||||
|
// ErrInvalidRequiredAcks Broker: Invalid required acks value
|
||||||
|
ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS) |
||||||
|
// ErrIllegalGeneration Broker: Specified group generation id is not valid
|
||||||
|
ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION) |
||||||
|
// ErrInconsistentGroupProtocol Broker: Inconsistent group protocol
|
||||||
|
ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL) |
||||||
|
// ErrInvalidGroupID Broker: Invalid group.id
|
||||||
|
ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID) |
||||||
|
// ErrUnknownMemberID Broker: Unknown member
|
||||||
|
ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID) |
||||||
|
// ErrInvalidSessionTimeout Broker: Invalid session timeout
|
||||||
|
ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT) |
||||||
|
// ErrRebalanceInProgress Broker: Group rebalance in progress
|
||||||
|
ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS) |
||||||
|
// ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid
|
||||||
|
ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE) |
||||||
|
// ErrTopicAuthorizationFailed Broker: Topic authorization failed
|
||||||
|
ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) |
||||||
|
// ErrGroupAuthorizationFailed Broker: Group authorization failed
|
||||||
|
ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED) |
||||||
|
// ErrClusterAuthorizationFailed Broker: Cluster authorization failed
|
||||||
|
ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED) |
||||||
|
// ErrInvalidTimestamp Broker: Invalid timestamp
|
||||||
|
ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP) |
||||||
|
// ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism
|
||||||
|
ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM) |
||||||
|
// ErrIllegalSaslState Broker: Request not valid in current SASL state
|
||||||
|
ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE) |
||||||
|
// ErrUnsupportedVersion Broker: API version not supported
|
||||||
|
ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION) |
||||||
|
// ErrTopicAlreadyExists Broker: Topic already exists
|
||||||
|
ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS) |
||||||
|
// ErrInvalidPartitions Broker: Invalid number of partitions
|
||||||
|
ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS) |
||||||
|
// ErrInvalidReplicationFactor Broker: Invalid replication factor
|
||||||
|
ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR) |
||||||
|
// ErrInvalidReplicaAssignment Broker: Invalid replica assignment
|
||||||
|
ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT) |
||||||
|
// ErrInvalidConfig Broker: Configuration is invalid
|
||||||
|
ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG) |
||||||
|
// ErrNotController Broker: Not controller for cluster
|
||||||
|
ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER) |
||||||
|
// ErrInvalidRequest Broker: Invalid request
|
||||||
|
ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST) |
||||||
|
// ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request
|
||||||
|
ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT) |
||||||
|
// ErrPolicyViolation Broker: Policy violation
|
||||||
|
ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION) |
||||||
|
// ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number
|
||||||
|
ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER) |
||||||
|
// ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number
|
||||||
|
ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER) |
||||||
|
// ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch
|
||||||
|
ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH) |
||||||
|
// ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state
|
||||||
|
ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE) |
||||||
|
// ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id
|
||||||
|
ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING) |
||||||
|
// ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
|
||||||
|
ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT) |
||||||
|
// ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
|
||||||
|
ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS) |
||||||
|
// ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
|
||||||
|
ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED) |
||||||
|
// ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed
|
||||||
|
ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED) |
||||||
|
// ErrSecurityDisabled Broker: Security features are disabled
|
||||||
|
ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED) |
||||||
|
// ErrOperationNotAttempted Broker: Operation not attempted
|
||||||
|
ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED) |
||||||
|
// ErrKafkaStorageError Broker: Disk error when trying to access log file on disk
|
||||||
|
ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR) |
||||||
|
// ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config
|
||||||
|
ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND) |
||||||
|
// ErrSaslAuthenticationFailed Broker: SASL Authentication failed
|
||||||
|
ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED) |
||||||
|
// ErrUnknownProducerID Broker: Unknown Producer Id
|
||||||
|
ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID) |
||||||
|
// ErrReassignmentInProgress Broker: Partition reassignment is in progress
|
||||||
|
ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS) |
||||||
|
// ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled
|
||||||
|
ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED) |
||||||
|
// ErrDelegationTokenNotFound Broker: Delegation Token is not found on server
|
||||||
|
ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND) |
||||||
|
// ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer
|
||||||
|
ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH) |
||||||
|
// ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection
|
||||||
|
ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED) |
||||||
|
// ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed
|
||||||
|
ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED) |
||||||
|
// ErrDelegationTokenExpired Broker: Delegation Token is expired
|
||||||
|
ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED) |
||||||
|
// ErrInvalidPrincipalType Broker: Supplied principalType is not supported
|
||||||
|
ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE) |
||||||
|
// ErrNonEmptyGroup Broker: The group is not empty
|
||||||
|
ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP) |
||||||
|
// ErrGroupIDNotFound Broker: The group id does not exist
|
||||||
|
ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND) |
||||||
|
// ErrFetchSessionIDNotFound Broker: The fetch session ID was not found
|
||||||
|
ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND) |
||||||
|
// ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid
|
||||||
|
ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH) |
||||||
|
// ErrListenerNotFound Broker: No matching listener
|
||||||
|
ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND) |
||||||
|
// ErrTopicDeletionDisabled Broker: Topic deletion is disabled
|
||||||
|
ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED) |
||||||
|
// ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch
|
||||||
|
ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH) |
||||||
|
// ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch
|
||||||
|
ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH) |
||||||
|
// ErrUnsupportedCompressionType Broker: Unsupported compression type
|
||||||
|
ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE) |
||||||
|
// ErrStaleBrokerEpoch Broker: Broker epoch has changed
|
||||||
|
ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH) |
||||||
|
// ErrOffsetNotAvailable Broker: Leader high watermark is not caught up
|
||||||
|
ErrOffsetNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE) |
||||||
|
// ErrMemberIDRequired Broker: Group member needs a valid member ID
|
||||||
|
ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED) |
||||||
|
// ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available
|
||||||
|
ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE) |
||||||
|
// ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size
|
||||||
|
ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED) |
||||||
|
// ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id
|
||||||
|
ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID) |
||||||
|
// ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available
|
||||||
|
ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE) |
||||||
|
// ErrElectionNotNeeded Broker: Leader election not needed for topic partition
|
||||||
|
ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED) |
||||||
|
// ErrNoReassignmentInProgress Broker: No partition reassignment is in progress
|
||||||
|
ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS) |
||||||
|
// ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it
|
||||||
|
ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC) |
||||||
|
// ErrInvalidRecord Broker: Broker failed to validate record
|
||||||
|
ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD) |
||||||
|
// ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared
|
||||||
|
ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) |
||||||
|
// ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded
|
||||||
|
ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED) |
||||||
|
// ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one
|
||||||
|
ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED) |
||||||
|
// ErrResourceNotFound Broker: Request illegally referred to resource that does not exist
|
||||||
|
ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND) |
||||||
|
// ErrDuplicateResource Broker: Request illegally referred to the same resource twice
|
||||||
|
ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE) |
||||||
|
// ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability
|
||||||
|
ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL) |
||||||
|
	// ErrInconsistentVoterSet Broker: Indicates that either the sender or recipient of a voter-only request is not one of the expected voters
	ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET)
	// ErrInvalidUpdateVersion Broker: Invalid update version
	ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION)
	// ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error
	ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED)
	// ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding
	ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE)
)
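
// Illustrative usage sketch (not part of the original file): errors returned
// by this package can be type-asserted to Error, whose Code() is compared
// against the constants above.
//
//	if kerr, ok := err.(Error); ok && kerr.Code() == ErrAllBrokersDown {
//		// all brokers unreachable: back off before retrying
//	}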
@ -0,0 +1,48 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once


/**
 * Glue between Go, Cgo and librdkafka
 */


/**
 * Temporary C to Go header representation
 */
typedef struct tmphdr_s {
  const char *key;
  const void *val;   // producer: malloc()ed by Go code if size > 0
                     // consumer: owned by librdkafka
  ssize_t     size;
} tmphdr_t;



/**
 * @struct This is a glue struct used by the C code in this client to
 *         effectively map fields from a librdkafka rd_kafka_message_t
 *         to something usable in Go with as few CGo calls as possible.
 */
typedef struct glue_msg_s {
  rd_kafka_message_t *msg;
  rd_kafka_timestamp_type_t tstype;
  int64_t ts;
  tmphdr_t *tmphdrs;
  size_t tmphdrsCnt;
  int8_t want_hdrs;  /**< If true, copy headers */
} glue_msg_t;
@ -0,0 +1,379 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"fmt"
	"strings"
	"sync"
	"time"
	"unsafe"
)

/*
#include "select_rdkafka.h"
#include <stdlib.h>
*/
import "C"

// OAuthBearerToken represents the data to be transmitted
// to a broker during SASL/OAUTHBEARER authentication.
type OAuthBearerToken struct {
	// Token value, often (but not necessarily) a JWS compact serialization
	// as per https://tools.ietf.org/html/rfc7515#section-3.1; it must meet
	// the regular expression for a SASL/OAUTHBEARER value defined at
	// https://tools.ietf.org/html/rfc7628#section-3.1
	TokenValue string
	// Metadata about the token indicating when it expires (local time);
	// it must represent a time in the future
	Expiration time.Time
	// Metadata about the token indicating the Kafka principal name
	// to which it applies (for example, "admin")
	Principal string
	// SASL extensions, if any, to be communicated to the broker during
	// authentication (all keys and values of which must meet the regular
	// expressions defined at https://tools.ietf.org/html/rfc7628#section-3.1,
	// and it must not contain the reserved "auth" key)
	Extensions map[string]string
}

// Handle represents a generic client handle containing common parts for
// both Producer and Consumer.
type Handle interface {
	// SetOAuthBearerToken sets the data to be transmitted
	// to a broker during SASL/OAUTHBEARER authentication. It will return nil
	// on success, otherwise an error if:
	// 1) the token data is invalid (meaning an expiration time in the past
	// or either a token value or an extension key or value that does not meet
	// the regular expression requirements as per
	// https://tools.ietf.org/html/rfc7628#section-3.1);
	// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
	// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
	// authentication mechanism.
	SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

	// SetOAuthBearerTokenFailure sets the error message describing why token
	// retrieval/setting failed; it also schedules a new token refresh event for 10
	// seconds later so the attempt may be retried. It will return nil on
	// success, otherwise an error if:
	// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
	// 2) SASL/OAUTHBEARER is supported but is not configured as the client's
	// authentication mechanism.
	SetOAuthBearerTokenFailure(errstr string) error

	// gethandle() returns the internal handle struct pointer
	gethandle() *handle
}
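
// Illustrative sketch (not part of the original file): one way an application
// might respond to an OAuthBearerTokenRefresh event using this interface.
// fetchToken is a hypothetical helper returning an OAuthBearerToken;
// SetOAuthBearerToken and SetOAuthBearerTokenFailure are the documented API.
//
//	func handleTokenRefresh(h Handle) {
//		token, err := fetchToken() // hypothetical OAuth retrieval
//		if err != nil {
//			h.SetOAuthBearerTokenFailure(err.Error())
//			return
//		}
//		if err := h.SetOAuthBearerToken(token); err != nil {
//			h.SetOAuthBearerTokenFailure(err.Error())
//		}
//	}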

// Common instance handle for both Producer and Consumer
type handle struct {
	rk  *C.rd_kafka_t
	rkq *C.rd_kafka_queue_t

	// Forward logs from librdkafka log queue to logs channel.
	logs          chan LogEvent
	logq          *C.rd_kafka_queue_t
	closeLogsChan bool

	// Topic <-> rkt caches
	rktCacheLock sync.Mutex
	// topic name -> rkt cache
	rktCache map[string]*C.rd_kafka_topic_t
	// rkt -> topic name cache
	rktNameCache map[*C.rd_kafka_topic_t]string

	// Cached instance name to avoid CGo call in String()
	name string

	//
	// cgo map
	// Maps C callbacks based on cgoid back to its Go object
	cgoLock   sync.Mutex
	cgoidNext uintptr
	cgomap    map[int]cgoif

	//
	// producer
	//
	p *Producer

	// Forward delivery reports on Producer.Events channel
	fwdDr bool

	// Enabled message fields for delivery reports and consumed messages.
	msgFields *messageFields

	//
	// consumer
	//
	c *Consumer

	// WaitGroup to wait for spawned go-routines to finish.
	waitGroup sync.WaitGroup
}

func (h *handle) String() string {
	return h.name
}

func (h *handle) setup() {
	h.rktCache = make(map[string]*C.rd_kafka_topic_t)
	h.rktNameCache = make(map[*C.rd_kafka_topic_t]string)
	h.cgomap = make(map[int]cgoif)
	h.name = C.GoString(C.rd_kafka_name(h.rk))
	if h.msgFields == nil {
		h.msgFields = newMessageFields()
	}
}

func (h *handle) cleanup() {
	if h.logs != nil {
		C.rd_kafka_queue_destroy(h.logq)
		if h.closeLogsChan {
			close(h.logs)
		}
	}

	for _, crkt := range h.rktCache {
		C.rd_kafka_topic_destroy(crkt)
	}

	if h.rkq != nil {
		C.rd_kafka_queue_destroy(h.rkq)
	}
}

func (h *handle) setupLogQueue(logsChan chan LogEvent, termChan chan bool) {
	if logsChan == nil {
		logsChan = make(chan LogEvent, 10000)
		h.closeLogsChan = true
	}

	h.logs = logsChan

	// Let librdkafka forward logs to our log queue instead of the main queue
	h.logq = C.rd_kafka_queue_new(h.rk)
	C.rd_kafka_set_log_queue(h.rk, h.logq)

	// Start a polling goroutine to consume the log queue
	h.waitGroup.Add(1)
	go func() {
		h.pollLogEvents(h.logs, 100, termChan)
		h.waitGroup.Done()
	}()
}
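
// Illustrative sketch (assumption: the "go.logs.channel.enable" and
// "go.logs.channel" configuration properties route librdkafka logs through
// setupLogQueue above). Supplying an application-owned log channel:
//
//	logs := make(chan LogEvent, 100)
//	c, _ := NewConsumer(&ConfigMap{
//		"bootstrap.servers":      "localhost:9092",
//		"group.id":               "example",
//		"go.logs.channel.enable": true,
//		"go.logs.channel":        logs,
//	})
//	go func() {
//		for ev := range logs {
//			fmt.Println(ev.String())
//		}
//	}()
//	_ = c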

// getRkt0 finds or creates and returns a C topic_t object from the local cache.
func (h *handle) getRkt0(topic string, ctopic *C.char, doLock bool) (crkt *C.rd_kafka_topic_t) {
	if doLock {
		h.rktCacheLock.Lock()
		defer h.rktCacheLock.Unlock()
	}
	crkt, ok := h.rktCache[topic]
	if ok {
		return crkt
	}

	if ctopic == nil {
		ctopic = C.CString(topic)
		defer C.free(unsafe.Pointer(ctopic))
	}

	crkt = C.rd_kafka_topic_new(h.rk, ctopic, nil)
	if crkt == nil {
		panic(fmt.Sprintf("Unable to create new C topic \"%s\": %s",
			topic, C.GoString(C.rd_kafka_err2str(C.rd_kafka_last_error()))))
	}

	h.rktCache[topic] = crkt
	h.rktNameCache[crkt] = topic

	return crkt
}

// getRkt finds or creates and returns a C topic_t object from the local cache.
func (h *handle) getRkt(topic string) (crkt *C.rd_kafka_topic_t) {
	return h.getRkt0(topic, nil, true)
}

// getTopicNameFromRkt returns the topic name for a C topic_t object, preferably
// using the local cache to avoid a cgo call.
func (h *handle) getTopicNameFromRkt(crkt *C.rd_kafka_topic_t) (topic string) {
	h.rktCacheLock.Lock()
	defer h.rktCacheLock.Unlock()

	topic, ok := h.rktNameCache[crkt]
	if ok {
		return topic
	}

	// we need our own copy/refcount of the crkt
	ctopic := C.rd_kafka_topic_name(crkt)
	topic = C.GoString(ctopic)

	crkt = h.getRkt0(topic, ctopic, false /* don't lock */)

	return topic
}

// cgoif is a generic interface for holding Go state passed as an opaque
// value to the C code.
// Since pointers to complex Go types cannot be passed to C we instead create
// a cgoif object, generate a unique id that is added to the cgomap,
// and then pass that id to the C code. When the C code callback is called we
// use the id to look up the cgoif object in the cgomap.
type cgoif interface{}

// delivery report cgoif container
type cgoDr struct {
	deliveryChan chan Event
	opaque       interface{}
}

// cgoPut adds object cg to the handle's cgo map and returns a
// unique id for the added entry.
// Thread-safe.
// FIXME: the uniqueness of the id is questionable over time.
func (h *handle) cgoPut(cg cgoif) (cgoid int) {
	h.cgoLock.Lock()
	defer h.cgoLock.Unlock()

	h.cgoidNext++
	if h.cgoidNext == 0 {
		h.cgoidNext++
	}
	cgoid = (int)(h.cgoidNext)
	h.cgomap[cgoid] = cg
	return cgoid
}

// cgoGet looks up cgoid in the cgo map, deletes the reference from the map
// and returns the object, if found. Else returns nil, false.
// Thread-safe.
func (h *handle) cgoGet(cgoid int) (cg cgoif, found bool) {
	if cgoid == 0 {
		return nil, false
	}

	h.cgoLock.Lock()
	defer h.cgoLock.Unlock()
	cg, found = h.cgomap[cgoid]
	if found {
		delete(h.cgomap, cgoid)
	}

	return cg, found
}
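
// Illustrative sketch (hypothetical caller): the cgoPut/cgoGet round-trip.
// A Go value is registered before control passes to C and resolved again in
// the callback; only the small integer id ever crosses the cgo boundary.
//
//	cgoid := h.cgoPut(cgoDr{deliveryChan: deliveryChan, opaque: opaque})
//	// ... hand cgoid to C as the message opaque ...
//	if cg, found := h.cgoGet(cgoid); found {
//		dr := cg.(cgoDr) // recover the original Go state
//		_ = dr
//	}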

// setOauthBearerToken - see rd_kafka_oauthbearer_set_token()
func (h *handle) setOAuthBearerToken(oauthBearerToken OAuthBearerToken) error {
	cTokenValue := C.CString(oauthBearerToken.TokenValue)
	defer C.free(unsafe.Pointer(cTokenValue))

	cPrincipal := C.CString(oauthBearerToken.Principal)
	defer C.free(unsafe.Pointer(cPrincipal))

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cExtensions := make([]*C.char, 2*len(oauthBearerToken.Extensions))
	extensionSize := 0
	for key, value := range oauthBearerToken.Extensions {
		cExtensions[extensionSize] = C.CString(key)
		defer C.free(unsafe.Pointer(cExtensions[extensionSize]))
		extensionSize++
		cExtensions[extensionSize] = C.CString(value)
		defer C.free(unsafe.Pointer(cExtensions[extensionSize]))
		extensionSize++
	}

	var cExtensionsToUse **C.char
	if extensionSize > 0 {
		cExtensionsToUse = (**C.char)(unsafe.Pointer(&cExtensions[0]))
	}

	cErr := C.rd_kafka_oauthbearer_set_token(h.rk, cTokenValue,
		C.int64_t(oauthBearerToken.Expiration.UnixNano()/(1000*1000)), cPrincipal,
		cExtensionsToUse, C.size_t(extensionSize), cErrstr, cErrstrSize)
	if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil
	}
	return newErrorFromCString(cErr, cErrstr)
}

// setOauthBearerTokenFailure - see rd_kafka_oauthbearer_set_token_failure()
func (h *handle) setOAuthBearerTokenFailure(errstr string) error {
	cerrstr := C.CString(errstr)
	defer C.free(unsafe.Pointer(cerrstr))
	cErr := C.rd_kafka_oauthbearer_set_token_failure(h.rk, cerrstr)
	if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil
	}
	return newError(cErr)
}

// messageFields controls which fields are made available for producer delivery reports & consumed messages.
// true values indicate that the field should be included
type messageFields struct {
	Key     bool
	Value   bool
	Headers bool
}

// disableAll disables all fields
func (mf *messageFields) disableAll() {
	mf.Key = false
	mf.Value = false
	mf.Headers = false
}

// newMessageFields returns a new messageFields with all fields enabled
func newMessageFields() *messageFields {
	return &messageFields{
		Key:     true,
		Value:   true,
		Headers: true,
	}
}

// newMessageFieldsFrom constructs a new messageFields from the given configuration value
func newMessageFieldsFrom(v ConfigValue) (*messageFields, error) {
	msgFields := newMessageFields()
	switch v {
	case "all":
		// nothing to do
	case "", "none":
		msgFields.disableAll()
	default:
		msgFields.disableAll()
		for _, value := range strings.Split(v.(string), ",") {
			switch value {
			case "key":
				msgFields.Key = true
			case "value":
				msgFields.Value = true
			case "headers":
				msgFields.Headers = true
			default:
				return nil, fmt.Errorf("unknown message field: %s", value)
			}
		}
	}
	return msgFields, nil
}
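
// Illustrative sketch (assumption: this parser backs the
// "go.delivery.report.fields" configuration property): shrinking delivery
// reports so they carry only the message key.
//
//	p, _ := NewProducer(&ConfigMap{
//		"bootstrap.servers":         "localhost:9092",
//		"go.delivery.report.fields": "key",
//	})
//	_ = p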
@ -0,0 +1,67 @@
package kafka

/**
 * Copyright 2018 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"fmt"
	"strconv"
)

/*
#include <string.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"
*/
import "C"

// Header represents a single Kafka message header.
//
// Message headers are made up of a list of Header elements, retaining their original insert
// order and allowing for duplicate Keys.
//
// Key is a human readable string identifying the header.
// Value is the key's binary value; Kafka does not put any restrictions on the format
// of the Value but it should be made relatively compact.
// The value may be a byte array, empty, or nil.
//
// NOTE: Message headers are not available on producer delivery report messages.
type Header struct {
	Key   string // Header name (utf-8 string)
	Value []byte // Header value (nil, empty, or binary)
}

// String returns the Header Key and data in a human representable possibly truncated form
// suitable for displaying to the user.
func (h Header) String() string {
	if h.Value == nil {
		return fmt.Sprintf("%s=nil", h.Key)
	}

	valueLen := len(h.Value)
	if valueLen == 0 {
		return fmt.Sprintf("%s=<empty>", h.Key)
	}

	truncSize := valueLen
	trunc := ""
	if valueLen > 50+15 {
		truncSize = 50
		trunc = fmt.Sprintf("(%d more bytes)", valueLen-truncSize)
	}

	return fmt.Sprintf("%s=%s%s", h.Key, strconv.Quote(string(h.Value[:truncSize])), trunc)
}
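
// Illustrative sketch: constructing a header and relying on String() above
// for log-friendly output (values longer than 65 bytes are truncated to 50
// bytes plus a byte count).
//
//	hdr := Header{Key: "trace-id", Value: []byte("abc123")}
//	fmt.Println(hdr) // trace-id="abc123"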
@ -0,0 +1,375 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package kafka provides high-level Apache Kafka producer and consumers
// using bindings on top of the librdkafka C library.
//
//
// High-level Consumer
//
// * Decide if you want to read messages and events by calling `.Poll()` or
// the deprecated option of using the `.Events()` channel. (If you want to use
// the `.Events()` channel then set `"go.events.channel.enable": true`.)
//
// * Create a Consumer with `kafka.NewConsumer()` providing at
// least the `bootstrap.servers` and `group.id` configuration properties.
//
// * Call `.Subscribe()` (or `.SubscribeTopics()` to subscribe to multiple topics)
// to join the group with the specified subscription set.
// Subscriptions are atomic; calling `.Subscribe*()` again will leave
// the group and rejoin with the new set of topics.
//
// * Start reading events and messages from either the `.Events` channel
// or by calling `.Poll()`.
//
// * When the group has rebalanced each client member is assigned a
// (sub-)set of topic+partitions.
// By default the consumer will start fetching messages for its assigned
// partitions at this point, but your application may enable rebalance
// events to get an insight into what the assigned partitions were
// as well as set the initial offsets. To do this you need to pass
// `"go.application.rebalance.enable": true` to the `NewConsumer()` call
// mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event
// with the assigned partition set. You can optionally modify the initial
// offsets (they'll default to stored offsets, and if there are no previously stored
// offsets it will fall back to `"auto.offset.reset"`,
// which defaults to the `latest` message) and then call `.Assign(partitions)`
// to start consuming. If you don't need to modify the initial offsets you will
// not need to call `.Assign()`; the client will do so automatically for you if
// you don't, unless you are using the channel-based consumer, in which case
// you MUST call `.Assign()` when receiving the `AssignedPartitions` and
// `RevokedPartitions` events.
//
// * As messages are fetched they will be made available on either the
// `.Events` channel or by calling `.Poll()`; look for event type `*kafka.Message`.
//
// * Handle messages, events and errors to your liking.
//
// * When you are done consuming call `.Close()` to commit final offsets
// and leave the consumer group.
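//
// A minimal poll-loop sketch of the steps above (error handling elided;
// broker address and topic names are placeholders):
//
//     c, _ := kafka.NewConsumer(&kafka.ConfigMap{
//         "bootstrap.servers": "localhost:9092",
//         "group.id":          "example-group"})
//     c.SubscribeTopics([]string{"example-topic"}, nil)
//     for {
//         switch e := c.Poll(100).(type) {
//         case *kafka.Message:
//             fmt.Printf("%% Message on %s: %s\n", e.TopicPartition, string(e.Value))
//         case kafka.Error:
//             fmt.Printf("%% Error: %v\n", e)
//         }
//     }
//     // c.Close() once the loop exits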
//
//
// Producer
//
// * Create a Producer with `kafka.NewProducer()` providing at least
// the `bootstrap.servers` configuration property.
//
// * Messages may now be produced either by sending a `*kafka.Message`
// on the `.ProduceChannel` or by calling `.Produce()`.
//
// * Producing is an asynchronous operation so the client notifies the application
// of per-message produce success or failure through something called delivery reports.
// Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message`
// and you should check `msg.TopicPartition.Error` for `nil` to find out if the message
// was successfully delivered or not.
// It is also possible to direct delivery reports to alternate channels
// by providing a non-nil `chan Event` channel to `.Produce()`.
// If no delivery reports are wanted they can be completely disabled by
// setting configuration property `"go.delivery.reports": false`.
//
// * When you are done producing messages you will need to make sure all messages
// are indeed delivered to the broker (or failed); remember that this is
// an asynchronous client so some of your messages may be lingering in internal
// channels or transmission queues.
// To do this you can either keep track of the messages you've produced
// and wait for their corresponding delivery reports, or call the convenience
// function `.Flush()` that will block until all message deliveries are done
// or the provided timeout elapses.
//
// * Finally call `.Close()` to decommission the producer.
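//
// A minimal produce-and-flush sketch of the steps above (error handling
// elided; broker address and topic are placeholders):
//
//     p, _ := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
//     topic := "example-topic"
//     p.Produce(&kafka.Message{
//         TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
//         Value:          []byte("hello")}, nil)
//     e := <-p.Events() // first event; a delivery report is a *kafka.Message
//     if m, ok := e.(*kafka.Message); ok && m.TopicPartition.Error != nil {
//         // handle failed delivery
//     }
//     p.Flush(15 * 1000)
//     p.Close()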
//
//
// Transactional producer API
//
// The transactional producer operates on top of the idempotent producer,
// and provides full exactly-once semantics (EOS) for Apache Kafka when used
// with the transaction aware consumer (`isolation.level=read_committed`).
//
// A producer instance is configured for transactions by setting the
// `transactional.id` to an identifier unique for the application. This
// id will be used to fence stale transactions from previous instances of
// the application, typically following an outage or crash.
//
// After creating the transactional producer instance using `NewProducer()`
// the transactional state must be initialized by calling
// `InitTransactions()`. This is a blocking call that will
// acquire a runtime producer id from the transaction coordinator broker
// as well as abort any stale transactions and fence any still running producer
// instances with the same `transactional.id`.
//
// Once transactions are initialized the application may begin a new
// transaction by calling `BeginTransaction()`.
// A producer instance may only have a single ongoing transaction.
//
// Any messages produced after the transaction has been started will
// belong to the ongoing transaction and will be committed or aborted
// atomically.
// It is not permitted to produce messages outside a transaction
// boundary, e.g., before `BeginTransaction()` or after `CommitTransaction()`,
// `AbortTransaction()`, or if the current transaction has failed.
//
// If consumed messages are used as input to the transaction, the consumer
// instance must be configured with `enable.auto.commit` set to `false`.
// To commit the consumed offsets along with the transaction pass the
// list of consumed partitions and the last offset processed + 1 to
// `SendOffsetsToTransaction()` prior to committing the transaction.
// This allows an aborted transaction to be restarted using the previously
// committed offsets.
//
// To commit the produced messages, and any consumed offsets, to the
// current transaction, call `CommitTransaction()`.
// This call will block until the transaction has been fully committed or
// failed (typically due to fencing by a newer producer instance).
//
// Alternatively, if processing fails, or an abortable transaction error is
// raised, the transaction needs to be aborted by calling
// `AbortTransaction()` which marks any produced messages and
// offset commits as aborted.
//
// After the current transaction has been committed or aborted a new
// transaction may be started by calling `BeginTransaction()` again.
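//
// A compressed sketch of one consume-transform-produce cycle under the API
// described above (error handling per the sections below is elided):
//
//     producer.InitTransactions(ctx)
//     producer.BeginTransaction()
//     // ... produce messages belonging to the transaction ...
//     producer.SendOffsetsToTransaction(ctx, offsets, consumerMetadata)
//     producer.CommitTransaction(ctx)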
|
||||||
|
//
|
||||||
|
// Retriable errors:
|
||||||
|
// Some error cases allow the attempted operation to be retried, this is
|
||||||
|
// indicated by the error object having the retriable flag set which can
|
||||||
|
// be detected by calling `err.(kafka.Error).IsRetriable()`.
|
||||||
|
// When this flag is set the application may retry the operation immediately
|
||||||
|
// or preferably after a shorter grace period (to avoid busy-looping).
|
||||||
|
// Retriable errors include timeouts, broker transport failures, etc.
|
||||||
|
//
|
||||||
|
// Abortable errors:
|
||||||
|
// An ongoing transaction may fail permanently due to various errors,
|
||||||
|
// such as transaction coordinator becoming unavailable, write failures to the
|
||||||
|
// Apache Kafka log, under-replicated partitions, etc.
|
||||||
|
// At this point the producer application must abort the current transaction
|
||||||
|
// using `AbortTransaction()` and optionally start a new transaction
|
||||||
|
// by calling `BeginTransaction()`.
|
||||||
|
// Whether an error is abortable or not is detected by calling
|
||||||
|
// `err.(kafka.Error).TxnRequiresAbort()` on the returned error object.
|
||||||
|
//
|
||||||
|
// Fatal errors:
|
||||||
|
// While the underlying idempotent producer will typically only raise
|
||||||
|
// fatal errors for unrecoverable cluster errors where the idempotency
|
||||||
|
// guarantees can't be maintained, most of these are treated as abortable by
|
||||||
|
// the transactional producer since transactions may be aborted and retried
|
||||||
|
// in their entirety;
|
||||||
|
// The transactional producer on the other hand introduces a set of additional
|
||||||
|
// fatal errors which the application needs to handle by shutting down the
|
||||||
|
// producer and terminate. There is no way for a producer instance to recover
|
||||||
|
// from fatal errors.
|
||||||
|
// Whether an error is fatal or not is detected by calling
|
||||||
|
// `err.(kafka.Error).IsFatal()` on the returned error object or by checking
|
||||||
|
// the global `GetFatalError()`.
|
||||||
|
//
|
||||||
|
// Handling of other errors:
|
||||||
|
// For errors that have neither retriable, abortable or the fatal flag set
|
||||||
|
// it is not always obvious how to handle them. While some of these errors
|
||||||
|
// may be indicative of bugs in the application code, such as when
|
||||||
|
// an invalid parameter is passed to a method, other errors might originate
|
||||||
|
// from the broker and be passed thru as-is to the application.
|
||||||
|
// The general recommendation is to treat these errors, that have
|
||||||
|
// neither the retriable or abortable flags set, as fatal.
|
||||||
|
//
|
||||||
|
// Error handling example:
|
||||||
|
// retry:
|
||||||
|
//
|
||||||
|
// err := producer.CommitTransaction(...)
|
||||||
|
// if err == nil {
|
||||||
|
// return nil
|
||||||
|
// } else if err.(kafka.Error).TxnRequiresAbort() {
|
||||||
|
// do_abort_transaction_and_reset_inputs()
|
||||||
|
// } else if err.(kafka.Error).IsRetriable() {
|
||||||
|
// goto retry
|
||||||
|
// } else { // treat all other errors as fatal errors
|
||||||
|
// panic(err)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Events
|
||||||
|
//
|
||||||
|
// Apart from emitting messages and delivery reports the client also communicates
|
||||||
|
// with the application through a number of different event types.
|
||||||
|
// An application may choose to handle or ignore these events.
|
||||||
|
//
|
||||||
|
// Consumer events
|
||||||
|
//
|
||||||
|
// * `*kafka.Message` - a fetched message.
|
||||||
|
//
|
||||||
|
// * `AssignedPartitions` - The assigned partition set for this client following a rebalance.
|
||||||
|
// Requires `go.application.rebalance.enable`
|
||||||
|
//
|
||||||
|
// * `RevokedPartitions` - The counter part to `AssignedPartitions` following a rebalance.
|
||||||
|
// `AssignedPartitions` and `RevokedPartitions` are symmetrical.
|
||||||
|
// Requires `go.application.rebalance.enable`
|
||||||
|
//
|
||||||
|
// * `PartitionEOF` - Consumer has reached the end of a partition.
|
||||||
|
// NOTE: The consumer will keep trying to fetch new messages for the partition.
|
||||||
|
//
|
||||||
|
// * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled).
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Producer events
|
||||||
|
//
|
||||||
|
// * `*kafka.Message` - delivery report for produced message.
|
||||||
|
// Check `.TopicPartition.Error` for delivery result.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Generic events for both Consumer and Producer
|
||||||
|
//
|
||||||
|
// * `KafkaError` - client (error codes are prefixed with _) or broker error.
|
||||||
|
// These errors are normally just informational since the
|
||||||
|
// client will try its best to automatically recover (eventually).
|
||||||
|
//
|
||||||
|
// * `OAuthBearerTokenRefresh` - retrieval of a new SASL/OAUTHBEARER token is required.
|
||||||
|
// This event only occurs with sasl.mechanism=OAUTHBEARER.
|
||||||
|
// Be sure to invoke SetOAuthBearerToken() on the Producer/Consumer/AdminClient
|
||||||
|
// instance when a successful token retrieval is completed, otherwise be sure to
|
||||||
|
// invoke SetOAuthBearerTokenFailure() to indicate that retrieval failed (or
|
||||||
|
// if setting the token failed, which could happen if an extension doesn't meet
|
||||||
|
// the required regular expression); invoking SetOAuthBearerTokenFailure() will
|
||||||
|
// schedule a new event for 10 seconds later so another retrieval can be attempted.
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Hint: If your application registers a signal notification
|
||||||
|
// (signal.Notify) makes sure the signals channel is buffered to avoid
|
||||||
|
// possible complications with blocking Poll() calls.
|
||||||
|
//
|
||||||
|
// Note: The Confluent Kafka Go client is safe for concurrent use.
|
||||||
|
package kafka |

import (
    "fmt"
    // Make sure librdkafka_vendor/ sub-directory is included in vendor pulls.
    _ "github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor"
    "unsafe"
)

/*
#include <stdlib.h>
#include <string.h>
#include "select_rdkafka.h"

static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) {
   return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL;
}
*/
import "C"

// PartitionAny represents any partition (for partitioning),
// or unspecified value (for all other cases)
const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)

// TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset.
type TopicPartition struct {
    Topic     *string
    Partition int32
    Offset    Offset
    Metadata  *string
    Error     error
}

func (p TopicPartition) String() string {
    topic := "<null>"
    if p.Topic != nil {
        topic = *p.Topic
    }
    if p.Error != nil {
        return fmt.Sprintf("%s[%d]@%s(%s)",
            topic, p.Partition, p.Offset, p.Error)
    }
    return fmt.Sprintf("%s[%d]@%s",
        topic, p.Partition, p.Offset)
}

// TopicPartitions is a slice of TopicPartition that also implements
// the sort interface
type TopicPartitions []TopicPartition

func (tps TopicPartitions) Len() int {
    return len(tps)
}

func (tps TopicPartitions) Less(i, j int) bool {
    if *tps[i].Topic < *tps[j].Topic {
        return true
    } else if *tps[i].Topic > *tps[j].Topic {
        return false
    }
    return tps[i].Partition < tps[j].Partition
}

func (tps TopicPartitions) Swap(i, j int) {
    tps[i], tps[j] = tps[j], tps[i]
}
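
// Since TopicPartitions implements sort.Interface, a slice can be ordered by
// topic and then partition. Illustrative sketch (assumes the caller imports
// "sort"):
//
//	sort.Sort(TopicPartitions(parts))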

// newCPartsFromTopicPartitions creates a new C rd_kafka_topic_partition_list_t
// from a TopicPartition array.
func newCPartsFromTopicPartitions(partitions []TopicPartition) (cparts *C.rd_kafka_topic_partition_list_t) {
    cparts = C.rd_kafka_topic_partition_list_new(C.int(len(partitions)))
    for _, part := range partitions {
        ctopic := C.CString(*part.Topic)
        defer C.free(unsafe.Pointer(ctopic))
        rktpar := C.rd_kafka_topic_partition_list_add(cparts, ctopic, C.int32_t(part.Partition))
        rktpar.offset = C.int64_t(part.Offset)

        if part.Metadata != nil {
            cmetadata := C.CString(*part.Metadata)
            rktpar.metadata = unsafe.Pointer(cmetadata)
            rktpar.metadata_size = C.size_t(len(*part.Metadata))
        }
    }

    return cparts
}

func setupTopicPartitionFromCrktpar(partition *TopicPartition, crktpar *C.rd_kafka_topic_partition_t) {

    topic := C.GoString(crktpar.topic)
    partition.Topic = &topic
    partition.Partition = int32(crktpar.partition)
    partition.Offset = Offset(crktpar.offset)
    if crktpar.metadata_size > 0 {
        size := C.int(crktpar.metadata_size)
        cstr := (*C.char)(unsafe.Pointer(crktpar.metadata))
        metadata := C.GoStringN(cstr, size)
        partition.Metadata = &metadata
    }
    if crktpar.err != C.RD_KAFKA_RESP_ERR_NO_ERROR {
        partition.Error = newError(crktpar.err)
    }
}

func newTopicPartitionsFromCparts(cparts *C.rd_kafka_topic_partition_list_t) (partitions []TopicPartition) {

    partcnt := int(cparts.cnt)

    partitions = make([]TopicPartition, partcnt)
    for i := 0; i < partcnt; i++ {
        crktpar := C._c_rdkafka_topic_partition_list_entry(cparts, C.int(i))
        setupTopicPartitionFromCrktpar(&partitions[i], crktpar)
    }

    return partitions
}

// LibraryVersion returns the underlying librdkafka library version as a
// (version_int, version_str) tuple.
func LibraryVersion() (int, string) {
    ver := (int)(C.rd_kafka_version())
    verstr := C.GoString(C.rd_kafka_version_str())
    return ver, verstr
}
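
// Illustrative use of LibraryVersion (sketch only):
//
//	ver, verstr := LibraryVersion()
//	fmt.Printf("librdkafka %s (0x%x)\n", verstr, ver)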
@ -0,0 +1,3 @@
*.tar.gz
*.tgz
tmp*
@ -0,0 +1,366 @@
LICENSE
--------------------------------------------------------------
librdkafka - Apache Kafka C driver library

Copyright (c) 2012-2020, Magnus Edenhill
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


LICENSE.crc32c
--------------------------------------------------------------
# For src/crc32c.c copied (with modifications) from
# http://stackoverflow.com/a/17646775/1821055

/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
 * Copyright (C) 2013 Mark Adler
 * Version 1.1  1 Aug 2013  Mark Adler
 */

/*
  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the author be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.

  Mark Adler
  madler@alumni.caltech.edu
 */


LICENSE.fnv1a
--------------------------------------------------------------
parts of src/rdfnv1a.c: http://www.isthe.com/chongo/src/fnv/hash_32a.c


Please do not copyright this code.  This code is in the public domain.

LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.  IN NO
EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

By:
	chongo <Landon Curt Noll> /\oo/\
	http://www.isthe.com/chongo/

Share and Enjoy!  :-)


LICENSE.hdrhistogram
--------------------------------------------------------------
This license covers src/rdhdrhistogram.c which is a C port of
Coda Hale's Golang HdrHistogram https://github.com/codahale/hdrhistogram
at revision 3a0bb77429bd3a61596f5e8a3172445844342120

-----------------------------------------------------------------------------

The MIT License (MIT)

Copyright (c) 2014 Coda Hale

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE


LICENSE.lz4
--------------------------------------------------------------
src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3

LZ4 Library
Copyright (c) 2011-2016, Yann Collet
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


LICENSE.murmur2
--------------------------------------------------------------
parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git


MurMurHash2 Library
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


LICENSE.pycrc
--------------------------------------------------------------
The following license applies to the files rdcrc32.c and rdcrc32.h which
have been generated by the pycrc tool.
============================================================================

Copyright (c) 2006-2012, Thomas Pircher <tehpeh@gmx.net>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


LICENSE.queue
--------------------------------------------------------------
For sys/queue.h:

 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 * $FreeBSD$

LICENSE.regexp
--------------------------------------------------------------
regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684

"
These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
"


LICENSE.snappy
--------------------------------------------------------------
######################################################################
# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h   #
# originally retrieved from http://github.com/andikleen/snappy-c     #
# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219              #
######################################################################

The snappy-c code is under the same license as the original snappy source

Copyright 2011 Intel Corporation All Rights Reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

  * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
  * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
  * Neither the name of Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



LICENSE.tinycthread
--------------------------------------------------------------
From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9

License
-------

Copyright (c) 2012 Marcus Geelnard
              2013-2014 Evan Nemerson

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

    1. The origin of this software must not be misrepresented; you must not
    claim that you wrote the original software. If you use this software
    in a product, an acknowledgment in the product documentation would be
    appreciated but is not required.

    2. Altered source versions must be plainly marked as such, and must not be
    misrepresented as being the original software.

    3. This notice may not be removed or altered from any source
    distribution.


LICENSE.wingetopt
--------------------------------------------------------------
For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt

/*
 * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Dieter Baron and Thomas Klausner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

@ -0,0 +1,24 @@
# Bundling prebuilt librdkafka

confluent-kafka-go bundles prebuilt statically linked
versions of librdkafka for the following platforms:

 * MacOSX x64 (aka Darwin)
 * Linux glibc x64 (Ubuntu, CentOS, etc)
 * Linux musl x64 (Alpine)

## Import static librdkafka bundle

First create the static librdkafka bundle following the instructions in
librdkafka's packaging/nuget/README.md.

Then import the new version using the import.sh script in this directory; the script
will create a branch, import the bundle, create a commit and push the
branch to GitHub for PR review. This PR must be manually opened, reviewed
and then finally merged (make sure to merge it, DO NOT squash or rebase).

    $ ./import.sh ~/path/to/librdkafka-static-bundle-v1.4.0.tgz

This will copy the static library and the rdkafka.h header file
to this directory, as well as generate a new ../build_..go file
for this platform + variant.
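
For reference, the generated build file looks roughly like the sketch below
(based on the template in bundle-import.sh; the build tag, archive name and
linker flags shown here are illustrative and vary per platform):

    // +build !dynamic
    // +build !musl

    // This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

    package kafka

    // #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
    // #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux.a -lm -ldl -lpthread
    import "C"

    // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
    const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.4.0.tgz"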
@ -0,0 +1,113 @@
#!/bin/bash
#
# Updates the bundled prebuilt librdkafka libraries to specified version.
#

set -e


usage() {
    echo "Usage: $0 librdkafka-static-bundle-<VERSION>.tgz"
    echo ""
    echo "This tool must be run from the TOPDIR/kafka/librdkafka_vendor directory"
    exit 1
}


parse_dynlibs() {
    # Parse dynamic libraries from the pkg-config file,
    # both the ones specified with Libs: and the ones pulled in through Requires:
    local pc=$1
    local libs=
    local req=
    local n=
    for req in $(grep ^Requires: $pc | sed -e 's/^Requires://'); do
        n=$(pkg-config --libs $req)
        if [[ $n == -l* ]]; then
            libs="${libs} $n"
        fi
    done
    for n in $(grep ^Libs: $pc); do
        if [[ $n == -l* ]]; then
            libs="${libs} $n"
        fi
    done

    echo "$libs"
}
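
# Example (illustrative only; the exact flags depend on the bundle's .pc files):
#   parse_dynlibs librdkafka_glibc_linux.pc   ->   " -lm -ldl -lpthread"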

setup_build() {
    # Copies static library from the temp directory into final location,
    # extracts dynamic lib list from the pkg-config file,
    # and generates the build_..go file
    local btype=$1
    local apath=$2
    local pc=$3
    local srcinfo=$4
    local build_tag=
    local gpath="../build_${btype}.go"
    local dpath="librdkafka_${btype}.a"

    if [[ $btype == glibc_linux ]]; then
        build_tag="// +build !musl"
    elif [[ $btype == musl_linux ]]; then
        build_tag="// +build musl"
    fi

    local dynlibs=$(parse_dynlibs $pc)

    echo "Copying $apath to $dpath"
    cp "$apath" "$dpath"

    echo "Generating $gpath (extra build tag: $build_tag)"

    cat >$gpath <<EOF
// +build !dynamic
$build_tag

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: \${SRCDIR}/librdkafka_vendor/${dpath} $dynlibs
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static ${btype} from ${srcinfo}"
EOF

    git add "$dpath" "$gpath"

}


bundle="$1"
[[ -f $bundle ]] || usage

bundlename=$(basename "$bundle")

bdir=$(mktemp -d tmpXXXXXX)

echo "Extracting bundle $bundle:"
tar -xzvf "$bundle" -C "$bdir/"

echo "Copying librdkafka files"
for f in rdkafka.h LICENSES.txt ; do
    cp $bdir/$f . || true
    git add "$f"
done


for btype in glibc_linux musl_linux darwin windows ; do
    lib=$bdir/librdkafka_${btype}.a
    pc=${lib/%.a/.pc}
    [[ -f $lib ]] || (echo "Expected file $lib missing" ; exit 1)
    [[ -f $pc ]] || (echo "Expected file $pc missing" ; exit 1)

    setup_build $btype $lib $pc $bundlename
done

rm -rf "$bdir"

echo "All done"
@ -0,0 +1,110 @@
#!/bin/bash
#
#
# Import a new version of librdkafka based on a librdkafka static bundle.
# This will create a separate branch, import librdkafka, make a commit,
# and then ask you to push the branch to github, have it reviewed,
# and then later merged (NOT squashed or rebased).
# Having a merge per import allows future shallow clones to skip and ignore
# older imports, hopefully reducing the amount of git history data 'go get'
# needs to download.

set -e

usage() {
    echo "Usage: $0 [--devel] path/to/librdkafka-static-bundle-<VERSION>.tgz"
    echo ""
    echo "This tool must be run from the TOPDIR/kafka/librdkafka directory"
    echo ""
    echo "Options:"
    echo "   --devel - Development use: No branch checks and does not push to github"
    exit 1
}

error_cleanup() {
    echo "Error occurred, cleaning up"
    git checkout $curr_branch
    git branch -D $import_branch
    exit 1
}

devel=0
if [[ $1 == --devel ]]; then
    devel=1
    shift
fi

bundle="$1"
[[ -f $bundle ]] || usage

# Parse the librdkafka version from the bundle
bundlename=$(basename $bundle)
version=${bundlename#librdkafka-static-bundle-}
version=${version%.tgz}

if [[ -z $version ]]; then
    echo "Error: Could not parse version from bundle $bundle"
    exit 1
fi

# Verify branch state
curr_branch=$(git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3-)
uncommitted=$(git status --untracked-files=no --porcelain)

if [[ $devel != 1 ]] && ( [[ $curr_branch != master ]] || [[ ! -z $uncommitted ]] ); then
    echo "Error: This script must be run on an up-to-date, clean, master branch"
    if [[ ! -z $uncommitted ]]; then
        echo "Uncommitted files:"
        echo "$uncommitted"
    fi
    exit 1
fi


# Create import branch, import bundle, commit.
import_branch="import_$version"

exists=$(git branch -rlq | grep "/$import_branch\$" || true)
if [[ ! -z $exists ]]; then
    echo "Error: This version branch already seems to exist: $exists: already imported?"
    [[ $devel != 1 ]] && exit 1
fi

echo "Checking for existing commits that match this version (should be none)"
git log --oneline | grep "^librdkafka static bundle $version\$" && exit 1


echo "Creating import branch $import_branch"
git checkout -b $import_branch

echo "Importing bundle $bundle"
./bundle-import.sh "$bundle" || error_cleanup

echo "Committing $version"
git commit -a -m "librdkafka static bundle $version" || error_cleanup

echo "Updating error codes and docs"
pushd ../../
make -f mk/Makefile docs || error_cleanup
git commit -a -m "Documentation and error code update for librdkafka $version" \
    || error_cleanup
popd

if [[ $devel != 1 ]]; then
    echo "Pushing branch"
    git push origin $import_branch || error_cleanup
fi

git checkout $curr_branch

if [[ $devel != 1 ]]; then
    git branch -D $import_branch
fi

echo ""
echo "############## IMPORT OF $version COMPLETE ##############"
if [[ $devel != 1 ]]; then
    echo "Branch $import_branch has been pushed."
    echo "Create a PR, have it reviewed and then merge it (do NOT squash or rebase)."
fi
@ -0,0 +1,21 @@
/**
 * Copyright 2020 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package librdkafka

// LibrdkafkaGoSubdir is a dummy variable needed to export something so the
// file is not empty.
var LibrdkafkaGoSubdir = true
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
@ -0,0 +1,89 @@
package kafka

import (
    "fmt"
    "time"
)

/*
#include "select_rdkafka.h"
*/
import "C"

// LogEvent represents a log message from librdkafka's internal log queue
type LogEvent struct {
    Name      string    // Name of client instance
    Tag       string    // Log tag that provides context to the log Message (e.g., "METADATA" or "GRPCOORD")
    Message   string    // Log message
    Level     int       // Log syslog level, lower is more critical.
    Timestamp time.Time // Log timestamp
}

// newLogEvent creates a new LogEvent from the given rd_kafka_event_t.
//
// This function does not take ownership of the cEvent pointer. You need to
// free its resources using C.rd_kafka_event_destroy afterwards.
//
// The cEvent object needs to be of type C.RD_KAFKA_EVENT_LOG. Calling this
// function with an object of another type has undefined behaviour.
func (h *handle) newLogEvent(cEvent *C.rd_kafka_event_t) LogEvent {
    var tag, message *C.char
    var level C.int

    C.rd_kafka_event_log(cEvent, &(tag), &(message), &(level))

    return LogEvent{
        Name:      h.name,
        Tag:       C.GoString(tag),
        Message:   C.GoString(message),
        Level:     int(level),
        Timestamp: time.Now(),
    }
}

// pollLogEvents polls log events from librdkafka and pushes them to toChannel,
// until doneChan is closed.
//
// Each call to librdkafka times out after timeoutMs. If a call to librdkafka
// is ongoing when doneChan is closed, the function will wait until the call
// returns or times out, whichever happens first.
func (h *handle) pollLogEvents(toChannel chan LogEvent, timeoutMs int, doneChan chan bool) {
    for {
        select {
        case <-doneChan:
            return

        default:
            cEvent := C.rd_kafka_queue_poll(h.logq, C.int(timeoutMs))
            if cEvent == nil {
                continue
            }

            if C.rd_kafka_event_type(cEvent) != C.RD_KAFKA_EVENT_LOG {
                C.rd_kafka_event_destroy(cEvent)
                continue
            }

            logEvent := h.newLogEvent(cEvent)
            C.rd_kafka_event_destroy(cEvent)

            select {
            case <-doneChan:
                return

            case toChannel <- logEvent:
                continue
            }
        }
    }
}
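
// Illustrative consumption of the log channel (sketch only; assumes a client
// `p` created with the "go.logs.channel.enable": true config property and the
// exported Logs() accessor):
//
//	for logEvent := range p.Logs() {
//		fmt.Println(logEvent)
//	}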

func (logEvent LogEvent) String() string {
    return fmt.Sprintf(
        "[%v][%s][%s][%d]%s",
        logEvent.Timestamp.Format(time.RFC3339),
        logEvent.Name,
        logEvent.Tag,
        logEvent.Level,
        logEvent.Message)
}
@ -0,0 +1,223 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
    "fmt"
    "time"
    "unsafe"
)

/*
#include <string.h>
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"

void setup_rkmessage (rd_kafka_message_t *rkmessage,
                      rd_kafka_topic_t *rkt, int32_t partition,
                      const void *payload, size_t len,
                      void *key, size_t keyLen, void *opaque) {
     rkmessage->rkt       = rkt;
     rkmessage->partition = partition;
     rkmessage->payload   = (void *)payload;
     rkmessage->len       = len;
     rkmessage->key       = (void *)key;
     rkmessage->key_len   = keyLen;
     rkmessage->_private  = opaque;
}
*/
import "C"

// TimestampType is the Message timestamp type or source
type TimestampType int

const (
    // TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
    TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
    // TimestampCreateTime indicates timestamp set by producer (source time)
    TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
    // TimestampLogAppendTime indicates timestamp set by broker (store time)
    TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
)

func (t TimestampType) String() string {
    switch t {
    case TimestampCreateTime:
        return "CreateTime"
    case TimestampLogAppendTime:
        return "LogAppendTime"
    case TimestampNotAvailable:
        fallthrough
    default:
        return "NotAvailable"
    }
}

// Message represents a Kafka message
type Message struct {
    TopicPartition TopicPartition
    Value          []byte
    Key            []byte
    Timestamp      time.Time
    TimestampType  TimestampType
    Opaque         interface{}
    Headers        []Header
}
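
// A Message for producing can be built directly. An illustrative sketch
// (`topic` is a hypothetical variable; Header is defined elsewhere in this
// package):
//
//	msg := &Message{
//		TopicPartition: TopicPartition{Topic: &topic, Partition: PartitionAny},
//		Value:          []byte("payload"),
//		Headers:        []Header{{Key: "k", Value: []byte("v")}},
//	}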

// String returns a human readable representation of a Message.
// Key and payload are not represented.
func (m *Message) String() string {
    var topic string
    if m.TopicPartition.Topic != nil {
        topic = *m.TopicPartition.Topic
    } else {
        topic = ""
    }
    return fmt.Sprintf("%s[%d]@%s", topic, m.TopicPartition.Partition, m.TopicPartition.Offset)
}

func (h *handle) getRktFromMessage(msg *Message) (crkt *C.rd_kafka_topic_t) {
    if msg.TopicPartition.Topic == nil {
        return nil
    }

    return h.getRkt(*msg.TopicPartition.Topic)
}

// setupHeadersFromGlueMsg converts the C tmp headers in gMsg to
// Go Headers in msg.
// gMsg.tmphdrs will be freed.
func setupHeadersFromGlueMsg(msg *Message, gMsg *C.glue_msg_t) {
    msg.Headers = make([]Header, gMsg.tmphdrsCnt)
    for n := range msg.Headers {
        tmphdr := (*[1 << 30]C.tmphdr_t)(unsafe.Pointer(gMsg.tmphdrs))[n]
        msg.Headers[n].Key = C.GoString(tmphdr.key)
        if tmphdr.val != nil {
            msg.Headers[n].Value = C.GoBytes(unsafe.Pointer(tmphdr.val), C.int(tmphdr.size))
        } else {
            msg.Headers[n].Value = nil
        }
    }
    C.free(unsafe.Pointer(gMsg.tmphdrs))
}

func (h *handle) newMessageFromGlueMsg(gMsg *C.glue_msg_t) (msg *Message) {
    msg = &Message{}

    if gMsg.ts != -1 {
        ts := int64(gMsg.ts)
        msg.TimestampType = TimestampType(gMsg.tstype)
        msg.Timestamp = time.Unix(ts/1000, (ts%1000)*1000000)
    }

    if gMsg.tmphdrsCnt > 0 {
        setupHeadersFromGlueMsg(msg, gMsg)
    }

    h.setupMessageFromC(msg, gMsg.msg)

    return msg
}

// setupMessageFromC sets up a message object from a C rd_kafka_message_t
func (h *handle) setupMessageFromC(msg *Message, cmsg *C.rd_kafka_message_t) {
    if cmsg.rkt != nil {
        topic := h.getTopicNameFromRkt(cmsg.rkt)
        msg.TopicPartition.Topic = &topic
    }
    msg.TopicPartition.Partition = int32(cmsg.partition)
    if cmsg.payload != nil && h.msgFields.Value {
        msg.Value = C.GoBytes(unsafe.Pointer(cmsg.payload), C.int(cmsg.len))
    }
    if cmsg.key != nil && h.msgFields.Key {
        msg.Key = C.GoBytes(unsafe.Pointer(cmsg.key), C.int(cmsg.key_len))
    }
    if h.msgFields.Headers {
        var gMsg C.glue_msg_t
        gMsg.msg = cmsg
        gMsg.want_hdrs = C.int8_t(1)
        chdrsToTmphdrs(&gMsg)
        if gMsg.tmphdrsCnt > 0 {
            setupHeadersFromGlueMsg(msg, &gMsg)
        }
    }
    msg.TopicPartition.Offset = Offset(cmsg.offset)
    if cmsg.err != 0 {
        msg.TopicPartition.Error = newError(cmsg.err)
    }
}

// newMessageFromC creates a new message object from a C rd_kafka_message_t
// NOTE: For use with Producer: does not set message timestamp fields.
func (h *handle) newMessageFromC(cmsg *C.rd_kafka_message_t) (msg *Message) {
    msg = &Message{}

    h.setupMessageFromC(msg, cmsg)

    return msg
}

// messageToC sets up cmsg as a clone of msg
func (h *handle) messageToC(msg *Message, cmsg *C.rd_kafka_message_t) {
    var valp unsafe.Pointer
    var keyp unsafe.Pointer

    // to circumvent Cgo constraints we need to allocate C heap memory
    // for both Value and Key (one allocation back to back)
    // and copy the bytes from Value and Key to the C memory.
    // We later tell librdkafka (in produce()) to free the
    // C memory pointer when it is done.
    var payload unsafe.Pointer

    valueLen := 0
    keyLen := 0
    if msg.Value != nil {
        valueLen = len(msg.Value)
    }
    if msg.Key != nil {
        keyLen = len(msg.Key)
    }

    allocLen := valueLen + keyLen
    if allocLen > 0 {
        payload = C.malloc(C.size_t(allocLen))
        if valueLen > 0 {
            copy((*[1 << 30]byte)(payload)[0:valueLen], msg.Value)
            valp = payload
        }
        if keyLen > 0 {
            copy((*[1 << 30]byte)(payload)[valueLen:allocLen], msg.Key)
            keyp = unsafe.Pointer(&((*[1 << 31]byte)(payload)[valueLen]))
        }
    }

    cmsg.rkt = h.getRktFromMessage(msg)
    cmsg.partition = C.int32_t(msg.TopicPartition.Partition)
    cmsg.payload = valp
    cmsg.len = C.size_t(valueLen)
    cmsg.key = keyp
    cmsg.key_len = C.size_t(keyLen)
    cmsg._private = nil
}

// used for testing messageToC performance
func (h *handle) messageToCDummy(msg *Message) {
    var cmsg C.rd_kafka_message_t
    h.messageToC(msg, &cmsg)
}
|||||||
|
/** |
||||||
|
* Copyright 2016 Confluent Inc. |
||||||
|
* |
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
* you may not use this file except in compliance with the License. |
||||||
|
* You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
package kafka |
||||||
|
|
||||||
|
import ( |
||||||
|
"unsafe" |
||||||
|
) |
||||||
|
|
||||||
|
/* |
||||||
|
#include <stdlib.h> |
||||||
|
#include "select_rdkafka.h" |
||||||
|
|
||||||
|
struct rd_kafka_metadata_broker *_getMetadata_broker_element(struct rd_kafka_metadata *m, int i) { |
||||||
|
return &m->brokers[i]; |
||||||
|
} |
||||||
|
|
||||||
|
struct rd_kafka_metadata_topic *_getMetadata_topic_element(struct rd_kafka_metadata *m, int i) { |
||||||
|
return &m->topics[i]; |
||||||
|
} |
||||||
|
|
||||||
|
struct rd_kafka_metadata_partition *_getMetadata_partition_element(struct rd_kafka_metadata *m, int topic_idx, int partition_idx) { |
||||||
|
return &m->topics[topic_idx].partitions[partition_idx]; |
||||||
|
} |
||||||
|
|
||||||
|
int32_t _get_int32_element (int32_t *arr, int i) { |
||||||
|
return arr[i]; |
||||||
|
} |
||||||
|
|
||||||
|
*/ |
||||||
|
import "C" |
||||||
|
|
||||||
|
// BrokerMetadata contains per-broker metadata
|
||||||
|
type BrokerMetadata struct { |
||||||
|
ID int32 |
||||||
|
Host string |
||||||
|
Port int |
||||||
|
} |
||||||
|
|
||||||
|
// PartitionMetadata contains per-partition metadata
|
||||||
|
type PartitionMetadata struct { |
||||||
|
ID int32 |
||||||
|
Error Error |
||||||
|
Leader int32 |
||||||
|
Replicas []int32 |
||||||
|
Isrs []int32 |
||||||
|
} |
||||||
|
|
||||||
|
// TopicMetadata contains per-topic metadata
|
||||||
|
type TopicMetadata struct { |
||||||
|
Topic string |
||||||
|
Partitions []PartitionMetadata |
||||||
|
Error Error |
||||||
|
} |
||||||
|
|
||||||
|
// Metadata contains broker and topic metadata for all (matching) topics
|
||||||
|
type Metadata struct { |
||||||
|
Brokers []BrokerMetadata |
||||||
|
Topics map[string]TopicMetadata |
||||||
|
|
||||||
|
OriginatingBroker BrokerMetadata |
||||||
|
} |
||||||
|
|
||||||
|
// getMetadata queries broker for cluster and topic metadata.
|
||||||
|
// If topic is non-nil only information about that topic is returned, else if
|
||||||
|
// allTopics is false only information about locally used topics is returned,
|
||||||
|
// else information about all topics is returned.
|
||||||
|
func getMetadata(H Handle, topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { |
||||||
|
h := H.gethandle() |
||||||
|
|
||||||
|
var rkt *C.rd_kafka_topic_t |
||||||
|
if topic != nil { |
||||||
|
rkt = h.getRkt(*topic) |
||||||
|
} |
||||||
|
|
||||||
|
var cMd *C.struct_rd_kafka_metadata |
||||||
|
cErr := C.rd_kafka_metadata(h.rk, bool2cint(allTopics), |
||||||
|
rkt, &cMd, C.int(timeoutMs)) |
||||||
|
if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return nil, newError(cErr) |
||||||
|
} |
||||||
|
|
||||||
|
m := Metadata{} |
||||||
|
defer C.rd_kafka_metadata_destroy(cMd) |
||||||
|
|
||||||
|
m.Brokers = make([]BrokerMetadata, cMd.broker_cnt) |
||||||
|
for i := 0; i < int(cMd.broker_cnt); i++ { |
||||||
|
b := C._getMetadata_broker_element(cMd, C.int(i)) |
||||||
|
m.Brokers[i] = BrokerMetadata{int32(b.id), C.GoString(b.host), |
||||||
|
int(b.port)} |
||||||
|
} |
||||||
|
|
||||||
|
m.Topics = make(map[string]TopicMetadata, int(cMd.topic_cnt)) |
||||||
|
for i := 0; i < int(cMd.topic_cnt); i++ { |
||||||
|
t := C._getMetadata_topic_element(cMd, C.int(i)) |
||||||
|
|
||||||
|
thisTopic := C.GoString(t.topic) |
||||||
|
m.Topics[thisTopic] = TopicMetadata{Topic: thisTopic, |
||||||
|
Error: newError(t.err), |
||||||
|
Partitions: make([]PartitionMetadata, int(t.partition_cnt))} |
||||||
|
|
||||||
|
for j := 0; j < int(t.partition_cnt); j++ { |
||||||
|
p := C._getMetadata_partition_element(cMd, C.int(i), C.int(j)) |
||||||
|
m.Topics[thisTopic].Partitions[j] = PartitionMetadata{ |
||||||
|
ID: int32(p.id), |
||||||
|
Error: newError(p.err), |
||||||
|
Leader: int32(p.leader)} |
||||||
|
m.Topics[thisTopic].Partitions[j].Replicas = make([]int32, int(p.replica_cnt)) |
||||||
|
for ir := 0; ir < int(p.replica_cnt); ir++ { |
||||||
|
m.Topics[thisTopic].Partitions[j].Replicas[ir] = int32(C._get_int32_element(p.replicas, C.int(ir))) |
||||||
|
} |
||||||
|
|
||||||
|
m.Topics[thisTopic].Partitions[j].Isrs = make([]int32, int(p.isr_cnt)) |
||||||
|
for ii := 0; ii < int(p.isr_cnt); ii++ { |
||||||
|
m.Topics[thisTopic].Partitions[j].Isrs[ii] = int32(C._get_int32_element(p.isrs, C.int(ii))) |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
m.OriginatingBroker = BrokerMetadata{int32(cMd.orig_broker_id), |
||||||
|
C.GoString(cMd.orig_broker_name), 0} |
||||||
|
|
||||||
|
return &m, nil |
||||||
|
} |
||||||
|
|
||||||
|
// queryWatermarkOffsets returns the broker's low and high offsets for the given topic
|
||||||
|
// and partition.
|
||||||
|
func queryWatermarkOffsets(H Handle, topic string, partition int32, timeoutMs int) (low, high int64, err error) { |
||||||
|
h := H.gethandle() |
||||||
|
|
||||||
|
ctopic := C.CString(topic) |
||||||
|
defer C.free(unsafe.Pointer(ctopic)) |
||||||
|
|
||||||
|
var cLow, cHigh C.int64_t |
||||||
|
|
||||||
|
e := C.rd_kafka_query_watermark_offsets(h.rk, ctopic, C.int32_t(partition), |
||||||
|
&cLow, &cHigh, C.int(timeoutMs)) |
||||||
|
if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return 0, 0, newError(e) |
||||||
|
} |
||||||
|
|
||||||
|
low = int64(cLow) |
||||||
|
high = int64(cHigh) |
||||||
|
return low, high, nil |
||||||
|
} |
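
// The exported QueryWatermarkOffsets wrappers on the client types call into
// this helper; an illustrative sketch (assumes a Consumer `c` created
// elsewhere):
//
//	low, high, err := c.QueryWatermarkOffsets("mytopic", 0, 5000)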

// getWatermarkOffsets returns the client's cached low and high offsets for the given topic
// and partition.
func getWatermarkOffsets(H Handle, topic string, partition int32) (low, high int64, err error) {
    h := H.gethandle()

    ctopic := C.CString(topic)
    defer C.free(unsafe.Pointer(ctopic))

    var cLow, cHigh C.int64_t

    e := C.rd_kafka_get_watermark_offsets(h.rk, ctopic, C.int32_t(partition),
        &cLow, &cHigh)
    if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
        return 0, 0, newError(e)
    }

    low = int64(cLow)
    high = int64(cHigh)

    return low, high, nil
}
@ -0,0 +1,35 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import "C"

// bool2cint converts a bool to a C.int (1 or 0)
func bool2cint(b bool) C.int {
    if b {
        return 1
    }
    return 0
}

// cint2bool converts a C.int to a bool
func cint2bool(v C.int) bool {
    if v == 0 {
        return false
    }
    return true
}
@ -0,0 +1,145 @@ |
|||||||
|
/** |
||||||
|
* Copyright 2017 Confluent Inc. |
||||||
|
* |
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
* you may not use this file except in compliance with the License. |
||||||
|
* You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
package kafka |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"strconv" |
||||||
|
) |
||||||
|
|
||||||
|
/* |
||||||
|
#include <stdlib.h> |
||||||
|
#include "select_rdkafka.h" |
||||||
|
|
||||||
|
static int64_t _c_rdkafka_offset_tail(int64_t rel) { |
||||||
|
return RD_KAFKA_OFFSET_TAIL(rel); |
||||||
|
} |
||||||
|
*/ |
||||||
|
import "C" |
||||||
|
|
||||||
|
// Offset type (int64) with support for canonical names
|
||||||
|
type Offset int64 |
||||||
|
|
||||||
|
// OffsetBeginning represents the earliest offset (logical)
|
||||||
|
const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING) |
||||||
|
|
||||||
|
// OffsetEnd represents the latest offset (logical)
|
||||||
|
const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END) |
||||||
|
|
||||||
|
// OffsetInvalid represents an invalid/unspecified offset
|
||||||
|
const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID) |
||||||
|
|
||||||
|
// OffsetStored represents a stored offset
|
||||||
|
const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED) |
||||||
|
|
||||||
|
func (o Offset) String() string { |
||||||
|
switch o { |
||||||
|
case OffsetBeginning: |
||||||
|
return "beginning" |
||||||
|
case OffsetEnd: |
||||||
|
return "end" |
||||||
|
case OffsetInvalid: |
||||||
|
return "unset" |
||||||
|
case OffsetStored: |
||||||
|
return "stored" |
||||||
|
default: |
||||||
|
return fmt.Sprintf("%d", int64(o)) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Set offset value, see NewOffset()
|
||||||
|
func (o *Offset) Set(offset interface{}) error { |
||||||
|
n, err := NewOffset(offset) |
||||||
|
|
||||||
|
if err == nil { |
||||||
|
*o = n |
||||||
|
} |
||||||
|
|
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
// NewOffset creates a new Offset using the provided logical string, or an
|
||||||
|
// absolute int64 offset value.
|
||||||
|
// Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored"
|
||||||
|
func NewOffset(offset interface{}) (Offset, error) { |
||||||
|
|
||||||
|
switch v := offset.(type) { |
||||||
|
case string: |
||||||
|
switch v { |
||||||
|
case "beginning": |
||||||
|
fallthrough |
||||||
|
case "earliest": |
||||||
|
return Offset(OffsetBeginning), nil |
||||||
|
|
||||||
|
case "end": |
||||||
|
fallthrough |
||||||
|
case "latest": |
||||||
|
return Offset(OffsetEnd), nil |
||||||
|
|
||||||
|
case "unset": |
||||||
|
fallthrough |
||||||
|
case "invalid": |
||||||
|
return Offset(OffsetInvalid), nil |
||||||
|
|
||||||
|
case "stored": |
||||||
|
return Offset(OffsetStored), nil |
||||||
|
|
||||||
|
default: |
||||||
|
off, err := strconv.Atoi(v) |
||||||
|
return Offset(off), err |
||||||
|
} |
||||||
|
|
||||||
|
case int: |
||||||
|
return Offset((int64)(v)), nil |
||||||
|
case int64: |
||||||
|
return Offset(v), nil |
||||||
|
default: |
||||||
|
return OffsetInvalid, newErrorFromString(ErrInvalidArg, |
||||||
|
fmt.Sprintf("Invalid offset type: %t", v)) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// OffsetTail returns the logical offset relativeOffset from current end of partition
|
||||||
|
func OffsetTail(relativeOffset Offset) Offset { |
||||||
|
return Offset(C._c_rdkafka_offset_tail(C.int64_t(relativeOffset))) |
||||||
|
} |
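As an illustration of the offset API above, a minimal sketch; the import path, broker-less usage and the concrete offset values are assumptions for the example, not part of this file:

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	// Logical names parse to their canonical offsets;
	// "earliest" is an alias for "beginning".
	off, err := kafka.NewOffset("earliest")
	if err != nil {
		panic(err)
	}
	fmt.Println(off) // prints "beginning"

	// A numeric string parses to an absolute offset.
	abs, _ := kafka.NewOffset("42")
	fmt.Println(abs) // prints "42"

	// OffsetTail(5) yields the logical offset five messages
	// before the current end of the partition.
	tail := kafka.OffsetTail(kafka.Offset(5))
	fmt.Println(int64(tail))
}
```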
||||||
|
|
||||||
|
// offsetsForTimes looks up offsets by timestamp for the given partitions.
|
||||||
|
//
|
||||||
|
// The returned offset for each partition is the earliest offset whose
|
||||||
|
// timestamp is greater than or equal to the given timestamp in the
|
||||||
|
// corresponding partition. If the provided timestamp exceeds that of the
|
||||||
|
// last message in the partition, a value of -1 will be returned.
|
||||||
|
//
|
||||||
|
// The timestamps to query are represented as `.Offset` in the `times`
|
||||||
|
// argument and the looked up offsets are represented as `.Offset` in the returned
|
||||||
|
// `offsets` list.
|
||||||
|
//
|
||||||
|
// The function will block for at most timeoutMs milliseconds.
|
||||||
|
//
|
||||||
|
// Duplicate Topic+Partitions are not supported.
|
||||||
|
// Per-partition errors may be returned in the `.Error` field.
|
||||||
|
func offsetsForTimes(H Handle, times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { |
||||||
|
cparts := newCPartsFromTopicPartitions(times) |
||||||
|
defer C.rd_kafka_topic_partition_list_destroy(cparts) |
||||||
|
cerr := C.rd_kafka_offsets_for_times(H.gethandle().rk, cparts, C.int(timeoutMs)) |
||||||
|
if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return nil, newError(cerr) |
||||||
|
} |
||||||
|
|
||||||
|
return newTopicPartitionsFromCparts(cparts), nil |
||||||
|
} |
@ -0,0 +1,918 @@ |
|||||||
|
/** |
||||||
|
* Copyright 2016 Confluent Inc. |
||||||
|
* |
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
* you may not use this file except in compliance with the License. |
||||||
|
* You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
package kafka |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"fmt" |
||||||
|
"math" |
||||||
|
"time" |
||||||
|
"unsafe" |
||||||
|
) |
||||||
|
|
||||||
|
/* |
||||||
|
#include <stdlib.h> |
||||||
|
#include "select_rdkafka.h" |
||||||
|
#include "glue_rdkafka.h" |
||||||
|
|
||||||
|
|
||||||
|
#ifdef RD_KAFKA_V_HEADERS |
||||||
|
// Convert tmphdrs to chdrs (created by this function).
|
||||||
|
// If tmphdr.size == -1: value is considered Null
|
||||||
|
// tmphdr.size == 0: value is considered empty (ignored)
|
||||||
|
// tmphdr.size > 0: value is considered non-empty
|
||||||
|
//
|
||||||
|
// WARNING: The header keys and values will be freed by this function.
|
||||||
|
void tmphdrs_to_chdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt, |
||||||
|
rd_kafka_headers_t **chdrs) { |
||||||
|
size_t i; |
||||||
|
|
||||||
|
*chdrs = rd_kafka_headers_new(tmphdrsCnt); |
||||||
|
|
||||||
|
for (i = 0 ; i < tmphdrsCnt ; i++) { |
||||||
|
rd_kafka_header_add(*chdrs, |
||||||
|
tmphdrs[i].key, -1, |
||||||
|
tmphdrs[i].size == -1 ? NULL : |
||||||
|
(tmphdrs[i].size == 0 ? "" : tmphdrs[i].val), |
||||||
|
tmphdrs[i].size == -1 ? 0 : tmphdrs[i].size); |
||||||
|
if (tmphdrs[i].size > 0) |
||||||
|
free((void *)tmphdrs[i].val); |
||||||
|
free((void *)tmphdrs[i].key); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
#else |
||||||
|
void free_tmphdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt) { |
||||||
|
size_t i; |
||||||
|
for (i = 0 ; i < tmphdrsCnt ; i++) { |
||||||
|
if (tmphdrs[i].size > 0) |
||||||
|
free((void *)tmphdrs[i].val); |
||||||
|
free((void *)tmphdrs[i].key); |
||||||
|
} |
||||||
|
} |
||||||
|
#endif |
||||||
|
|
||||||
|
|
||||||
|
rd_kafka_resp_err_t do_produce (rd_kafka_t *rk, |
||||||
|
rd_kafka_topic_t *rkt, int32_t partition, |
||||||
|
int msgflags, |
||||||
|
int valIsNull, void *val, size_t val_len, |
||||||
|
int keyIsNull, void *key, size_t key_len, |
||||||
|
int64_t timestamp, |
||||||
|
tmphdr_t *tmphdrs, size_t tmphdrsCnt, |
||||||
|
uintptr_t cgoid) { |
||||||
|
void *valp = valIsNull ? NULL : val; |
||||||
|
void *keyp = keyIsNull ? NULL : key; |
||||||
|
#ifdef RD_KAFKA_V_TIMESTAMP |
||||||
|
rd_kafka_resp_err_t err; |
||||||
|
#ifdef RD_KAFKA_V_HEADERS |
||||||
|
rd_kafka_headers_t *hdrs = NULL; |
||||||
|
#endif |
||||||
|
#endif |
||||||
|
|
||||||
|
|
||||||
|
if (tmphdrsCnt > 0) { |
||||||
|
#ifdef RD_KAFKA_V_HEADERS |
||||||
|
tmphdrs_to_chdrs(tmphdrs, tmphdrsCnt, &hdrs); |
||||||
|
#else |
||||||
|
free_tmphdrs(tmphdrs, tmphdrsCnt); |
||||||
|
return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; |
||||||
|
#endif |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
#ifdef RD_KAFKA_V_TIMESTAMP |
||||||
|
err = rd_kafka_producev(rk, |
||||||
|
RD_KAFKA_V_RKT(rkt), |
||||||
|
RD_KAFKA_V_PARTITION(partition), |
||||||
|
RD_KAFKA_V_MSGFLAGS(msgflags), |
||||||
|
RD_KAFKA_V_VALUE(valp, val_len), |
||||||
|
RD_KAFKA_V_KEY(keyp, key_len), |
||||||
|
RD_KAFKA_V_TIMESTAMP(timestamp), |
||||||
|
#ifdef RD_KAFKA_V_HEADERS |
||||||
|
RD_KAFKA_V_HEADERS(hdrs), |
||||||
|
#endif |
||||||
|
RD_KAFKA_V_OPAQUE((void *)cgoid), |
||||||
|
RD_KAFKA_V_END); |
||||||
|
#ifdef RD_KAFKA_V_HEADERS |
||||||
|
if (err && hdrs) |
||||||
|
rd_kafka_headers_destroy(hdrs); |
||||||
|
#endif |
||||||
|
return err; |
||||||
|
#else |
||||||
|
if (timestamp) |
||||||
|
return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; |
||||||
|
if (rd_kafka_produce(rkt, partition, msgflags, |
||||||
|
valp, val_len, |
||||||
|
keyp, key_len, |
||||||
|
(void *)cgoid) == -1) |
||||||
|
return rd_kafka_last_error(); |
||||||
|
else |
||||||
|
return RD_KAFKA_RESP_ERR_NO_ERROR; |
||||||
|
#endif |
||||||
|
} |
||||||
|
*/ |
||||||
|
import "C" |
||||||
|
|
||||||
|
// Producer implements a High-level Apache Kafka Producer instance
|
||||||
|
type Producer struct { |
||||||
|
events chan Event |
||||||
|
produceChannel chan *Message |
||||||
|
handle handle |
||||||
|
|
||||||
|
// Terminates the poller() goroutine
|
||||||
|
pollerTermChan chan bool |
||||||
|
} |
||||||
|
|
||||||
|
// String returns a human readable name for a Producer instance
|
||||||
|
func (p *Producer) String() string { |
||||||
|
return p.handle.String() |
||||||
|
} |
||||||
|
|
||||||
|
// gethandle implements the Handle interface
|
||||||
|
func (p *Producer) gethandle() *handle { |
||||||
|
return &p.handle |
||||||
|
} |
||||||
|
|
||||||
|
func (p *Producer) produce(msg *Message, msgFlags int, deliveryChan chan Event) error { |
||||||
|
if msg == nil || msg.TopicPartition.Topic == nil || len(*msg.TopicPartition.Topic) == 0 { |
||||||
|
return newErrorFromString(ErrInvalidArg, "") |
||||||
|
} |
||||||
|
|
||||||
|
crkt := p.handle.getRkt(*msg.TopicPartition.Topic) |
||||||
|
|
||||||
|
// Three problems:
|
||||||
|
// 1) There's a difference between an empty Value or Key (length 0, proper pointer) and
|
||||||
|
// a null Value or Key (length 0, null pointer).
|
||||||
|
// 2) we need to be able to send a null Value or Key, but the unsafe.Pointer(&slice[0])
|
||||||
|
// dereference can't be performed on a nil slice.
|
||||||
|
// 3) cgo's pointer checking requires the unsafe.Pointer(slice..) call to be made
|
||||||
|
// in the call to the C function.
|
||||||
|
//
|
||||||
|
// Solution:
|
||||||
|
// Keep track of whether the Value or Key were nil (1), but let the valp and keyp pointers
|
||||||
|
// point to a 1-byte slice (but the length to send is still 0) so that the dereference (2)
|
||||||
|
// works.
|
||||||
|
// Then perform the unsafe.Pointer() on the valp and keyp pointers (which now either point
|
||||||
|
// to the original msg.Value and msg.Key or to the 1-byte slices) in the call to C (3).
|
||||||
|
//
|
||||||
|
var valp []byte |
||||||
|
var keyp []byte |
||||||
|
oneByte := []byte{0} |
||||||
|
var valIsNull C.int |
||||||
|
var keyIsNull C.int |
||||||
|
var valLen int |
||||||
|
var keyLen int |
||||||
|
|
||||||
|
if msg.Value == nil { |
||||||
|
valIsNull = 1 |
||||||
|
valLen = 0 |
||||||
|
valp = oneByte |
||||||
|
} else { |
||||||
|
valLen = len(msg.Value) |
||||||
|
if valLen > 0 { |
||||||
|
valp = msg.Value |
||||||
|
} else { |
||||||
|
valp = oneByte |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
if msg.Key == nil { |
||||||
|
keyIsNull = 1 |
||||||
|
keyLen = 0 |
||||||
|
keyp = oneByte |
||||||
|
} else { |
||||||
|
keyLen = len(msg.Key) |
||||||
|
if keyLen > 0 { |
||||||
|
keyp = msg.Key |
||||||
|
} else { |
||||||
|
keyp = oneByte |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
var cgoid int |
||||||
|
|
||||||
|
// Per-message state that needs to be retained through the C code:
|
||||||
|
// delivery channel (if specified)
|
||||||
|
// message opaque (if specified)
|
||||||
|
// Since these can't be passed as opaque pointers to the C code,
|
||||||
|
// due to cgo constraints, we add them to a per-producer map for lookup
|
||||||
|
// when the C code triggers the callbacks or events.
|
||||||
|
if deliveryChan != nil || msg.Opaque != nil { |
||||||
|
cgoid = p.handle.cgoPut(cgoDr{deliveryChan: deliveryChan, opaque: msg.Opaque}) |
||||||
|
} |
||||||
|
|
||||||
|
var timestamp int64 |
||||||
|
if !msg.Timestamp.IsZero() { |
||||||
|
timestamp = msg.Timestamp.UnixNano() / 1000000 |
||||||
|
} |
||||||
|
|
||||||
|
// Convert headers to C-friendly tmphdrs
|
||||||
|
var tmphdrs []C.tmphdr_t |
||||||
|
tmphdrsCnt := len(msg.Headers) |
||||||
|
|
||||||
|
if tmphdrsCnt > 0 { |
||||||
|
tmphdrs = make([]C.tmphdr_t, tmphdrsCnt) |
||||||
|
|
||||||
|
for n, hdr := range msg.Headers { |
||||||
|
// Make a copy of the key
|
||||||
|
// to avoid runtime panic with
|
||||||
|
// foreign Go pointers in cgo.
|
||||||
|
tmphdrs[n].key = C.CString(hdr.Key) |
||||||
|
if hdr.Value != nil { |
||||||
|
tmphdrs[n].size = C.ssize_t(len(hdr.Value)) |
||||||
|
if tmphdrs[n].size > 0 { |
||||||
|
// Make a copy of the value
|
||||||
|
// to avoid runtime panic with
|
||||||
|
// foreign Go pointers in cgo.
|
||||||
|
tmphdrs[n].val = C.CBytes(hdr.Value) |
||||||
|
} |
||||||
|
} else { |
||||||
|
// null value
|
||||||
|
tmphdrs[n].size = C.ssize_t(-1) |
||||||
|
} |
||||||
|
} |
||||||
|
} else { |
||||||
|
// no headers, need a dummy tmphdrs of size 1 to avoid index
|
||||||
|
// out of bounds panic in do_produce() call below.
|
||||||
|
// tmphdrsCnt will be 0.
|
||||||
|
tmphdrs = []C.tmphdr_t{{nil, nil, 0}} |
||||||
|
} |
||||||
|
|
||||||
|
cErr := C.do_produce(p.handle.rk, crkt, |
||||||
|
C.int32_t(msg.TopicPartition.Partition), |
||||||
|
C.int(msgFlags)|C.RD_KAFKA_MSG_F_COPY, |
||||||
|
valIsNull, unsafe.Pointer(&valp[0]), C.size_t(valLen), |
||||||
|
keyIsNull, unsafe.Pointer(&keyp[0]), C.size_t(keyLen), |
||||||
|
C.int64_t(timestamp), |
||||||
|
(*C.tmphdr_t)(unsafe.Pointer(&tmphdrs[0])), C.size_t(tmphdrsCnt), |
||||||
|
(C.uintptr_t)(cgoid)) |
||||||
|
if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
if cgoid != 0 { |
||||||
|
p.handle.cgoGet(cgoid) |
||||||
|
} |
||||||
|
return newError(cErr) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// Produce single message.
|
||||||
|
// This is an asynchronous call that enqueues the message on the internal
|
||||||
|
// transmit queue, thus returning immediately.
|
||||||
|
// The delivery report will be sent on the provided deliveryChan if specified,
|
||||||
|
// or on the Producer object's Events() channel if not.
|
||||||
|
// msg.Timestamp requires librdkafka >= 0.9.4 (else returns ErrNotImplemented),
|
||||||
|
// api.version.request=true, and broker >= 0.10.0.0.
|
||||||
|
// msg.Headers requires librdkafka >= 0.11.4 (else returns ErrNotImplemented),
|
||||||
|
// api.version.request=true, and broker >= 0.11.0.0.
|
||||||
|
// Returns an error if message could not be enqueued.
|
||||||
|
func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error { |
||||||
|
return p.produce(msg, 0, deliveryChan) |
||||||
|
} |
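A sketch of the produce/delivery-report flow this method implements; the broker address and topic name are placeholders:

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		panic(err)
	}
	defer p.Close()

	topic := "test"
	deliveryChan := make(chan kafka.Event, 1)

	err = p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Key:            []byte("key"),
		Value:          []byte("value"),
	}, deliveryChan)
	if err != nil {
		panic(err) // enqueue failed, e.g. local queue full
	}

	// Block until the delivery report for this one message arrives.
	m := (<-deliveryChan).(*kafka.Message)
	if m.TopicPartition.Error != nil {
		fmt.Printf("delivery failed: %v\n", m.TopicPartition.Error)
	} else {
		fmt.Printf("delivered to %v\n", m.TopicPartition)
	}
}
```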
||||||
|
|
||||||
|
// Produce a batch of messages.
|
||||||
|
// These batches do not relate to the message batches sent to the broker; the latter
|
||||||
|
// are collected on the fly internally in librdkafka.
|
||||||
|
// WARNING: This is an experimental API.
|
||||||
|
// NOTE: timestamps and headers are not supported with this API.
|
||||||
|
func (p *Producer) produceBatch(topic string, msgs []*Message, msgFlags int) error { |
||||||
|
crkt := p.handle.getRkt(topic) |
||||||
|
|
||||||
|
cmsgs := make([]C.rd_kafka_message_t, len(msgs)) |
||||||
|
for i, m := range msgs { |
||||||
|
p.handle.messageToC(m, &cmsgs[i]) |
||||||
|
} |
||||||
|
r := C.rd_kafka_produce_batch(crkt, C.RD_KAFKA_PARTITION_UA, C.int(msgFlags)|C.RD_KAFKA_MSG_F_FREE, |
||||||
|
(*C.rd_kafka_message_t)(&cmsgs[0]), C.int(len(msgs))) |
||||||
|
if r == -1 { |
||||||
|
return newError(C.rd_kafka_last_error()) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// Events returns the Events channel (read)
|
||||||
|
func (p *Producer) Events() chan Event { |
||||||
|
return p.events |
||||||
|
} |
||||||
|
|
||||||
|
// Logs returns the Log channel (if enabled), else nil
|
||||||
|
func (p *Producer) Logs() chan LogEvent { |
||||||
|
return p.handle.logs |
||||||
|
} |
||||||
|
|
||||||
|
// ProduceChannel returns the produce *Message channel (write)
|
||||||
|
func (p *Producer) ProduceChannel() chan *Message { |
||||||
|
return p.produceChannel |
||||||
|
} |
||||||
|
|
||||||
|
// Len returns the number of messages and requests waiting to be transmitted to the broker
|
||||||
|
// as well as delivery reports queued for the application.
|
||||||
|
// Includes messages on ProduceChannel.
|
||||||
|
func (p *Producer) Len() int { |
||||||
|
return len(p.produceChannel) + len(p.events) + int(C.rd_kafka_outq_len(p.handle.rk)) |
||||||
|
} |
||||||
|
|
||||||
|
// Flush and wait for outstanding messages and requests to complete delivery.
|
||||||
|
// Includes messages on ProduceChannel.
|
||||||
|
// Runs until the value reaches zero or timeoutMs elapses, whichever comes first.
|
||||||
|
// Returns the number of outstanding events still un-flushed.
|
||||||
|
func (p *Producer) Flush(timeoutMs int) int { |
||||||
|
termChan := make(chan bool) // unused stand-in termChan
|
||||||
|
|
||||||
|
d := time.Duration(timeoutMs) * time.Millisecond |
||||||
|
tEnd := time.Now().Add(d) |
||||||
|
for p.Len() > 0 { |
||||||
|
remain := time.Until(tEnd).Seconds() |
||||||
|
if remain <= 0.0 { |
||||||
|
return p.Len() |
||||||
|
} |
||||||
|
|
||||||
|
p.handle.eventPoll(p.events, |
||||||
|
int(math.Min(100, remain*1000)), 1000, termChan) |
||||||
|
} |
||||||
|
|
||||||
|
return 0 |
||||||
|
} |
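For example, a bounded flush before shutdown, assuming `p` is an existing `*kafka.Producer`; the 10-second budget is arbitrary:

```go
// Give outstanding messages up to ~10 seconds to be delivered; anything
// still unflushed is reported rather than silently dropped.
if remaining := p.Flush(10 * 1000); remaining > 0 {
	fmt.Printf("%d event(s) still outstanding after flush\n", remaining)
}
p.Close()
```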
||||||
|
|
||||||
|
// Close a Producer instance.
|
||||||
|
// The Producer object or its channels are no longer usable after this call.
|
||||||
|
func (p *Producer) Close() { |
||||||
|
// Wait for poller() (signaled by closing pollerTermChan)
|
||||||
|
// and channel_producer() (signaled by closing ProduceChannel)
|
||||||
|
close(p.pollerTermChan) |
||||||
|
close(p.produceChannel) |
||||||
|
p.handle.waitGroup.Wait() |
||||||
|
|
||||||
|
close(p.events) |
||||||
|
|
||||||
|
p.handle.cleanup() |
||||||
|
|
||||||
|
C.rd_kafka_destroy(p.handle.rk) |
||||||
|
} |
||||||
|
|
||||||
|
const ( |
||||||
|
// PurgeInFlight purges messages in-flight to or from the broker.
|
||||||
|
// Purging these messages will void any future acknowledgements from the
|
||||||
|
// broker, making it impossible for the application to know if these
|
||||||
|
// messages were successfully delivered or not.
|
||||||
|
// Retrying these messages may lead to duplicates.
|
||||||
|
PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT) |
||||||
|
|
||||||
|
// PurgeQueue purges messages in internal queues.
|
||||||
|
PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE) |
||||||
|
|
||||||
|
// PurgeNonBlocking does not wait for background thread queue purging to finish.
|
||||||
|
PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING) |
||||||
|
) |
||||||
|
|
||||||
|
// Purge messages currently handled by this producer instance.
|
||||||
|
//
|
||||||
|
// flags is a combination of PurgeQueue, PurgeInFlight and PurgeNonBlocking.
|
||||||
|
//
|
||||||
|
// The application will need to call Poll(), Flush() or read the Events() channel
|
||||||
|
// after this call to serve delivery reports for the purged messages.
|
||||||
|
//
|
||||||
|
// Messages purged from internal queues fail with the delivery report
|
||||||
|
// error code set to ErrPurgeQueue, while purged messages that
|
||||||
|
// are in-flight to or from the broker will fail with the error code set to
|
||||||
|
// ErrPurgeInflight.
|
||||||
|
//
|
||||||
|
// Warning: Purging messages that are in-flight to or from the broker
|
||||||
|
// will ignore any sub-sequent acknowledgement for these messages
|
||||||
|
// received from the broker, effectively making it impossible
|
||||||
|
// for the application to know if the messages were successfully
|
||||||
|
// produced or not. This may result in duplicate messages if the
|
||||||
|
// application retries these messages at a later time.
|
||||||
|
//
|
||||||
|
// Note: This call may block for a short time while background thread
|
||||||
|
// queues are purged.
|
||||||
|
//
|
||||||
|
// Returns nil on success, ErrInvalidArg if the purge flags are invalid or unknown.
|
||||||
|
func (p *Producer) Purge(flags int) error { |
||||||
|
cErr := C.rd_kafka_purge(p.handle.rk, C.int(flags)) |
||||||
|
if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return newError(cErr) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
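A small usage sketch, assuming `p` is an existing `*kafka.Producer` and `log` is imported:

```go
// Drop queued and in-flight messages without blocking, then serve the
// resulting ErrPurgeQueue/ErrPurgeInflight delivery reports via Flush().
if err := p.Purge(kafka.PurgeQueue | kafka.PurgeInFlight | kafka.PurgeNonBlocking); err != nil {
	log.Fatal(err)
}
p.Flush(5 * 1000)
```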
||||||
|
|
||||||
|
// NewProducer creates a new high-level Producer instance.
|
||||||
|
//
|
||||||
|
// conf is a *ConfigMap with standard librdkafka configuration properties.
|
||||||
|
//
|
||||||
|
// Supported special configuration properties (type, default):
|
||||||
|
// go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance).
|
||||||
|
// These batches do not relate to Kafka message batches in any way.
|
||||||
|
// Note: timestamps and headers are not supported with this interface.
|
||||||
|
// go.delivery.reports (bool, true) - Forward per-message delivery reports to the
|
||||||
|
// Events() channel.
|
||||||
|
// go.delivery.report.fields (string, "key,value") - Comma separated list of fields to enable for delivery reports.
|
||||||
|
// Allowed values: all, none (or empty string), key, value, headers
|
||||||
|
// Warning: There is a performance penalty to include headers in the delivery report.
|
||||||
|
// go.events.channel.size (int, 1000000) - Events() channel buffer size (in number of events).
|
||||||
|
// go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages)
|
||||||
|
// go.logs.channel.enable (bool, false) - Forward log to Logs() channel.
|
||||||
|
// go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.
|
||||||
|
//
|
||||||
|
func NewProducer(conf *ConfigMap) (*Producer, error) { |
||||||
|
|
||||||
|
err := versionCheck() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
p := &Producer{} |
||||||
|
|
||||||
|
// before we do anything with the configuration, create a copy such that
|
||||||
|
// the original is not mutated.
|
||||||
|
confCopy := conf.clone() |
||||||
|
|
||||||
|
v, err := confCopy.extract("delivery.report.only.error", false) |
||||||
|
if v == true { |
||||||
|
// FIXME: The filtering of successful DRs must be done in
|
||||||
|
// the Go client to avoid cgoDr memory leaks.
|
||||||
|
return nil, newErrorFromString(ErrUnsupportedFeature, |
||||||
|
"delivery.report.only.error=true is not currently supported by the Go client") |
||||||
|
} |
||||||
|
|
||||||
|
v, err = confCopy.extract("go.batch.producer", false) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
batchProducer := v.(bool) |
||||||
|
|
||||||
|
v, err = confCopy.extract("go.delivery.reports", true) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
p.handle.fwdDr = v.(bool) |
||||||
|
|
||||||
|
v, err = confCopy.extract("go.delivery.report.fields", "key,value") |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
p.handle.msgFields, err = newMessageFieldsFrom(v) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
v, err = confCopy.extract("go.events.channel.size", 1000000) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
eventsChanSize := v.(int) |
||||||
|
|
||||||
|
v, err = confCopy.extract("go.produce.channel.size", 1000000) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
produceChannelSize := v.(int) |
||||||
|
|
||||||
|
logsChanEnable, logsChan, err := confCopy.extractLogConfig() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
if int(C.rd_kafka_version()) < 0x01000000 { |
||||||
|
// produce.offset.report is no longer used in librdkafka >= v1.0.0
|
||||||
|
v, _ = confCopy.extract("{topic}.produce.offset.report", nil) |
||||||
|
if v == nil { |
||||||
|
// Enable offset reporting by default, unless overridden.
|
||||||
|
confCopy.SetKey("{topic}.produce.offset.report", true) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Convert ConfigMap to librdkafka conf_t
|
||||||
|
cConf, err := confCopy.convert() |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
cErrstr := (*C.char)(C.malloc(C.size_t(256))) |
||||||
|
defer C.free(unsafe.Pointer(cErrstr)) |
||||||
|
|
||||||
|
C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_DR|C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR|C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH) |
||||||
|
|
||||||
|
// Create librdkafka producer instance
|
||||||
|
p.handle.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256) |
||||||
|
if p.handle.rk == nil { |
||||||
|
return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr) |
||||||
|
} |
||||||
|
|
||||||
|
p.handle.p = p |
||||||
|
p.handle.setup() |
||||||
|
p.handle.rkq = C.rd_kafka_queue_get_main(p.handle.rk) |
||||||
|
p.events = make(chan Event, eventsChanSize) |
||||||
|
p.produceChannel = make(chan *Message, produceChannelSize) |
||||||
|
p.pollerTermChan = make(chan bool) |
||||||
|
|
||||||
|
if logsChanEnable { |
||||||
|
p.handle.setupLogQueue(logsChan, p.pollerTermChan) |
||||||
|
} |
||||||
|
|
||||||
|
p.handle.waitGroup.Add(1) |
||||||
|
go func() { |
||||||
|
poller(p, p.pollerTermChan) |
||||||
|
p.handle.waitGroup.Done() |
||||||
|
}() |
||||||
|
|
||||||
|
// non-batch or batch producer, only one must be used
|
||||||
|
var producer func(*Producer) |
||||||
|
if batchProducer { |
||||||
|
producer = channelBatchProducer |
||||||
|
} else { |
||||||
|
producer = channelProducer |
||||||
|
} |
||||||
|
|
||||||
|
p.handle.waitGroup.Add(1) |
||||||
|
go func() { |
||||||
|
producer(p) |
||||||
|
p.handle.waitGroup.Done() |
||||||
|
}() |
||||||
|
|
||||||
|
return p, nil |
||||||
|
} |
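For illustration, a sketch of passing the special `go.*` properties listed above; the broker address and the specific values are arbitrary examples:

```go
p, err := kafka.NewProducer(&kafka.ConfigMap{
	"bootstrap.servers":         "localhost:9092",
	"go.delivery.reports":       true,
	"go.delivery.report.fields": "key,value,headers",
	"go.events.channel.size":    10000,
	"go.produce.channel.size":   10000,
})
if err != nil {
	log.Fatal(err)
}
defer p.Close()
```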
||||||
|
|
||||||
|
// channelProducer serves the ProduceChannel channel
|
||||||
|
func channelProducer(p *Producer) { |
||||||
|
for m := range p.produceChannel { |
||||||
|
err := p.produce(m, C.RD_KAFKA_MSG_F_BLOCK, nil) |
||||||
|
if err != nil { |
||||||
|
m.TopicPartition.Error = err |
||||||
|
p.events <- m |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// channelBatchProducer serves the ProduceChannel channel and attempts to
|
||||||
|
// improve cgo performance by using the produceBatch() interface.
|
||||||
|
func channelBatchProducer(p *Producer) { |
||||||
|
var buffered = make(map[string][]*Message) |
||||||
|
bufferedCnt := 0 |
||||||
|
const batchSize int = 1000000 |
||||||
|
totMsgCnt := 0 |
||||||
|
totBatchCnt := 0 |
||||||
|
|
||||||
|
for m := range p.produceChannel { |
||||||
|
buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m) |
||||||
|
bufferedCnt++ |
||||||
|
|
||||||
|
loop2: |
||||||
|
for { |
||||||
|
select { |
||||||
|
case m, ok := <-p.produceChannel: |
||||||
|
if !ok { |
||||||
|
break loop2 |
||||||
|
} |
||||||
|
if m == nil { |
||||||
|
panic("nil message received on ProduceChannel") |
||||||
|
} |
||||||
|
if m.TopicPartition.Topic == nil { |
||||||
|
panic(fmt.Sprintf("message without Topic received on ProduceChannel: %v", m)) |
||||||
|
} |
||||||
|
buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m) |
||||||
|
bufferedCnt++ |
||||||
|
if bufferedCnt >= batchSize { |
||||||
|
break loop2 |
||||||
|
} |
||||||
|
default: |
||||||
|
break loop2 |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
totBatchCnt++ |
||||||
|
totMsgCnt += bufferedCnt |
||||||
|
|
||||||
|
for topic, buffered2 := range buffered { |
||||||
|
err := p.produceBatch(topic, buffered2, C.RD_KAFKA_MSG_F_BLOCK) |
||||||
|
if err != nil { |
||||||
|
for _, m = range buffered2 { |
||||||
|
m.TopicPartition.Error = err |
||||||
|
p.events <- m |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
buffered = make(map[string][]*Message) |
||||||
|
bufferedCnt = 0 |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// poller polls the rd_kafka_t handle for events until signalled for termination
|
||||||
|
func poller(p *Producer, termChan chan bool) { |
||||||
|
for { |
||||||
|
select { |
||||||
|
case <-termChan: |
||||||
|
return |
||||||
|
|
||||||
|
default: |
||||||
|
_, term := p.handle.eventPoll(p.events, 100, 1000, termChan) |
||||||
|
if term { |
||||||
|
return |
||||||
|
} |
||||||
|
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// GetMetadata queries broker for cluster and topic metadata.
|
||||||
|
// If topic is non-nil only information about that topic is returned, else if
|
||||||
|
// allTopics is false only information about locally used topics is returned,
|
||||||
|
// else information about all topics is returned.
|
||||||
|
// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.
|
||||||
|
func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { |
||||||
|
return getMetadata(p, topic, allTopics, timeoutMs) |
||||||
|
} |
||||||
|
|
||||||
|
// QueryWatermarkOffsets returns the broker's low and high offsets for the given topic
|
||||||
|
// and partition.
|
||||||
|
func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) { |
||||||
|
return queryWatermarkOffsets(p, topic, partition, timeoutMs) |
||||||
|
} |
||||||
|
|
||||||
|
// OffsetsForTimes looks up offsets by timestamp for the given partitions.
|
||||||
|
//
|
||||||
|
// The returned offset for each partition is the earliest offset whose
|
||||||
|
// timestamp is greater than or equal to the given timestamp in the
|
||||||
|
// corresponding partition. If the provided timestamp exceeds that of the
|
||||||
|
// last message in the partition, a value of -1 will be returned.
|
||||||
|
//
|
||||||
|
// The timestamps to query are represented as `.Offset` in the `times`
|
||||||
|
// argument and the looked up offsets are represented as `.Offset` in the returned
|
||||||
|
// `offsets` list.
|
||||||
|
//
|
||||||
|
// The function will block for at most timeoutMs milliseconds.
|
||||||
|
//
|
||||||
|
// Duplicate Topic+Partitions are not supported.
|
||||||
|
// Per-partition errors may be returned in the `.Error` field.
|
||||||
|
func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { |
||||||
|
return offsetsForTimes(p, times, timeoutMs) |
||||||
|
} |
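A sketch of a lookup one hour back in time, assuming `p` is a connected `*kafka.Producer` and the topic/partition are placeholders:

```go
topic := "test"
oneHourAgoMs := time.Now().Add(-time.Hour).UnixNano() / int64(time.Millisecond)

// The timestamp to look up is carried in the .Offset field.
times := []kafka.TopicPartition{{Topic: &topic, Partition: 0, Offset: kafka.Offset(oneHourAgoMs)}}
offsets, err := p.OffsetsForTimes(times, 10*1000)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("earliest offset at/after one hour ago: %v\n", offsets[0].Offset)
```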
||||||
|
|
||||||
|
// GetFatalError returns an Error object if the client instance has raised a fatal error, else nil.
|
||||||
|
func (p *Producer) GetFatalError() error { |
||||||
|
return getFatalError(p) |
||||||
|
} |
||||||
|
|
||||||
|
// TestFatalError triggers a fatal error in the underlying client.
|
||||||
|
// This is to be used strictly for testing purposes.
|
||||||
|
func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode { |
||||||
|
return testFatalError(p, code, str) |
||||||
|
} |
||||||
|
|
||||||
|
// SetOAuthBearerToken sets the data to be transmitted
|
||||||
|
// to a broker during SASL/OAUTHBEARER authentication. It will return nil
|
||||||
|
// on success, otherwise an error if:
|
||||||
|
// 1) the token data is invalid (meaning an expiration time in the past
|
||||||
|
// or either a token value or an extension key or value that does not meet
|
||||||
|
// the regular expression requirements as per
|
||||||
|
// https://tools.ietf.org/html/rfc7628#section-3.1);
|
||||||
|
// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
|
||||||
|
// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
|
||||||
|
// authentication mechanism.
|
||||||
|
func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error { |
||||||
|
return p.handle.setOAuthBearerToken(oauthBearerToken) |
||||||
|
} |
||||||
|
|
||||||
|
// SetOAuthBearerTokenFailure sets the error message describing why token
|
||||||
|
// retrieval/setting failed; it also schedules a new token refresh event for 10
|
||||||
|
// seconds later so the attempt may be retried. It will return nil on
|
||||||
|
// success, otherwise an error if:
|
||||||
|
// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
|
||||||
|
// 2) SASL/OAUTHBEARER is supported but is not configured as the client's
|
||||||
|
// authentication mechanism.
|
||||||
|
func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error { |
||||||
|
return p.handle.setOAuthBearerTokenFailure(errstr) |
||||||
|
} |
||||||
|
|
||||||
|
// Transactional API
|
||||||
|
|
||||||
|
// InitTransactions Initializes transactions for the producer instance.
|
||||||
|
//
|
||||||
|
// This function ensures any transactions initiated by previous instances
|
||||||
|
// of the producer with the same `transactional.id` are completed.
|
||||||
|
// If the previous instance failed with a transaction in progress the
|
||||||
|
// previous transaction will be aborted.
|
||||||
|
// This function needs to be called before any other transactional or
|
||||||
|
// produce functions are called when the `transactional.id` is configured.
|
||||||
|
//
|
||||||
|
// If the last transaction had begun completion (following transaction commit)
|
||||||
|
// but not yet finished, this function will await the previous transaction's
|
||||||
|
// completion.
|
||||||
|
//
|
||||||
|
// When any previous transactions have been fenced this function
|
||||||
|
// will acquire the internal producer id and epoch, used in all future
|
||||||
|
// transactional messages issued by this producer instance.
|
||||||
|
//
|
||||||
|
// Upon successful return from this function the application has to perform at
|
||||||
|
// least one of the following operations within `transaction.timeout.ms` to
|
||||||
|
// avoid timing out the transaction on the broker:
|
||||||
|
// * `Produce()` (et al.)
|
||||||
|
// * `SendOffsetsToTransaction()`
|
||||||
|
// * `CommitTransaction()`
|
||||||
|
// * `AbortTransaction()`
|
||||||
|
//
|
||||||
|
// Parameters:
|
||||||
|
// * `ctx` - The maximum time to block, or nil for indefinite.
|
||||||
|
// On timeout the operation may continue in the background,
|
||||||
|
// depending on state, and it is okay to call `InitTransactions()`
|
||||||
|
// again.
|
||||||
|
//
|
||||||
|
// Returns nil on success or an error on failure.
|
||||||
|
// Check whether the returned error object permits retrying
|
||||||
|
// by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal
|
||||||
|
// error has been raised by calling `err.(kafka.Error).IsFatal()`.
|
||||||
|
func (p *Producer) InitTransactions(ctx context.Context) error { |
||||||
|
cError := C.rd_kafka_init_transactions(p.handle.rk, |
||||||
|
cTimeoutFromContext(ctx)) |
||||||
|
if cError != nil { |
||||||
|
return newErrorFromCErrorDestroy(cError) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// BeginTransaction starts a new transaction.
|
||||||
|
//
|
||||||
|
// `InitTransactions()` must have been called successfully (once)
|
||||||
|
// before this function is called.
|
||||||
|
//
|
||||||
|
// Any messages produced, offsets sent (`SendOffsetsToTransaction()`),
|
||||||
|
// etc, after the successful return of this function will be part of
|
||||||
|
// the transaction and committed or aborted atomically.
|
||||||
|
//
|
||||||
|
// Finish the transaction by calling `CommitTransaction()` or
|
||||||
|
// abort the transaction by calling `AbortTransaction()`.
|
||||||
|
//
|
||||||
|
// Returns nil on success or an error object on failure.
|
||||||
|
// Check whether a fatal error has been raised by
|
||||||
|
// calling `err.(kafka.Error).IsFatal()`.
|
||||||
|
//
|
||||||
|
// Note: With the transactional producer, `Produce()` et al. are only
|
||||||
|
// allowed during an on-going transaction, as started with this function.
|
||||||
|
// Any produce call outside an on-going transaction, or for a failed
|
||||||
|
// transaction, will fail.
|
||||||
|
func (p *Producer) BeginTransaction() error { |
||||||
|
cError := C.rd_kafka_begin_transaction(p.handle.rk) |
||||||
|
if cError != nil { |
||||||
|
return newErrorFromCErrorDestroy(cError) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// SendOffsetsToTransaction sends a list of topic partition offsets to the
|
||||||
|
// consumer group coordinator for `consumerMetadata`, and marks the offsets
|
||||||
|
// as part of the current transaction.
|
||||||
|
// These offsets will be considered committed only if the transaction is
|
||||||
|
// committed successfully.
|
||||||
|
//
|
||||||
|
// The offsets should be the next message your application will consume,
|
||||||
|
// i.e., the last processed message's offset + 1 for each partition.
|
||||||
|
// Either track the offsets manually during processing or use
|
||||||
|
// `consumer.Position()` (on the consumer) to get the current offsets for
|
||||||
|
// the partitions assigned to the consumer.
|
||||||
|
//
|
||||||
|
// Use this method at the end of a consume-transform-produce loop prior
|
||||||
|
// to committing the transaction with `CommitTransaction()`.
|
||||||
|
//
|
||||||
|
// Parameters:
|
||||||
|
// * `ctx` - The maximum amount of time to block, or nil for indefinite.
|
||||||
|
// * `offsets` - List of offsets to commit to the consumer group upon
|
||||||
|
// successful commit of the transaction. Offsets should be
|
||||||
|
// the next message to consume, e.g., last processed message + 1.
|
||||||
|
// * `consumerMetadata` - The current consumer group metadata as returned by
|
||||||
|
// `consumer.GetConsumerGroupMetadata()` on the consumer
|
||||||
|
// instance the provided offsets were consumed from.
|
||||||
|
//
|
||||||
|
// Note: The consumer must disable auto commits (set `enable.auto.commit` to false on the consumer).
|
||||||
|
//
|
||||||
|
// Note: Logical and invalid offsets (e.g., OffsetInvalid) in
|
||||||
|
// `offsets` will be ignored. If there are no valid offsets in
|
||||||
|
// `offsets` the function will return nil and no action will be taken.
|
||||||
|
//
|
||||||
|
// Returns nil on success or an error object on failure.
|
||||||
|
// Check whether the returned error object permits retrying
|
||||||
|
// by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable
|
||||||
|
// or fatal error has been raised by calling
|
||||||
|
// `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()`
|
||||||
|
// respectively.
|
||||||
|
func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error { |
||||||
|
var cOffsets *C.rd_kafka_topic_partition_list_t |
||||||
|
if offsets != nil { |
||||||
|
cOffsets = newCPartsFromTopicPartitions(offsets) |
||||||
|
defer C.rd_kafka_topic_partition_list_destroy(cOffsets) |
||||||
|
} |
||||||
|
|
||||||
|
cgmd, err := deserializeConsumerGroupMetadata(consumerMetadata.serialized) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
defer C.rd_kafka_consumer_group_metadata_destroy(cgmd) |
||||||
|
|
||||||
|
cError := C.rd_kafka_send_offsets_to_transaction( |
||||||
|
p.handle.rk, |
||||||
|
cOffsets, |
||||||
|
cgmd, |
||||||
|
cTimeoutFromContext(ctx)) |
||||||
|
if cError != nil { |
||||||
|
return newErrorFromCErrorDestroy(cError) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
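A sketch of the consume-transform-produce hand-off described above, assuming `c` is the `*kafka.Consumer` the input came from, `p` is the transactional producer, and `ctx` is a deadline context:

```go
// Offsets to commit: the consumer's position, i.e. last processed + 1
// for each currently assigned partition.
assigned, err := c.Assignment()
if err != nil {
	log.Fatal(err)
}
position, err := c.Position(assigned)
if err != nil {
	log.Fatal(err)
}
meta, err := c.GetConsumerGroupMetadata()
if err != nil {
	log.Fatal(err)
}
if err := p.SendOffsetsToTransaction(ctx, position, meta); err != nil {
	log.Fatal(err)
}
```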
||||||
|
|
||||||
|
// CommitTransaction commits the current transaction.
|
||||||
|
//
|
||||||
|
// Any outstanding messages will be flushed (delivered) before actually
|
||||||
|
// committing the transaction.
|
||||||
|
//
|
||||||
|
// If any of the outstanding messages fail permanently the current
|
||||||
|
// transaction will enter the abortable error state and this
|
||||||
|
// function will return an abortable error, in this case the application
|
||||||
|
// must call `AbortTransaction()` before attempting a new
|
||||||
|
// transaction with `BeginTransaction()`.
|
||||||
|
//
|
||||||
|
// Parameters:
|
||||||
|
// * `ctx` - The maximum amount of time to block, or nil for indefinite.
|
||||||
|
//
|
||||||
|
// Note: This function will block until all outstanding messages are
|
||||||
|
// delivered and the transaction commit request has been successfully
|
||||||
|
// handled by the transaction coordinator, or until the `ctx` expires,
|
||||||
|
// whichever comes first. On timeout the application may
|
||||||
|
// call the function again.
|
||||||
|
//
|
||||||
|
// Note: Will automatically call `Flush()` to ensure all queued
|
||||||
|
// messages are delivered before attempting to commit the transaction.
|
||||||
|
// The application MUST serve the `producer.Events()` channel for delivery
|
||||||
|
// reports in a separate go-routine during this time.
|
||||||
|
//
|
||||||
|
// Returns nil on success or an error object on failure.
|
||||||
|
// Check whether the returned error object permits retrying
|
||||||
|
// by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable
|
||||||
|
// or fatal error has been raised by calling
|
||||||
|
// `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()`
|
||||||
|
// respectively.
|
||||||
|
func (p *Producer) CommitTransaction(ctx context.Context) error { |
||||||
|
cError := C.rd_kafka_commit_transaction(p.handle.rk, |
||||||
|
cTimeoutFromContext(ctx)) |
||||||
|
if cError != nil { |
||||||
|
return newErrorFromCErrorDestroy(cError) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// AbortTransaction aborts the ongoing transaction.
|
||||||
|
//
|
||||||
|
// This function should also be used to recover from non-fatal abortable
|
||||||
|
// transaction errors.
|
||||||
|
//
|
||||||
|
// Any outstanding messages will be purged and fail with
|
||||||
|
// `ErrPurgeInflight` or `ErrPurgeQueue`.
|
||||||
|
//
|
||||||
|
// Parameters:
|
||||||
|
// * `ctx` - The maximum amount of time to block, or nil for indefinite.
|
||||||
|
//
|
||||||
|
// Note: This function will block until all outstanding messages are purged
|
||||||
|
// and the transaction abort request has been successfully
|
||||||
|
// handled by the transaction coordinator, or until the `ctx` expires,
|
||||||
|
// whichever comes first. On timeout the application may
|
||||||
|
// call the function again.
|
||||||
|
//
|
||||||
|
// Note: Will automatically call `Purge()` and `Flush()` to ensure all queued
|
||||||
|
// and in-flight messages are purged before attempting to abort the transaction.
|
||||||
|
// The application MUST serve the `producer.Events()` channel for delivery
|
||||||
|
// reports in a separate go-routine during this time.
|
||||||
|
//
|
||||||
|
// Returns nil on success or an error object on failure.
|
||||||
|
// Check whether the returned error object permits retrying
|
||||||
|
// by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal error
|
||||||
|
// has been raised by calling `err.(kafka.Error).IsFatal()`.
|
||||||
|
func (p *Producer) AbortTransaction(ctx context.Context) error { |
||||||
|
cError := C.rd_kafka_abort_transaction(p.handle.rk, |
||||||
|
cTimeoutFromContext(ctx)) |
||||||
|
if cError != nil { |
||||||
|
return newErrorFromCErrorDestroy(cError) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
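Tying the transactional calls above together, a sketch of a commit-with-abort-fallback flow; the broker address, topic and `transactional.id` are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092",
		"transactional.id":  "txn-example-1",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	if err = p.InitTransactions(ctx); err != nil {
		log.Fatal(err)
	}
	if err = p.BeginTransaction(); err != nil {
		log.Fatal(err)
	}

	topic := "test"
	if err = p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte("inside a transaction"),
	}, nil); err != nil {
		log.Fatal(err)
	}

	if err = p.CommitTransaction(ctx); err != nil {
		if kErr, ok := err.(kafka.Error); ok && kErr.TxnRequiresAbort() {
			// Abortable error: roll the transaction back; the messages fail
			// with purge errors and may be retried in a new transaction.
			if err = p.AbortTransaction(ctx); err != nil {
				log.Fatal(err)
			}
		} else {
			log.Fatal(err)
		}
	}
}
```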
@ -0,0 +1,29 @@ |
|||||||
|
/**
|
||||||
|
* Copyright 2020 Confluent Inc. |
||||||
|
* |
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
* you may not use this file except in compliance with the License. |
||||||
|
* You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
// This file uses a preprocessor macro defined by the various build_*.go
|
||||||
|
// files to determine whether to import the bundled librdkafka header, or
|
||||||
|
// the system one.
|
||||||
|
// This is needed because cgo will automatically add -I. to the include
|
||||||
|
// path, so <librdkafka/rdkafka.h> would find a bundled header instead of
|
||||||
|
// the system one if it were called librdkafka/rdkafka.h instead of
|
||||||
|
// librdkafka_vendor/rdkafka.h
|
||||||
|
|
||||||
|
#ifdef USE_VENDORED_LIBRDKAFKA |
||||||
|
#include "librdkafka_vendor/rdkafka.h" |
||||||
|
#else |
||||||
|
#include <librdkafka/rdkafka.h> |
||||||
|
#endif |
@ -0,0 +1,8 @@ |
|||||||
|
{ |
||||||
|
"Brokers": "mybroker or $BROKERS env", |
||||||
|
"Topic": "test", |
||||||
|
"GroupID": "testgroup", |
||||||
|
"PerfMsgCount": 1000000, |
||||||
|
"PerfMsgSize": 100, |
||||||
|
"Config": ["api.version.request=true"] |
||||||
|
} |
@ -0,0 +1,248 @@ |
|||||||
|
package kafka |
||||||
|
|
||||||
|
/** |
||||||
|
* Copyright 2016 Confluent Inc. |
||||||
|
* |
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
* you may not use this file except in compliance with the License. |
||||||
|
* You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"encoding/json" |
||||||
|
"fmt" |
||||||
|
"math/rand" |
||||||
|
"os" |
||||||
|
"testing" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
/* |
||||||
|
#include "select_rdkafka.h" |
||||||
|
*/ |
||||||
|
import "C" |
||||||
|
|
||||||
|
var testconf struct { |
||||||
|
Brokers string |
||||||
|
Topic string |
||||||
|
GroupID string |
||||||
|
PerfMsgCount int |
||||||
|
PerfMsgSize int |
||||||
|
Config []string |
||||||
|
conf ConfigMap |
||||||
|
} |
||||||
|
|
||||||
|
// testconfRead reads the test suite config file testconf.json which must
|
||||||
|
// contain at least Brokers and Topic string properties.
|
||||||
|
// Returns true if the testconf was found and usable, false if no such file, or panics
|
||||||
|
// if the file format is wrong.
|
||||||
|
func testconfRead() bool { |
||||||
|
cf, err := os.Open("testconf.json") |
||||||
|
if err != nil { |
||||||
|
fmt.Fprintf(os.Stderr, "%% testconf.json not found - ignoring test\n") |
||||||
|
return false |
||||||
|
} |
||||||
|
|
||||||
|
// Default values
|
||||||
|
testconf.PerfMsgCount = 2000000 |
||||||
|
testconf.PerfMsgSize = 100 |
||||||
|
testconf.GroupID = "testgroup" |
||||||
|
|
||||||
|
jp := json.NewDecoder(cf) |
||||||
|
err = jp.Decode(&testconf) |
||||||
|
if err != nil { |
||||||
|
panic(fmt.Sprintf("Failed to parse testconf: %s", err)) |
||||||
|
} |
||||||
|
|
||||||
|
cf.Close() |
||||||
|
|
||||||
|
if testconf.Brokers[0] == '$' { |
||||||
|
// Read broker list from environment variable
|
||||||
|
testconf.Brokers = os.Getenv(testconf.Brokers[1:]) |
||||||
|
} |
||||||
|
|
||||||
|
if testconf.Brokers == "" || testconf.Topic == "" { |
||||||
|
panic("Missing Brokers or Topic in testconf.json") |
||||||
|
} |
||||||
|
|
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// update existing ConfigMap with key=value pairs from testconf.Config
|
||||||
|
func (cm *ConfigMap) updateFromTestconf() error { |
||||||
|
if testconf.Config == nil { |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// Translate "key=value" pairs in Config to ConfigMap
|
||||||
|
for _, s := range testconf.Config { |
||||||
|
err := cm.Set(s) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
|
||||||
|
} |
||||||
|
|
||||||
|
// Return the number of messages available in all partitions of a topic.
|
||||||
|
// WARNING: This uses watermark offsets so it will be incorrect for compacted topics.
|
||||||
|
func getMessageCountInTopic(topic string) (int, error) { |
||||||
|
|
||||||
|
// Create consumer
|
||||||
|
config := &ConfigMap{"bootstrap.servers": testconf.Brokers, |
||||||
|
"group.id": testconf.GroupID} |
||||||
|
config.updateFromTestconf() |
||||||
|
|
||||||
|
c, err := NewConsumer(config) |
||||||
|
if err != nil { |
||||||
|
return 0, err |
||||||
|
} |
||||||
|
defer c.Close() |
||||||
|
|
||||||
|
// get metadata for the topic to find out number of partitions
|
||||||
|
|
||||||
|
metadata, err := c.GetMetadata(&topic, false, 5*1000) |
||||||
|
if err != nil { |
||||||
|
return 0, err |
||||||
|
} |
||||||
|
|
||||||
|
t, ok := metadata.Topics[topic] |
||||||
|
if !ok { |
||||||
|
return 0, newError(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) |
||||||
|
} |
||||||
|
|
||||||
|
cnt := 0 |
||||||
|
for _, p := range t.Partitions { |
||||||
|
low, high, err := c.QueryWatermarkOffsets(topic, p.ID, 5*1000) |
||||||
|
if err != nil { |
||||||
|
continue |
||||||
|
} |
||||||
|
cnt += int(high - low) |
||||||
|
} |
||||||
|
|
||||||
|
return cnt, nil |
||||||
|
} |
||||||
|
|
||||||
|
// getBrokerList returns a list of brokers (ids) in the cluster
|
||||||
|
func getBrokerList(H Handle) (brokers []int32, err error) { |
||||||
|
md, err := getMetadata(H, nil, true, 15*1000) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
brokers = make([]int32, len(md.Brokers)) |
||||||
|
for i, mdBroker := range md.Brokers { |
||||||
|
brokers[i] = mdBroker.ID |
||||||
|
} |
||||||
|
|
||||||
|
return brokers, nil |
||||||
|
} |
||||||
|
|
||||||
|
// waitTopicInMetadata waits for the given topic to show up in metadata
|
||||||
|
func waitTopicInMetadata(H Handle, topic string, timeoutMs int) error { |
||||||
|
d := time.Duration(timeoutMs) * time.Millisecond |
||||||
|
tEnd := time.Now().Add(d) |
||||||
|
|
||||||
|
for { |
||||||
|
remain := time.Until(tEnd).Seconds() |
||||||
|
if remain < 0.0 { |
||||||
|
return newErrorFromString(ErrTimedOut, |
||||||
|
fmt.Sprintf("Timed out waiting for topic %s to appear in metadata", topic)) |
||||||
|
} |
||||||
|
|
||||||
|
md, err := getMetadata(H, nil, true, int(remain*1000)) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
for _, t := range md.Topics { |
||||||
|
if t.Topic != topic { |
||||||
|
continue |
||||||
|
} |
||||||
|
if t.Error.Code() != ErrNoError || len(t.Partitions) < 1 { |
||||||
|
continue |
||||||
|
} |
||||||
|
// Proper topic found in metadata
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
} |
||||||
|
|
||||||
|
} |
||||||
|
|
||||||
|
func createAdminClient(t *testing.T) (a *AdminClient) { |
||||||
|
numver, strver := LibraryVersion() |
||||||
|
if numver < 0x000b0500 { |
||||||
|
t.Skipf("Requires librdkafka >=0.11.5 (currently on %s, 0x%x)", strver, numver) |
||||||
|
} |
||||||
|
|
||||||
|
if !testconfRead() { |
||||||
|
t.Skipf("Missing testconf.json") |
||||||
|
} |
||||||
|
|
||||||
|
conf := ConfigMap{"bootstrap.servers": testconf.Brokers} |
||||||
|
conf.updateFromTestconf() |
||||||
|
|
||||||
|
// Create an admin client for the test cluster.
||||||
|
a, err := NewAdminClient(&conf) |
||||||
|
if err != nil { |
||||||
|
t.Fatalf("NewAdminClient: %v", err) |
||||||
|
} |
||||||
|
|
||||||
|
return a |
||||||
|
} |
||||||
|
|
||||||
|
func createTestTopic(t *testing.T, suffix string, numPartitions int, replicationFactor int) string { |
||||||
|
rand.Seed(time.Now().Unix()) |
||||||
|
|
||||||
|
topic := fmt.Sprintf("%s-%s-%d", testconf.Topic, suffix, rand.Intn(100000)) |
||||||
|
|
||||||
|
a := createAdminClient(t) |
||||||
|
defer a.Close() |
||||||
|
|
||||||
|
newTopics := []TopicSpecification{ |
||||||
|
{ |
||||||
|
Topic: topic, |
||||||
|
NumPartitions: numPartitions, |
||||||
|
ReplicationFactor: replicationFactor, |
||||||
|
}, |
||||||
|
} |
||||||
|
|
||||||
|
maxDuration, err := time.ParseDuration("30s") |
||||||
|
if err != nil { |
||||||
|
t.Fatalf("%s", err) |
||||||
|
} |
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), maxDuration) |
||||||
|
defer cancel() |
||||||
|
result, err := a.CreateTopics(ctx, newTopics, nil) |
||||||
|
if err != nil { |
||||||
|
t.Fatalf("CreateTopics() failed: %s", err) |
||||||
|
} |
||||||
|
|
||||||
|
for _, res := range result { |
||||||
|
if res.Error.Code() != ErrNoError { |
||||||
|
t.Errorf("Failed to create topic %s: %s\n", |
||||||
|
res.Topic, res.Error) |
||||||
|
continue |
||||||
|
} |
||||||
|
|
||||||
|
} |
||||||
|
|
||||||
|
return topic |
||||||
|
} |
@ -0,0 +1,55 @@ |
|||||||
|
/** |
||||||
|
* Copyright 2019 Confluent Inc. |
||||||
|
* |
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
* you may not use this file except in compliance with the License. |
||||||
|
* You may obtain a copy of the License at |
||||||
|
* |
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
* |
||||||
|
* Unless required by applicable law or agreed to in writing, software |
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
* See the License for the specific language governing permissions and |
||||||
|
* limitations under the License. |
||||||
|
*/ |
||||||
|
|
||||||
|
package kafka |
||||||
|
|
||||||
|
import "C" |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
const ( |
||||||
|
cTimeoutInfinite = C.int(-1) // Blocks indefinitely until completion.
|
||||||
|
cTimeoutNoWait = C.int(0) // Returns immediately without blocking.
|
||||||
|
) |
||||||
|
|
||||||
|
// cTimeoutFromContext returns the remaining time after which work done on behalf of this context
|
||||||
|
// should be canceled, in milliseconds.
|
||||||
|
//
|
||||||
|
// If no deadline/timeout is set, or if the timeout does not fit in an int32, it returns
|
||||||
|
// cTimeoutInfinite;
|
||||||
|
// If there is no time left in this context, it returns cTimeoutNoWait.
|
||||||
|
func cTimeoutFromContext(ctx context.Context) C.int { |
||||||
|
if ctx == nil { |
||||||
|
return cTimeoutInfinite |
||||||
|
} |
||||||
|
timeout, hasTimeout := timeout(ctx) |
||||||
|
if !hasTimeout { |
||||||
|
return cTimeoutInfinite |
||||||
|
} |
||||||
|
if timeout <= 0 { |
||||||
|
return cTimeoutNoWait |
||||||
|
} |
||||||
|
|
||||||
|
timeoutMs := int64(timeout / time.Millisecond) |
||||||
|
if int64(int32(timeoutMs)) < timeoutMs { |
||||||
|
return cTimeoutInfinite |
||||||
|
} |
||||||
|
|
||||||
|
return C.int(timeoutMs) |
||||||
|
} |
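For illustration, how caller-supplied contexts map onto these C timeouts, assuming `p` is a transactional `*kafka.Producer` (the choice of `InitTransactions` as the blocking call is arbitrary):

```go
// nil context: block indefinitely (cTimeoutInfinite, i.e. -1).
_ = p.InitTransactions(nil)

// ~5 seconds left on the context: librdkafka sees a 5000 ms timeout.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_ = p.InitTransactions(ctx)

// Already-expired context: maps to cTimeoutNoWait (0).
expired, cancel2 := context.WithDeadline(context.Background(), time.Now())
defer cancel2()
_ = p.InitTransactions(expired)
```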
@ -0,0 +1,24 @@ |
|||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects) |
||||||
|
*.o |
||||||
|
*.a |
||||||
|
*.so |
||||||
|
|
||||||
|
# Folders |
||||||
|
_obj |
||||||
|
_test |
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes |
||||||
|
*.[568vq] |
||||||
|
[568vq].out |
||||||
|
|
||||||
|
*.cgo1.go |
||||||
|
*.cgo2.c |
||||||
|
_cgo_defun.c |
||||||
|
_cgo_gotypes.go |
||||||
|
_cgo_export.* |
||||||
|
|
||||||
|
_testmain.go |
||||||
|
|
||||||
|
*.exe |
||||||
|
*.test |
||||||
|
*.prof |
@ -0,0 +1,10 @@ |
|||||||
|
language: go |
||||||
|
go_import_path: github.com/pkg/errors |
||||||
|
go: |
||||||
|
- 1.11.x |
||||||
|
- 1.12.x |
||||||
|
- 1.13.x |
||||||
|
- tip |
||||||
|
|
||||||
|
script: |
||||||
|
- make check |
@ -0,0 +1,23 @@ |
|||||||
|
Copyright (c) 2015, Dave Cheney <dave@cheney.net> |
||||||
|
All rights reserved. |
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without |
||||||
|
modification, are permitted provided that the following conditions are met: |
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright notice, this |
||||||
|
list of conditions and the following disclaimer. |
||||||
|
|
||||||
|
* Redistributions in binary form must reproduce the above copyright notice, |
||||||
|
this list of conditions and the following disclaimer in the documentation |
||||||
|
and/or other materials provided with the distribution. |
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
||||||
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE |
||||||
|
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
@ -0,0 +1,44 @@ |
|||||||
|
PKGS := github.com/pkg/errors |
||||||
|
SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) |
||||||
|
GO := go |
||||||
|
|
||||||
|
check: test vet gofmt misspell unconvert staticcheck ineffassign unparam |
||||||
|
|
||||||
|
test: |
||||||
|
$(GO) test $(PKGS) |
||||||
|
|
||||||
|
vet: | test |
||||||
|
$(GO) vet $(PKGS) |
||||||
|
|
||||||
|
staticcheck: |
||||||
|
$(GO) get honnef.co/go/tools/cmd/staticcheck |
||||||
|
staticcheck -checks all $(PKGS) |
||||||
|
|
||||||
|
misspell: |
||||||
|
$(GO) get github.com/client9/misspell/cmd/misspell |
||||||
|
misspell \
|
||||||
|
-locale GB \
|
||||||
|
-error \
|
||||||
|
*.md *.go |
||||||
|
|
||||||
|
unconvert: |
||||||
|
$(GO) get github.com/mdempsky/unconvert |
||||||
|
unconvert -v $(PKGS) |
||||||
|
|
||||||
|
ineffassign: |
||||||
|
$(GO) get github.com/gordonklaus/ineffassign |
||||||
|
find $(SRCDIRS) -name '*.go' | xargs ineffassign |
||||||
|
|
||||||
|
pedantic: check errcheck |
||||||
|
|
||||||
|
unparam: |
||||||
|
$(GO) get mvdan.cc/unparam |
||||||
|
unparam ./... |
||||||
|
|
||||||
|
errcheck: |
||||||
|
$(GO) get github.com/kisielk/errcheck |
||||||
|
errcheck $(PKGS) |
||||||
|
|
||||||
|
gofmt: |
||||||
|
@echo Checking code is gofmted |
||||||
|
@test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" |
@ -0,0 +1,59 @@
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)

Package errors provides simple error handling primitives.

`go get github.com/pkg/errors`

The traditional error handling idiom in Go is roughly akin to

```go
if err != nil {
	return err
}
```

which, when applied recursively up the call stack, results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.

## Adding context to an error

The errors.Wrap function returns a new error that adds context to the original error. For example

```go
_, err := ioutil.ReadAll(r)
if err != nil {
	return errors.Wrap(err, "read failed")
}
```

## Retrieving the cause of an error

Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`:

```go
type causer interface {
	Cause() error
}
```

`errors.Cause` will recursively retrieve the topmost error that does not implement `causer`, which is assumed to be the original cause. For example:

```go
switch err := errors.Cause(err).(type) {
case *MyError:
	// handle specifically
default:
	// unknown error
}
```
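
For orientation, here is a minimal runnable sketch tying `Wrap` and `Cause` together; the failing `os.Stat` call, the path, and the message string are illustrative, not part of the package:

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func readConfig(path string) error {
	// os.Stat fails for a missing path; Wrap adds context to that error.
	if _, err := os.Stat(path); err != nil {
		return errors.Wrap(err, "read config")
	}
	return nil
}

func main() {
	err := readConfig("/no/such/file")
	fmt.Println(err)
	// read config: stat /no/such/file: no such file or directory
	fmt.Println(errors.Cause(err))
	// stat /no/such/file: no such file or directory
}
```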

[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).

## Roadmap

With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:

- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
- 1.0. Final release.

## Contributing

Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.

Before sending a PR, please discuss your change by raising an issue.

## License

BSD-2-Clause
@ -0,0 +1,32 @@
version: build-{build}.{branch}

clone_folder: C:\gopath\src\github.com\pkg\errors
shallow_clone: true # for startup speed

environment:
  GOPATH: C:\gopath

platform:
  - x64

# http://www.appveyor.com/docs/installed-software
install:
  # some helpful output for debugging builds
  - go version
  - go env
  # pre-installed MinGW at C:\MinGW is 32bit only
  # but MSYS2 at C:\msys64 has mingw64
  - set PATH=C:\msys64\mingw64\bin;%PATH%
  - gcc --version
  - g++ --version

build_script:
  - go install -v ./...

test_script:
  - set PATH=C:\gopath\bin;%PATH%
  - go test -v ./...

#artifacts:
#  - path: '%GOPATH%\bin\*.exe'
deploy: off
@ -0,0 +1,288 @@
// Package errors provides simple error handling primitives.
//
// The traditional error handling idiom in Go is roughly akin to
//
//     if err != nil {
//             return err
//     }
//
// which when applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// together with the supplied message. For example
//
//     _, err := ioutil.ReadAll(r)
//     if err != nil {
//             return errors.Wrap(err, "read failed")
//     }
//
// If additional control is required, the errors.WithStack and
// errors.WithMessage functions destructure errors.Wrap into its component
// operations: annotating an error with a stack trace and with a message,
// respectively.
//
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
//     type causer interface {
//             Cause() error
//     }
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error that does not implement causer, which is assumed to be
// the original cause. For example:
//
//     switch err := errors.Cause(err).(type) {
//     case *MyError:
//             // handle specifically
//     default:
//             // unknown error
//     }
//
// Although the causer interface is not exported by this package, it is
// considered a part of its stable public interface.
//
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported:
//
//     %s    print the error. If the error has a Cause it will be
//           printed recursively.
//     %v    see %s
//     %+v   extended format. Each Frame of the error's StackTrace will
//           be printed in detail.
//
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface:
//
//     type stackTracer interface {
//             StackTrace() errors.StackTrace
//     }
//
// The returned errors.StackTrace type is defined as
//
//     type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
//     if err, ok := err.(stackTracer); ok {
//             for _, f := range err.StackTrace() {
//                     fmt.Printf("%+s:%d\n", f, f)
//             }
//     }
//
// Although the stackTracer interface is not exported by this package, it is
// considered a part of its stable public interface.
//
// See the documentation for Frame.Format for more details.
package errors

import (
	"fmt"
	"io"
)

// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
	return &fundamental{
		msg:   message,
		stack: callers(),
	}
}

// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
	return &fundamental{
		msg:   fmt.Sprintf(format, args...),
		stack: callers(),
	}
}

// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
	msg string
	*stack
}

func (f *fundamental) Error() string { return f.msg }

func (f *fundamental) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			io.WriteString(s, f.msg)
			f.stack.Format(s, verb)
			return
		}
		fallthrough
	case 's':
		io.WriteString(s, f.msg)
	case 'q':
		fmt.Fprintf(s, "%q", f.msg)
	}
}

// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
func WithStack(err error) error {
	if err == nil {
		return nil
	}
	return &withStack{
		err,
		callers(),
	}
}

type withStack struct {
	error
	*stack
}

func (w *withStack) Cause() error { return w.error }

// Unwrap provides compatibility for Go 1.13 error chains.
func (w *withStack) Unwrap() error { return w.error }

func (w *withStack) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v", w.Cause())
			w.stack.Format(s, verb)
			return
		}
		fallthrough
	case 's':
		io.WriteString(s, w.Error())
	case 'q':
		fmt.Fprintf(s, "%q", w.Error())
	}
}

// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func Wrap(err error, message string) error {
	if err == nil {
		return nil
	}
	err = &withMessage{
		cause: err,
		msg:   message,
	}
	return &withStack{
		err,
		callers(),
	}
}

// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is called, and the format specifier.
// If err is nil, Wrapf returns nil.
func Wrapf(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	err = &withMessage{
		cause: err,
		msg:   fmt.Sprintf(format, args...),
	}
	return &withStack{
		err,
		callers(),
	}
}

// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
	if err == nil {
		return nil
	}
	return &withMessage{
		cause: err,
		msg:   message,
	}
}

// WithMessagef annotates err with the format specifier.
// If err is nil, WithMessagef returns nil.
func WithMessagef(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	return &withMessage{
		cause: err,
		msg:   fmt.Sprintf(format, args...),
	}
}

type withMessage struct {
	cause error
	msg   string
}

func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error  { return w.cause }

// Unwrap provides compatibility for Go 1.13 error chains.
func (w *withMessage) Unwrap() error { return w.cause }

func (w *withMessage) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v\n", w.Cause())
			io.WriteString(s, w.msg)
			return
		}
		fallthrough
	case 's', 'q':
		io.WriteString(s, w.Error())
	}
}

// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
//     type causer interface {
//            Cause() error
//     }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
	type causer interface {
		Cause() error
	}

	for err != nil {
		cause, ok := err.(causer)
		if !ok {
			break
		}
		err = cause.Cause()
	}
	return err
}
@ -0,0 +1,38 @@
// +build go1.13

package errors

import (
	stderrors "errors"
)

// Is reports whether any error in err's chain matches target.
//
// The chain consists of err itself followed by the sequence of errors obtained by
// repeatedly calling Unwrap.
//
// An error is considered to match a target if it is equal to that target or if
// it implements a method Is(error) bool such that Is(target) returns true.
func Is(err, target error) bool { return stderrors.Is(err, target) }

// As finds the first error in err's chain that matches target, and if so, sets
// target to that error value and returns true.
//
// The chain consists of err itself followed by the sequence of errors obtained by
// repeatedly calling Unwrap.
//
// An error matches target if the error's concrete value is assignable to the value
// pointed to by target, or if the error has a method As(interface{}) bool such that
// As(target) returns true. In the latter case, the As method is responsible for
// setting target.
//
// As will panic if target is not a non-nil pointer to either a type that implements
// error, or to any interface type. As returns false if err is nil.
func As(err error, target interface{}) bool { return stderrors.As(err, target) }

// Unwrap returns the result of calling the Unwrap method on err, if err's
// type contains an Unwrap method returning error.
// Otherwise, Unwrap returns nil.
func Unwrap(err error) error {
	return stderrors.Unwrap(err)
}
@ -0,0 +1,177 @@
package errors

import (
	"fmt"
	"io"
	"path"
	"runtime"
	"strconv"
	"strings"
)

// Frame represents a program counter inside a stack frame.
// For historical reasons if Frame is interpreted as a uintptr
// its value represents the program counter + 1.
type Frame uintptr

// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }

// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return "unknown"
	}
	file, _ := fn.FileLine(f.pc())
	return file
}

// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return 0
	}
	_, line := fn.FileLine(f.pc())
	return line
}

// name returns the name of this function, if known.
func (f Frame) name() string {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return "unknown"
	}
	return fn.Name()
}

// Format formats the frame according to the fmt.Formatter interface.
//
//     %s    source file
//     %d    source line
//     %n    function name
//     %v    equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//     %+s   function name and path of source file relative to the compile time
//           GOPATH separated by \n\t (<funcname>\n\t<path>)
//     %+v   equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
	switch verb {
	case 's':
		switch {
		case s.Flag('+'):
			io.WriteString(s, f.name())
			io.WriteString(s, "\n\t")
			io.WriteString(s, f.file())
		default:
			io.WriteString(s, path.Base(f.file()))
		}
	case 'd':
		io.WriteString(s, strconv.Itoa(f.line()))
	case 'n':
		io.WriteString(s, funcname(f.name()))
	case 'v':
		f.Format(s, 's')
		io.WriteString(s, ":")
		f.Format(s, 'd')
	}
}

// MarshalText formats a stacktrace Frame as a text string. The output is the
// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
func (f Frame) MarshalText() ([]byte, error) {
	name := f.name()
	if name == "unknown" {
		return []byte(name), nil
	}
	return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
}

// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame

// Format formats the stack of Frames according to the fmt.Formatter interface.
//
//     %s    lists source files for each Frame in the stack
//     %v    lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//     %+v   Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case s.Flag('+'):
			for _, f := range st {
				io.WriteString(s, "\n")
				f.Format(s, verb)
			}
		case s.Flag('#'):
			fmt.Fprintf(s, "%#v", []Frame(st))
		default:
			st.formatSlice(s, verb)
		}
	case 's':
		st.formatSlice(s, verb)
	}
}

// formatSlice will format this StackTrace into the given buffer as a slice of
// Frame, only valid when called with '%s' or '%v'.
func (st StackTrace) formatSlice(s fmt.State, verb rune) {
	io.WriteString(s, "[")
	for i, f := range st {
		if i > 0 {
			io.WriteString(s, " ")
		}
		f.Format(s, verb)
	}
	io.WriteString(s, "]")
}

// stack represents a stack of program counters.
type stack []uintptr

func (s *stack) Format(st fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case st.Flag('+'):
			for _, pc := range *s {
				f := Frame(pc)
				fmt.Fprintf(st, "\n%+v", f)
			}
		}
	}
}

func (s *stack) StackTrace() StackTrace {
	f := make([]Frame, len(*s))
	for i := 0; i < len(f); i++ {
		f[i] = Frame((*s)[i])
	}
	return f
}

func callers() *stack {
	const depth = 32
	var pcs [depth]uintptr
	n := runtime.Callers(3, pcs[:])
	var st stack = pcs[0:n]
	return &st
}

// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
	i := strings.LastIndex(name, "/")
	name = name[i+1:]
	i = strings.Index(name, ".")
	return name[i+1:]
}
@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test
tmp

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
@ -0,0 +1,21 @@
MIT License

Copyright (c) 2017 Olivier Poitrey

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -0,0 +1,695 @@
# Zero Allocation JSON Logger

[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog)

The zerolog package provides a fast and simple logger dedicated to JSON output.

Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection.

Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler-to-use API and even better performance.

To keep the code base and the API simple, zerolog focuses on efficient structured logging only. Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging).

![Pretty Logging Image](pretty.png)

## Who uses zerolog

Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list.

## Features

* [Blazing fast](#benchmarks)
* [Low to zero allocation](#benchmarks)
* [Leveled logging](#leveled-logging)
* [Sampling](#log-sampling)
* [Hooks](#hooks)
* [Contextual fields](#contextual-logging)
* `context.Context` integration
* [Integration with `net/http`](#integration-with-nethttp)
* [JSON and CBOR encoding formats](#binary-encoding)
* [Pretty logging for development](#pretty-logging)
* [Error Logging (with optional Stacktrace)](#error-logging)

## Installation

```bash
go get -u github.com/rs/zerolog/log
```

## Getting Started

### Simple Logging Example

For simple logging, import the global logger package **github.com/rs/zerolog/log**

```go
package main

import (
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	// UNIX Time is faster and smaller than most timestamps
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

	log.Print("hello world")
}

// Output: {"time":1516134303,"level":"debug","message":"hello world"}
```

> Note: By default log writes to `os.Stderr`
> Note: The default log level for `log.Print` is *debug*

### Contextual Logging

**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below:

```go
package main

import (
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

	log.Debug().
		Str("Scale", "833 cents").
		Float64("Interval", 833.09).
		Msg("Fibonacci is everywhere")

	log.Debug().
		Str("Name", "Tom").
		Send()
}

// Output: {"level":"debug","Scale":"833 cents","Interval":833.09,"time":1562212768,"message":"Fibonacci is everywhere"}
// Output: {"level":"debug","Name":"Tom","time":1562212768}
```

> You'll note in the above example that when adding contextual fields, the fields are strongly typed. You can find the full list of supported fields [here](#standard-types).

### Leveled Logging

#### Simple Leveled Logging Example

```go
package main

import (
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

	log.Info().Msg("hello world")
}

// Output: {"time":1516134303,"level":"info","message":"hello world"}
```

> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world")`), the chain must end with either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile time error to alert you of this.
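
For instance, a minimal sketch of the pitfall (the field values are illustrative); the first chain below emits nothing because it is never finalized with `Msg`, `Msgf`, or `Send`:

```go
log.Info().Str("foo", "bar")             // silently dropped: the event is never written
log.Info().Str("foo", "bar").Msg("done") // emits the event
```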

**zerolog** allows for logging at the following levels (from highest to lowest):

* panic (`zerolog.PanicLevel`, 5)
* fatal (`zerolog.FatalLevel`, 4)
* error (`zerolog.ErrorLevel`, 3)
* warn (`zerolog.WarnLevel`, 2)
* info (`zerolog.InfoLevel`, 1)
* debug (`zerolog.DebugLevel`, 0)
* trace (`zerolog.TraceLevel`, -1)

You can set the Global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing in one of the given constants above, e.g. `zerolog.InfoLevel` would be the "info" level. Whichever level is chosen, all logs with a level greater than or equal to that level will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant.

#### Setting Global Log Level

This example uses command-line flags to demonstrate various outputs depending on the chosen log level.

```go
package main

import (
	"flag"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
	debug := flag.Bool("debug", false, "sets log level to debug")

	flag.Parse()

	// Default level for this example is info, unless debug flag is present
	zerolog.SetGlobalLevel(zerolog.InfoLevel)
	if *debug {
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
	}

	log.Debug().Msg("This message appears only when log level set to Debug")
	log.Info().Msg("This message appears when log level set to Debug or Info")

	if e := log.Debug(); e.Enabled() {
		// Compute log output only if enabled.
		value := "bar"
		e.Str("foo", value).Msg("some debug message")
	}
}
```

Info Output (no flag)

```bash
$ ./logLevelExample
{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"}
```

Debug Output (debug flag set)

```bash
$ ./logLevelExample -debug
{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"}
{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"}
{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"}
```

#### Logging without Level or Message

You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below.

```go
package main

import (
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

	log.Log().
		Str("foo", "bar").
		Msg("")
}

// Output: {"time":1494567715,"foo":"bar"}
```

### Error Logging

You can log errors using the `Err` method.

```go
package main

import (
	"errors"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

	err := errors.New("seems we have an error here")
	log.Error().Err(err).Msg("")
}

// Output: {"level":"error","error":"seems we have an error here","time":1609085256}
```

> The default field name for errors is `error`; you can change this by setting `zerolog.ErrorFieldName` to meet your needs.

#### Error Logging with Stacktrace

Using `github.com/pkg/errors`, you can add a formatted stacktrace to your errors.

```go
package main

import (
	"github.com/pkg/errors"
	"github.com/rs/zerolog/pkgerrors"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
	zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack

	err := outer()
	log.Error().Stack().Err(err).Msg("")
}

func inner() error {
	return errors.New("seems we have an error here")
}

func middle() error {
	err := inner()
	if err != nil {
		return err
	}
	return nil
}

func outer() error {
	err := middle()
	if err != nil {
		return err
	}
	return nil
}

// Output: {"level":"error","stack":[{"func":"inner","line":"20","source":"errors.go"},{"func":"middle","line":"24","source":"errors.go"},{"func":"outer","line":"32","source":"errors.go"},{"func":"main","line":"15","source":"errors.go"},{"func":"main","line":"204","source":"proc.go"},{"func":"goexit","line":"1374","source":"asm_amd64.s"}],"error":"seems we have an error here","time":1609086683}
```

> `zerolog.ErrorStackMarshaler` must be set in order for the stack to output anything.

#### Logging Fatal Messages

```go
package main

import (
	"errors"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	err := errors.New("A repo man spends his life getting into tense situations")
	service := "myservice"

	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix

	log.Fatal().
		Err(err).
		Str("service", service).
		Msgf("Cannot start %s", service)
}

// Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"}
// exit status 1
```

> NOTE: Using `Msgf` generates one allocation even when the logger is disabled.
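
As a hedged illustration of why: packing the variadic `Msgf` arguments into `...interface{}` allocates before the disabled check can help, so hot paths may prefer typed fields with a constant message (the `host` variable below is illustrative):

```go
// One allocation for the format arguments, even if debug is disabled:
log.Debug().Msgf("connected to %s", host)

// Allocation-free when the level is disabled:
log.Debug().Str("host", host).Msg("connected")
```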

### Create logger instance to manage different outputs

```go
logger := zerolog.New(os.Stderr).With().Timestamp().Logger()

logger.Info().Str("foo", "bar").Msg("hello world")

// Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"}
```

### Sub-loggers let you chain loggers with additional context

```go
sublogger := log.With().
	Str("component", "foo").
	Logger()
sublogger.Info().Msg("hello world")

// Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"}
```

### Pretty logging

To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`:

```go
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})

log.Info().Str("foo", "bar").Msg("Hello world")

// Output: 3:04PM INF Hello world foo=bar
```

To customize the configuration and formatting:

```go
output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339}
output.FormatLevel = func(i interface{}) string {
	return strings.ToUpper(fmt.Sprintf("| %-6s|", i))
}
output.FormatMessage = func(i interface{}) string {
	return fmt.Sprintf("***%s****", i)
}
output.FormatFieldName = func(i interface{}) string {
	return fmt.Sprintf("%s:", i)
}
output.FormatFieldValue = func(i interface{}) string {
	return strings.ToUpper(fmt.Sprintf("%s", i))
}

log := zerolog.New(output).With().Timestamp().Logger()

log.Info().Str("foo", "bar").Msg("Hello World")

// Output: 2006-01-02T15:04:05Z07:00 | INFO  | ***Hello World**** foo:BAR
```

### Sub dictionary

```go
log.Info().
	Str("foo", "bar").
	Dict("dict", zerolog.Dict().
		Str("bar", "baz").
		Int("n", 1),
	).Msg("hello world")

// Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"}
```

### Customize automatic field names

```go
zerolog.TimestampFieldName = "t"
zerolog.LevelFieldName = "l"
zerolog.MessageFieldName = "m"

log.Info().Msg("hello world")

// Output: {"l":"info","t":1494567715,"m":"hello world"}
```

### Add contextual fields to the global logger

```go
log.Logger = log.With().Str("foo", "bar").Logger()
```

### Add file and line number to log

```go
log.Logger = log.With().Caller().Logger()
log.Info().Msg("hello world")

// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"}
```

### Thread-safe, lock-free, non-blocking writer

If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follows:

```go
wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) {
	fmt.Printf("Logger Dropped %d messages", missed)
})
log := zerolog.New(wr)
log.Print("test")
```

You will need to install `code.cloudfoundry.org/go-diodes` to use this feature.

### Log Sampling

```go
sampled := log.Sample(&zerolog.BasicSampler{N: 10})
sampled.Info().Msg("will be logged every 10 messages")

// Output: {"time":1494567715,"level":"info","message":"will be logged every 10 messages"}
```

More advanced sampling:

```go
// Allow 5 debug messages per 1-second period;
// beyond that, log 1 in every 100 debug messages.
// Other levels are not sampled.
sampled := log.Sample(zerolog.LevelSampler{
	DebugSampler: &zerolog.BurstSampler{
		Burst:       5,
		Period:      1 * time.Second,
		NextSampler: &zerolog.BasicSampler{N: 100},
	},
})
sampled.Debug().Msg("hello world")

// Output: {"time":1494567715,"level":"debug","message":"hello world"}
```

### Hooks

```go
type SeverityHook struct{}

func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
	if level != zerolog.NoLevel {
		e.Str("severity", level.String())
	}
}

hooked := log.Hook(SeverityHook{})
hooked.Warn().Msg("")

// Output: {"level":"warn","severity":"warn"}
```

### Pass a sub-logger by context

```go
ctx := log.With().Str("component", "module").Logger().WithContext(ctx)

log.Ctx(ctx).Info().Msg("hello world")

// Output: {"component":"module","level":"info","message":"hello world"}
```

### Set as standard logger output

```go
log := zerolog.New(os.Stdout).With().
	Str("foo", "bar").
	Logger()

stdlog.SetFlags(0)
stdlog.SetOutput(log)

stdlog.Print("hello world")

// Output: {"foo":"bar","message":"hello world"}
```

### Integration with `net/http`

The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`.

In this example we use [alice](https://github.com/justinas/alice) to install the logger, for better readability.

```go
log := zerolog.New(os.Stdout).With().
	Timestamp().
	Str("role", "my-service").
	Str("host", host).
	Logger()

c := alice.New()

// Install the logger handler with default output on the console
c = c.Append(hlog.NewHandler(log))

// Install some provided extra handlers to set some request's context fields.
// Thanks to those handlers, all our logs will come with some prepopulated fields.
c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) {
	hlog.FromRequest(r).Info().
		Str("method", r.Method).
		Stringer("url", r.URL).
		Int("status", status).
		Int("size", size).
		Dur("duration", duration).
		Msg("")
}))
c = c.Append(hlog.RemoteAddrHandler("ip"))
c = c.Append(hlog.UserAgentHandler("user_agent"))
c = c.Append(hlog.RefererHandler("referer"))
c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id"))

// Here is your final handler
h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	// Get the logger from the request's context. You can safely assume it
	// will be always there: if the handler is removed, hlog.FromRequest
	// will return a no-op logger.
	hlog.FromRequest(r).Info().
		Str("user", "current user").
		Str("status", "ok").
		Msg("Something happened")

	// Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"}
}))
http.Handle("/", h)

if err := http.ListenAndServe(":8080", nil); err != nil {
	log.Fatal().Err(err).Msg("Startup failed")
}
```

## Multiple Log Output

`zerolog.MultiLevelWriter` may be used to send the log message to multiple outputs.
In this example, we send the log message to both `os.Stdout` and the built-in ConsoleWriter.

```go
func main() {
	consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout}

	multi := zerolog.MultiLevelWriter(consoleWriter, os.Stdout)

	logger := zerolog.New(multi).With().Timestamp().Logger()

	logger.Info().Msg("Hello World!")
}

// Output (Line 1: Console; Line 2: Stdout)
// 12:36PM INF Hello World!
// {"level":"info","time":"2019-11-07T12:36:38+03:00","message":"Hello World!"}
```

## Global Settings

Some settings can be changed and will be applied to all loggers; a short sketch follows the list:

* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods).
* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode).
* `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events.
* `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name.
* `zerolog.LevelFieldName`: Can be set to customize level field name.
* `zerolog.MessageFieldName`: Can be set to customize message field name.
* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set to `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formatted as UNIX timestamps.
* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`).
* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`).
* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on stderr. This handler must be thread safe and non-blocking.
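
As a minimal sketch (the chosen values are illustrative; set these once at startup, before logging begins):

```go
func init() {
	zerolog.SetGlobalLevel(zerolog.WarnLevel)          // drop debug/info events everywhere
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs // millisecond UNIX timestamps
	zerolog.DurationFieldInteger = true                // Dur fields rendered as integers
}
```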

## Field Types

### Standard Types

* `Str`
* `Bool`
* `Int`, `Int8`, `Int16`, `Int32`, `Int64`
* `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64`
* `Float32`, `Float64`

### Advanced Fields

* `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name.
* `Func`: Run a `func` only if the level is enabled.
* `Timestamp`: Inserts a timestamp field with `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`.
* `Time`: Adds a field with time formatted with `zerolog.TimeFieldFormat`.
* `Dur`: Adds a field with `time.Duration`.
* `Dict`: Adds a sub-key/value as a field of the event.
* `RawJSON`: Adds a field with already encoded JSON (`[]byte`).
* `Hex`: Adds a field with the value formatted as a hexadecimal string (`[]byte`).
* `Interface`: Uses reflection to marshal the type.

Most fields are also available in slice form (`Strs` for `[]string`, `Errs` for `[]error`, etc.); a short sketch combining several of them follows.
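
For instance, a hedged sketch mixing several of these field types (all values are illustrative, and the output shown assumes default settings, with the timestamp omitted for brevity):

```go
log.Info().
	Strs("tags", []string{"auth", "cache"}).
	Dur("elapsed", 42*time.Millisecond).
	Hex("checksum", []byte{0xde, 0xad}).
	Err(errors.New("partial failure")).
	Msg("request finished")

// Output: {"level":"info","tags":["auth","cache"],"elapsed":42,"checksum":"dead","error":"partial failure","message":"request finished"}
```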

## Binary Encoding

In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](http://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows:

```bash
go build -tags binary_log .
```

To decode binary-encoded log files you can use any CBOR decoder. One that has been tested to work with the zerolog library is [CSD](https://github.com/toravir/csd/).

## Related Projects

* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog`
* [overlog](https://github.com/Trendyol/overlog): Implementation of `Mapped Diagnostic Context` interface using `zerolog`
* [zerologr](https://github.com/go-logr/zerologr): Implementation of `logr.LogSink` interface using `zerolog`

## Benchmarks

See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks.

All operations are allocation free (those numbers *include* JSON encoding):

```text
BenchmarkLogEmpty-8        100000000    19.1 ns/op    0 B/op    0 allocs/op
BenchmarkDisabled-8        500000000    4.07 ns/op    0 B/op    0 allocs/op
BenchmarkInfo-8            30000000     42.5 ns/op    0 B/op    0 allocs/op
BenchmarkContextFields-8   30000000     44.9 ns/op    0 B/op    0 allocs/op
BenchmarkLogFields-8       10000000     184 ns/op     0 B/op    0 allocs/op
```

There are a few Go logging benchmarks and comparisons that include zerolog.

* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench)
* [uber-common/zap](https://github.com/uber-go/zap#performance)

Using Uber's zap comparison benchmark:

Log a message and 10 fields:

| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 767 ns/op | 552 B/op | 6 allocs/op |
| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op |
| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op |
| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op |
| lion | 5392 ns/op | 5807 B/op | 63 allocs/op |
| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op |
| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op |
| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op |

Log a message with a logger that already has 10 fields of context:

| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 52 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
| lion | 2702 ns/op | 4074 B/op | 38 allocs/op |
| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op |
| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op |
| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op |
| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op |

Log a static string, without any context or `printf`-style templating:

| Library | Time | Bytes Allocated | Objects Allocated |
| :--- | :---: | :---: | :---: |
| zerolog | 50 ns/op | 0 B/op | 0 allocs/op |
| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op |
| standard library | 453 ns/op | 80 B/op | 2 allocs/op |
| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
| go-kit | 508 ns/op | 656 B/op | 13 allocs/op |
| lion | 771 ns/op | 1224 B/op | 10 allocs/op |
| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op |
| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op |
| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op |

## Caveats

Note that zerolog does no de-duplication of fields. Using the same key multiple times creates multiple keys in the final JSON:

```go
logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
logger.Info().
	Timestamp().
	Msg("dup")
// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
```

In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt.
@ -0,0 +1 @@
remote_theme: rs/gh-readme
@ -0,0 +1,240 @@ |
|||||||
|
package zerolog

import (
	"net"
	"sync"
	"time"
)

var arrayPool = &sync.Pool{
	New: func() interface{} {
		return &Array{
			buf: make([]byte, 0, 500),
		}
	},
}

// Array is used to prepopulate an array of items
// which can be re-used to add to log messages.
type Array struct {
	buf []byte
}

func putArray(a *Array) {
	// Proper usage of a sync.Pool requires each entry to have approximately
	// the same memory cost. To obtain this property when the stored type
	// contains a variably-sized buffer, we add a hard limit on the maximum buffer
	// to place back in the pool.
	//
	// See https://golang.org/issue/23199
	const maxSize = 1 << 16 // 64KiB
	if cap(a.buf) > maxSize {
		return
	}
	arrayPool.Put(a)
}

// Arr creates an array to be added to an Event or Context.
func Arr() *Array {
	a := arrayPool.Get().(*Array)
	a.buf = a.buf[:0]
	return a
}

// MarshalZerologArray is a no-op here, since the data is
// already in the needed format.
func (*Array) MarshalZerologArray(*Array) {
}

func (a *Array) write(dst []byte) []byte {
	dst = enc.AppendArrayStart(dst)
	if len(a.buf) > 0 {
		dst = append(dst, a.buf...)
	}
	dst = enc.AppendArrayEnd(dst)
	putArray(a)
	return dst
}

// Object marshals an object that implements the LogObjectMarshaler
// interface and appends it to the array.
func (a *Array) Object(obj LogObjectMarshaler) *Array {
	e := Dict()
	obj.MarshalZerologObject(e)
	e.buf = enc.AppendEndMarker(e.buf)
	a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
	putEvent(e)
	return a
}

// Str appends val as a string to the array.
func (a *Array) Str(val string) *Array {
	a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val)
	return a
}

// Bytes appends val as a string to the array.
func (a *Array) Bytes(val []byte) *Array {
	a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val)
	return a
}

// Hex appends val as a hex string to the array.
func (a *Array) Hex(val []byte) *Array {
	a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val)
	return a
}

// RawJSON adds already encoded JSON to the array.
func (a *Array) RawJSON(val []byte) *Array {
	a.buf = appendJSON(enc.AppendArrayDelim(a.buf), val)
	return a
}

// Err serializes and appends the err to the array.
func (a *Array) Err(err error) *Array {
	switch m := ErrorMarshalFunc(err).(type) {
	case LogObjectMarshaler:
		e := newEvent(nil, 0)
		e.buf = e.buf[:0]
		e.appendObject(m)
		a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
		putEvent(e)
	case error:
		if m == nil || isNilValue(m) {
			a.buf = enc.AppendNil(enc.AppendArrayDelim(a.buf))
		} else {
			a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error())
		}
	case string:
		a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m)
	default:
		a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m)
	}

	return a
}

// Bool appends the val as a bool to the array.
func (a *Array) Bool(b bool) *Array {
	a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b)
	return a
}

// Int appends i as an int to the array.
func (a *Array) Int(i int) *Array {
	a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i)
	return a
}

// Int8 appends i as an int8 to the array.
func (a *Array) Int8(i int8) *Array {
	a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i)
	return a
}

// Int16 appends i as an int16 to the array.
func (a *Array) Int16(i int16) *Array {
	a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i)
	return a
}

// Int32 appends i as an int32 to the array.
func (a *Array) Int32(i int32) *Array {
	a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i)
	return a
}

// Int64 appends i as an int64 to the array.
func (a *Array) Int64(i int64) *Array {
	a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i)
	return a
}

// Uint appends i as a uint to the array.
func (a *Array) Uint(i uint) *Array {
	a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i)
	return a
}

// Uint8 appends i as a uint8 to the array.
func (a *Array) Uint8(i uint8) *Array {
	a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i)
	return a
}

// Uint16 appends i as a uint16 to the array.
func (a *Array) Uint16(i uint16) *Array {
	a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i)
	return a
}

// Uint32 appends i as a uint32 to the array.
func (a *Array) Uint32(i uint32) *Array {
	a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i)
	return a
}

// Uint64 appends i as a uint64 to the array.
func (a *Array) Uint64(i uint64) *Array {
	a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i)
	return a
}

// Float32 appends f as a float32 to the array.
func (a *Array) Float32(f float32) *Array {
	a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f)
	return a
}

// Float64 appends f as a float64 to the array.
func (a *Array) Float64(f float64) *Array {
	a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f)
	return a
}

// Time appends t formatted as a string using zerolog.TimeFieldFormat.
func (a *Array) Time(t time.Time) *Array {
	a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat)
	return a
}

// Dur appends d to the array.
func (a *Array) Dur(d time.Duration) *Array {
	a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger)
	return a
}

// Interface appends i marshaled using reflection.
func (a *Array) Interface(i interface{}) *Array {
	if obj, ok := i.(LogObjectMarshaler); ok {
		return a.Object(obj)
	}
	a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i)
	return a
}

// IPAddr appends an IPv4 or IPv6 address to the array.
func (a *Array) IPAddr(ip net.IP) *Array {
	a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip)
	return a
}

// IPPrefix appends an IPv4 or IPv6 prefix (IP + mask) to the array.
func (a *Array) IPPrefix(pfx net.IPNet) *Array {
	a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx)
	return a
}

// MACAddr appends a MAC (Ethernet) address to the array.
func (a *Array) MACAddr(ha net.HardwareAddr) *Array {
	a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha)
	return a
}

// Dict appends the dict Event to the array.
func (a *Array) Dict(dict *Event) *Array {
	dict.buf = enc.AppendEndMarker(dict.buf)
	a.buf = append(enc.AppendArrayDelim(a.buf), dict.buf...)
	return a
}
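
// A minimal usage sketch for the Array API above (hypothetical caller code,
// not part of this file); it assumes the exported zerolog constructors seen
// elsewhere in this diff:
//
//	logger := zerolog.New(os.Stdout)
//	logger.Info().
//		Array("ports", zerolog.Arr().Int(80).Int(443).Str("https")).
//		Msg("listening")
//
// Output (shape only): {"level":"info","ports":[80,443,"https"],"message":"listening"}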
@ -0,0 +1,409 @@
package zerolog

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"
)

const (
	colorBlack = iota + 30
	colorRed
	colorGreen
	colorYellow
	colorBlue
	colorMagenta
	colorCyan
	colorWhite

	colorBold     = 1
	colorDarkGray = 90
)

var (
	consoleBufPool = sync.Pool{
		New: func() interface{} {
			return bytes.NewBuffer(make([]byte, 0, 100))
		},
	}
)

const (
	consoleDefaultTimeFormat = time.Kitchen
)

// Formatter transforms the input into a formatted string.
type Formatter func(interface{}) string

// ConsoleWriter parses the JSON input and writes it in an
// (optionally) colorized, human-friendly format to Out.
type ConsoleWriter struct {
	// Out is the output destination.
	Out io.Writer

	// NoColor disables the colorized output.
	NoColor bool

	// TimeFormat specifies the format for timestamps in output.
	TimeFormat string

	// PartsOrder defines the order of parts in output.
	PartsOrder []string

	// PartsExclude defines parts to not display in output.
	PartsExclude []string

	FormatTimestamp     Formatter
	FormatLevel         Formatter
	FormatCaller        Formatter
	FormatMessage       Formatter
	FormatFieldName     Formatter
	FormatFieldValue    Formatter
	FormatErrFieldName  Formatter
	FormatErrFieldValue Formatter
}

// NewConsoleWriter creates and initializes a new ConsoleWriter.
func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter {
	w := ConsoleWriter{
		Out:        os.Stdout,
		TimeFormat: consoleDefaultTimeFormat,
		PartsOrder: consoleDefaultPartsOrder(),
	}

	for _, opt := range options {
		opt(&w)
	}

	return w
}
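
// A sketch of constructing the writer with the functional options above
// (hypothetical caller code, not part of this file):
//
//	cw := zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) {
//		w.TimeFormat = time.RFC3339
//		w.NoColor = true
//	})
//	logger := zerolog.New(cw).With().Timestamp().Logger()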

// Write transforms the JSON input with formatters and appends to w.Out.
func (w ConsoleWriter) Write(p []byte) (n int, err error) {
	if w.PartsOrder == nil {
		w.PartsOrder = consoleDefaultPartsOrder()
	}

	var buf = consoleBufPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset()
		consoleBufPool.Put(buf)
	}()

	var evt map[string]interface{}
	p = decodeIfBinaryToBytes(p)
	d := json.NewDecoder(bytes.NewReader(p))
	d.UseNumber()
	err = d.Decode(&evt)
	if err != nil {
		return n, fmt.Errorf("cannot decode event: %s", err)
	}

	for _, p := range w.PartsOrder {
		w.writePart(buf, evt, p)
	}

	w.writeFields(evt, buf)

	err = buf.WriteByte('\n')
	if err != nil {
		return n, err
	}
	_, err = buf.WriteTo(w.Out)
	return len(p), err
}

// writeFields appends formatted key-value pairs to buf.
func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) {
	var fields = make([]string, 0, len(evt))
	for field := range evt {
		switch field {
		case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName:
			continue
		}
		fields = append(fields, field)
	}
	sort.Strings(fields)

	if len(fields) > 0 {
		buf.WriteByte(' ')
	}

	// Move the "error" field to the front.
	ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName })
	if ei < len(fields) && fields[ei] == ErrorFieldName {
		fields[ei] = ""
		fields = append([]string{ErrorFieldName}, fields...)
		var xfields = make([]string, 0, len(fields))
		for _, field := range fields {
			if field == "" { // Skip empty fields.
				continue
			}
			xfields = append(xfields, field)
		}
		fields = xfields
	}

	for i, field := range fields {
		var fn Formatter
		var fv Formatter

		if field == ErrorFieldName {
			if w.FormatErrFieldName == nil {
				fn = consoleDefaultFormatErrFieldName(w.NoColor)
			} else {
				fn = w.FormatErrFieldName
			}

			if w.FormatErrFieldValue == nil {
				fv = consoleDefaultFormatErrFieldValue(w.NoColor)
			} else {
				fv = w.FormatErrFieldValue
			}
		} else {
			if w.FormatFieldName == nil {
				fn = consoleDefaultFormatFieldName(w.NoColor)
			} else {
				fn = w.FormatFieldName
			}

			if w.FormatFieldValue == nil {
				fv = consoleDefaultFormatFieldValue
			} else {
				fv = w.FormatFieldValue
			}
		}

		buf.WriteString(fn(field))

		switch fValue := evt[field].(type) {
		case string:
			if needsQuote(fValue) {
				buf.WriteString(fv(strconv.Quote(fValue)))
			} else {
				buf.WriteString(fv(fValue))
			}
		case json.Number:
			buf.WriteString(fv(fValue))
		default:
			b, err := json.Marshal(fValue)
			if err != nil {
				fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err)
			} else {
				fmt.Fprint(buf, fv(b))
			}
		}

		if i < len(fields)-1 { // Skip space for the last field.
			buf.WriteByte(' ')
		}
	}
}

// writePart appends a formatted part to buf.
func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) {
	var f Formatter

	if len(w.PartsExclude) > 0 {
		for _, exclude := range w.PartsExclude {
			if exclude == p {
				return
			}
		}
	}

	switch p {
	case LevelFieldName:
		if w.FormatLevel == nil {
			f = consoleDefaultFormatLevel(w.NoColor)
		} else {
			f = w.FormatLevel
		}
	case TimestampFieldName:
		if w.FormatTimestamp == nil {
			f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor)
		} else {
			f = w.FormatTimestamp
		}
	case MessageFieldName:
		if w.FormatMessage == nil {
			f = consoleDefaultFormatMessage
		} else {
			f = w.FormatMessage
		}
	case CallerFieldName:
		if w.FormatCaller == nil {
			f = consoleDefaultFormatCaller(w.NoColor)
		} else {
			f = w.FormatCaller
		}
	default:
		if w.FormatFieldValue == nil {
			f = consoleDefaultFormatFieldValue
		} else {
			f = w.FormatFieldValue
		}
	}

	var s = f(evt[p])

	if len(s) > 0 {
		buf.WriteString(s)
		if p != w.PartsOrder[len(w.PartsOrder)-1] { // Skip space for the last part.
			buf.WriteByte(' ')
		}
	}
}

// needsQuote returns true when the string s should be quoted in output.
func needsQuote(s string) bool {
	for i := range s {
		if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' {
			return true
		}
	}
	return false
}

// colorize returns the string s wrapped in ANSI code c, unless disabled is true.
func colorize(s interface{}, c int, disabled bool) string {
	if disabled {
		return fmt.Sprintf("%s", s)
	}
	return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s)
}
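
// For illustration: colorize wraps the value in an SGR escape sequence, so
// colorize("ERR", colorRed, false) yields "\x1b[31mERR\x1b[0m", while
// disabled=true returns the plain "ERR".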

// ----- DEFAULT FORMATTERS ---------------------------------------------------

func consoleDefaultPartsOrder() []string {
	return []string{
		TimestampFieldName,
		LevelFieldName,
		CallerFieldName,
		MessageFieldName,
	}
}

func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter {
	if timeFormat == "" {
		timeFormat = consoleDefaultTimeFormat
	}
	return func(i interface{}) string {
		t := "<nil>"
		switch tt := i.(type) {
		case string:
			ts, err := time.Parse(TimeFieldFormat, tt)
			if err != nil {
				t = tt
			} else {
				t = ts.Format(timeFormat)
			}
		case json.Number:
			i, err := tt.Int64()
			if err != nil {
				t = tt.String()
			} else {
				var sec, nsec int64 = i, 0
				switch TimeFieldFormat {
				case TimeFormatUnixMs:
					nsec = int64(time.Duration(i) * time.Millisecond)
					sec = 0
				case TimeFormatUnixMicro:
					nsec = int64(time.Duration(i) * time.Microsecond)
					sec = 0
				}
				ts := time.Unix(sec, nsec).UTC()
				t = ts.Format(timeFormat)
			}
		}
		return colorize(t, colorDarkGray, noColor)
	}
}

func consoleDefaultFormatLevel(noColor bool) Formatter {
	return func(i interface{}) string {
		var l string
		if ll, ok := i.(string); ok {
			switch ll {
			case LevelTraceValue:
				l = colorize("TRC", colorMagenta, noColor)
			case LevelDebugValue:
				l = colorize("DBG", colorYellow, noColor)
			case LevelInfoValue:
				l = colorize("INF", colorGreen, noColor)
			case LevelWarnValue:
				l = colorize("WRN", colorRed, noColor)
			case LevelErrorValue:
				l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor)
			case LevelFatalValue:
				l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor)
			case LevelPanicValue:
				l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor)
			default:
				l = colorize("???", colorBold, noColor)
			}
		} else {
			if i == nil {
				l = colorize("???", colorBold, noColor)
			} else {
				l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3]
			}
		}
		return l
	}
}

func consoleDefaultFormatCaller(noColor bool) Formatter {
	return func(i interface{}) string {
		var c string
		if cc, ok := i.(string); ok {
			c = cc
		}
		if len(c) > 0 {
			if cwd, err := os.Getwd(); err == nil {
				if rel, err := filepath.Rel(cwd, c); err == nil {
					c = rel
				}
			}
			c = colorize(c, colorBold, noColor) + colorize(" >", colorCyan, noColor)
		}
		return c
	}
}

func consoleDefaultFormatMessage(i interface{}) string {
	if i == nil {
		return ""
	}
	return fmt.Sprintf("%s", i)
}

func consoleDefaultFormatFieldName(noColor bool) Formatter {
	return func(i interface{}) string {
		return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor)
	}
}

func consoleDefaultFormatFieldValue(i interface{}) string {
	return fmt.Sprintf("%s", i)
}

func consoleDefaultFormatErrFieldName(noColor bool) Formatter {
	return func(i interface{}) string {
		return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor)
	}
}

func consoleDefaultFormatErrFieldValue(noColor bool) Formatter {
	return func(i interface{}) string {
		return colorize(fmt.Sprintf("%s", i), colorRed, noColor)
	}
}
@ -0,0 +1,433 @@
package zerolog

import (
	"fmt"
	"io/ioutil"
	"math"
	"net"
	"time"
)

// Context configures a new sub-logger with contextual fields.
type Context struct {
	l Logger
}

// Logger returns the logger with the context previously set.
func (c Context) Logger() Logger {
	return c.l
}
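
// A sketch of deriving a sub-logger from a Context (hypothetical caller
// code, not part of this file):
//
//	logger := zerolog.New(os.Stdout).With().
//		Str("component", "api").
//		Timestamp().
//		Logger()
//	logger.Info().Msg("ready") // every event carries the component and time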

// Fields is a helper function to use a map or slice to set fields, using type
// assertion. Only map[string]interface{} and []interface{} are accepted;
// []interface{} must alternate string keys and arbitrary values, and
// extraneous ones are ignored. See the sketch below.
func (c Context) Fields(fields interface{}) Context {
	c.l.context = appendFields(c.l.context, fields)
	return c
}
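
// For instance (hypothetical caller code, not part of this file):
//
//	logger := zerolog.New(os.Stdout).With().
//		Fields(map[string]interface{}{"region": "eu", "shard": 3}).
//		Logger()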

// Dict adds the field key with the dict to the logger context.
func (c Context) Dict(key string, dict *Event) Context {
	dict.buf = enc.AppendEndMarker(dict.buf)
	c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...)
	putEvent(dict)
	return c
}

// Array adds the field key with an array to the event context.
// Use zerolog.Arr() to create the array or pass a type that
// implements the LogArrayMarshaler interface.
func (c Context) Array(key string, arr LogArrayMarshaler) Context {
	c.l.context = enc.AppendKey(c.l.context, key)
	if arr, ok := arr.(*Array); ok {
		c.l.context = arr.write(c.l.context)
		return c
	}
	a := Arr()
	arr.MarshalZerologArray(a)
	c.l.context = a.write(c.l.context)
	return c
}

// Object marshals an object that implements the LogObjectMarshaler interface.
func (c Context) Object(key string, obj LogObjectMarshaler) Context {
	e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
	e.Object(key, obj)
	c.l.context = enc.AppendObjectData(c.l.context, e.buf)
	putEvent(e)
	return c
}

// EmbedObject marshals and embeds an object that implements the LogObjectMarshaler interface.
func (c Context) EmbedObject(obj LogObjectMarshaler) Context {
	e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
	e.EmbedObject(obj)
	c.l.context = enc.AppendObjectData(c.l.context, e.buf)
	putEvent(e)
	return c
}

// Str adds the field key with val as a string to the logger context.
func (c Context) Str(key, val string) Context {
	c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val)
	return c
}

// Strs adds the field key with vals as a []string to the logger context.
func (c Context) Strs(key string, vals []string) Context {
	c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals)
	return c
}

// Stringer adds the field key with val.String() (or null if val is nil) to the logger context.
func (c Context) Stringer(key string, val fmt.Stringer) Context {
	if val != nil {
		c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val.String())
		return c
	}

	c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), nil)
	return c
}

// Bytes adds the field key with val as a []byte to the logger context.
func (c Context) Bytes(key string, val []byte) Context {
	c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val)
	return c
}

// Hex adds the field key with val as a hex string to the logger context.
func (c Context) Hex(key string, val []byte) Context {
	c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val)
	return c
}

// RawJSON adds already encoded JSON to the context.
//
// No sanity check is performed on b; it must not contain carriage returns
// and must be valid JSON.
func (c Context) RawJSON(key string, b []byte) Context {
	c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b)
	return c
}

// AnErr adds the field key with serialized err to the logger context.
func (c Context) AnErr(key string, err error) Context {
	switch m := ErrorMarshalFunc(err).(type) {
	case nil:
		return c
	case LogObjectMarshaler:
		return c.Object(key, m)
	case error:
		if m == nil || isNilValue(m) {
			return c
		}
		return c.Str(key, m.Error())
	case string:
		return c.Str(key, m)
	default:
		return c.Interface(key, m)
	}
}

// Errs adds the field key with errs as an array of serialized errors to the
// logger context.
func (c Context) Errs(key string, errs []error) Context {
	arr := Arr()
	for _, err := range errs {
		switch m := ErrorMarshalFunc(err).(type) {
		case LogObjectMarshaler:
			arr = arr.Object(m)
		case error:
			if m == nil || isNilValue(m) {
				arr = arr.Interface(nil)
			} else {
				arr = arr.Str(m.Error())
			}
		case string:
			arr = arr.Str(m)
		default:
			arr = arr.Interface(m)
		}
	}

	return c.Array(key, arr)
}

// Err adds the field "error" with serialized err to the logger context.
func (c Context) Err(err error) Context {
	return c.AnErr(ErrorFieldName, err)
}

// Bool adds the field key with val as a bool to the logger context.
func (c Context) Bool(key string, b bool) Context {
	c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b)
	return c
}

// Bools adds the field key with val as a []bool to the logger context.
func (c Context) Bools(key string, b []bool) Context {
	c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b)
	return c
}

// Int adds the field key with i as an int to the logger context.
func (c Context) Int(key string, i int) Context {
	c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i)
	return c
}

// Ints adds the field key with i as a []int to the logger context.
func (c Context) Ints(key string, i []int) Context {
	c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i)
	return c
}

// Int8 adds the field key with i as an int8 to the logger context.
func (c Context) Int8(key string, i int8) Context {
	c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i)
	return c
}

// Ints8 adds the field key with i as a []int8 to the logger context.
func (c Context) Ints8(key string, i []int8) Context {
	c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i)
	return c
}

// Int16 adds the field key with i as an int16 to the logger context.
func (c Context) Int16(key string, i int16) Context {
	c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i)
	return c
}

// Ints16 adds the field key with i as a []int16 to the logger context.
func (c Context) Ints16(key string, i []int16) Context {
	c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i)
	return c
}

// Int32 adds the field key with i as an int32 to the logger context.
func (c Context) Int32(key string, i int32) Context {
	c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i)
	return c
}

// Ints32 adds the field key with i as a []int32 to the logger context.
func (c Context) Ints32(key string, i []int32) Context {
	c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i)
	return c
}

// Int64 adds the field key with i as an int64 to the logger context.
func (c Context) Int64(key string, i int64) Context {
	c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i)
	return c
}

// Ints64 adds the field key with i as a []int64 to the logger context.
func (c Context) Ints64(key string, i []int64) Context {
	c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i)
	return c
}

// Uint adds the field key with i as a uint to the logger context.
func (c Context) Uint(key string, i uint) Context {
	c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i)
	return c
}

// Uints adds the field key with i as a []uint to the logger context.
func (c Context) Uints(key string, i []uint) Context {
	c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i)
	return c
}

// Uint8 adds the field key with i as a uint8 to the logger context.
func (c Context) Uint8(key string, i uint8) Context {
	c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i)
	return c
}

// Uints8 adds the field key with i as a []uint8 to the logger context.
func (c Context) Uints8(key string, i []uint8) Context {
	c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i)
	return c
}

// Uint16 adds the field key with i as a uint16 to the logger context.
func (c Context) Uint16(key string, i uint16) Context {
	c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i)
	return c
}

// Uints16 adds the field key with i as a []uint16 to the logger context.
func (c Context) Uints16(key string, i []uint16) Context {
	c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i)
	return c
}

// Uint32 adds the field key with i as a uint32 to the logger context.
func (c Context) Uint32(key string, i uint32) Context {
	c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i)
	return c
}

// Uints32 adds the field key with i as a []uint32 to the logger context.
func (c Context) Uints32(key string, i []uint32) Context {
	c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i)
	return c
}

// Uint64 adds the field key with i as a uint64 to the logger context.
func (c Context) Uint64(key string, i uint64) Context {
	c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i)
	return c
}

// Uints64 adds the field key with i as a []uint64 to the logger context.
func (c Context) Uints64(key string, i []uint64) Context {
	c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i)
	return c
}

// Float32 adds the field key with f as a float32 to the logger context.
func (c Context) Float32(key string, f float32) Context {
	c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f)
	return c
}

// Floats32 adds the field key with f as a []float32 to the logger context.
func (c Context) Floats32(key string, f []float32) Context {
	c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f)
	return c
}

// Float64 adds the field key with f as a float64 to the logger context.
func (c Context) Float64(key string, f float64) Context {
	c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f)
	return c
}

// Floats64 adds the field key with f as a []float64 to the logger context.
func (c Context) Floats64(key string, f []float64) Context {
	c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f)
	return c
}

type timestampHook struct{}

func (ts timestampHook) Run(e *Event, level Level, msg string) {
	e.Timestamp()
}

var th = timestampHook{}

// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key.
// To customize the key name, change zerolog.TimestampFieldName.
//
// NOTE: it won't dedupe the "time" key if the *Context already has one.
func (c Context) Timestamp() Context {
	c.l = c.l.Hook(th)
	return c
}

// Time adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
func (c Context) Time(key string, t time.Time) Context {
	c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
	return c
}

// Times adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
func (c Context) Times(key string, t []time.Time) Context {
	c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
	return c
}

// Dur adds the field key with d divided by unit and stored as a float.
func (c Context) Dur(key string, d time.Duration) Context {
	c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
	return c
}

// Durs adds the field key with d divided by unit and stored as a float.
func (c Context) Durs(key string, d []time.Duration) Context {
	c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
	return c
}

// Interface adds the field key with i marshaled using reflection.
func (c Context) Interface(key string, i interface{}) Context {
	c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i)
	return c
}

type callerHook struct {
	callerSkipFrameCount int
}

func newCallerHook(skipFrameCount int) callerHook {
	return callerHook{callerSkipFrameCount: skipFrameCount}
}

func (ch callerHook) Run(e *Event, level Level, msg string) {
	switch ch.callerSkipFrameCount {
	case useGlobalSkipFrameCount:
		// Extra frames to skip (added by hook infra).
		e.caller(CallerSkipFrameCount + contextCallerSkipFrameCount)
	default:
		// Extra frames to skip (added by hook infra).
		e.caller(ch.callerSkipFrameCount + contextCallerSkipFrameCount)
	}
}

// useGlobalSkipFrameCount acts as a flag to inform callerHook.Run
// to use the global CallerSkipFrameCount.
const useGlobalSkipFrameCount = math.MinInt32

// ch is the default caller hook using the global CallerSkipFrameCount.
var ch = newCallerHook(useGlobalSkipFrameCount)

// Caller adds the file:line of the caller with the zerolog.CallerFieldName key.
func (c Context) Caller() Context {
	c.l = c.l.Hook(ch)
	return c
}

// CallerWithSkipFrameCount adds the file:line of the caller with the zerolog.CallerFieldName key.
// The specified skipFrameCount overrides the global CallerSkipFrameCount for this context's logger.
// If set to -1, the global CallerSkipFrameCount is used.
func (c Context) CallerWithSkipFrameCount(skipFrameCount int) Context {
	c.l = c.l.Hook(newCallerHook(skipFrameCount))
	return c
}
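
// A sketch of attaching caller information (hypothetical caller code, not
// part of this file):
//
//	logger := zerolog.New(os.Stdout).With().Caller().Logger()
//	logger.Info().Msg("hello") // adds e.g. "caller":"main.go:12"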

// Stack enables stack trace printing for the error passed to Err().
func (c Context) Stack() Context {
	c.l.stack = true
	return c
}

// IPAddr adds an IPv4 or IPv6 address to the context.
func (c Context) IPAddr(key string, ip net.IP) Context {
	c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip)
	return c
}

// IPPrefix adds an IPv4 or IPv6 prefix (address and mask) to the context.
func (c Context) IPPrefix(key string, pfx net.IPNet) Context {
	c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx)
	return c
}

// MACAddr adds a MAC address to the context.
func (c Context) MACAddr(key string, ha net.HardwareAddr) Context {
	c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha)
	return c
}
@ -0,0 +1,51 @@
package zerolog

import (
	"context"
)

var disabledLogger *Logger

func init() {
	SetGlobalLevel(TraceLevel)
	l := Nop()
	disabledLogger = &l
}

type ctxKey struct{}

// WithContext returns a copy of ctx with l associated. If an instance of Logger
// is already in the context, the context is not updated.
//
// For instance, to add a field to an existing logger in the context, use this
// notation:
//
//	ctx := r.Context()
//	l := zerolog.Ctx(ctx)
//	l.UpdateContext(func(c Context) Context {
//		return c.Str("bar", "baz")
//	})
func (l *Logger) WithContext(ctx context.Context) context.Context {
	if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok {
		if lp == l {
			// Do not store the same logger twice.
			return ctx
		}
	} else if l.level == Disabled {
		// Do not store a disabled logger.
		return ctx
	}
	return context.WithValue(ctx, ctxKey{}, l)
}

// Ctx returns the Logger associated with the ctx. If no logger is associated,
// DefaultContextLogger is returned, unless DefaultContextLogger is nil, in
// which case a disabled logger is returned.
func Ctx(ctx context.Context) *Logger {
	if l, ok := ctx.Value(ctxKey{}).(*Logger); ok {
		return l
	} else if l = DefaultContextLogger; l != nil {
		return l
	}
	return disabledLogger
}
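
// A sketch of threading a logger through context.Context (hypothetical
// caller code, not part of this file):
//
//	logger := zerolog.New(os.Stdout)
//	ctx := logger.WithContext(context.Background())
//	// ... deeper in the call stack:
//	zerolog.Ctx(ctx).Info().Msg("from ctx")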
@ -0,0 +1,56 @@
package zerolog

import (
	"net"
	"time"
)

type encoder interface {
	AppendArrayDelim(dst []byte) []byte
	AppendArrayEnd(dst []byte) []byte
	AppendArrayStart(dst []byte) []byte
	AppendBeginMarker(dst []byte) []byte
	AppendBool(dst []byte, val bool) []byte
	AppendBools(dst []byte, vals []bool) []byte
	AppendBytes(dst, s []byte) []byte
	AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte
	AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte
	AppendEndMarker(dst []byte) []byte
	AppendFloat32(dst []byte, val float32) []byte
	AppendFloat64(dst []byte, val float64) []byte
	AppendFloats32(dst []byte, vals []float32) []byte
	AppendFloats64(dst []byte, vals []float64) []byte
	AppendHex(dst, s []byte) []byte
	AppendIPAddr(dst []byte, ip net.IP) []byte
	AppendIPPrefix(dst []byte, pfx net.IPNet) []byte
	AppendInt(dst []byte, val int) []byte
	AppendInt16(dst []byte, val int16) []byte
	AppendInt32(dst []byte, val int32) []byte
	AppendInt64(dst []byte, val int64) []byte
	AppendInt8(dst []byte, val int8) []byte
	AppendInterface(dst []byte, i interface{}) []byte
	AppendInts(dst []byte, vals []int) []byte
	AppendInts16(dst []byte, vals []int16) []byte
	AppendInts32(dst []byte, vals []int32) []byte
	AppendInts64(dst []byte, vals []int64) []byte
	AppendInts8(dst []byte, vals []int8) []byte
	AppendKey(dst []byte, key string) []byte
	AppendLineBreak(dst []byte) []byte
	AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte
	AppendNil(dst []byte) []byte
	AppendObjectData(dst []byte, o []byte) []byte
	AppendString(dst []byte, s string) []byte
	AppendStrings(dst []byte, vals []string) []byte
	AppendTime(dst []byte, t time.Time, format string) []byte
	AppendTimes(dst []byte, vals []time.Time, format string) []byte
	AppendUint(dst []byte, val uint) []byte
	AppendUint16(dst []byte, val uint16) []byte
	AppendUint32(dst []byte, val uint32) []byte
	AppendUint64(dst []byte, val uint64) []byte
	AppendUint8(dst []byte, val uint8) []byte
	AppendUints(dst []byte, vals []uint) []byte
	AppendUints16(dst []byte, vals []uint16) []byte
	AppendUints32(dst []byte, vals []uint32) []byte
	AppendUints64(dst []byte, vals []uint64) []byte
	AppendUints8(dst []byte, vals []uint8) []byte
}
@ -0,0 +1,42 @@
// +build binary_log

package zerolog

// This file contains bindings to do binary encoding.

import (
	"github.com/rs/zerolog/internal/cbor"
)

var (
	_ encoder = (*cbor.Encoder)(nil)

	enc = cbor.Encoder{}
)

func init() {
	// Using a closure to reflect changes at runtime.
	cbor.JSONMarshalFunc = func(v interface{}) ([]byte, error) {
		return InterfaceMarshalFunc(v)
	}
}

func appendJSON(dst []byte, j []byte) []byte {
	return cbor.AppendEmbeddedJSON(dst, j)
}

// decodeIfBinaryToString converts a binary-formatted log message to a
// JSON-formatted string log message.
func decodeIfBinaryToString(in []byte) string {
	return cbor.DecodeIfBinaryToString(in)
}

func decodeObjectToStr(in []byte) string {
	return cbor.DecodeObjectToStr(in)
}

// decodeIfBinaryToBytes converts a binary-formatted log message to a
// JSON-formatted bytes log message.
func decodeIfBinaryToBytes(in []byte) []byte {
	return cbor.DecodeIfBinaryToBytes(in)
}
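
// The "binary_log" build tag above selects this CBOR encoder over the JSON
// one defined in the next file; a consumer would opt in with, for example:
//
//	go build -tags binary_log ./...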
@ -0,0 +1,39 @@
// +build !binary_log

package zerolog

// encoder_json.go contains bindings to generate
// a JSON encoded byte stream.

import (
	"github.com/rs/zerolog/internal/json"
)

var (
	_ encoder = (*json.Encoder)(nil)

	enc = json.Encoder{}
)

func init() {
	// Using a closure to reflect changes at runtime.
	json.JSONMarshalFunc = func(v interface{}) ([]byte, error) {
		return InterfaceMarshalFunc(v)
	}
}

func appendJSON(dst []byte, j []byte) []byte {
	return append(dst, j...)
}

func decodeIfBinaryToString(in []byte) string {
	return string(in)
}

func decodeObjectToStr(in []byte) string {
	return string(in)
}

func decodeIfBinaryToBytes(in []byte) []byte {
	return in
}
@ -0,0 +1,773 @@
package zerolog

import (
	"fmt"
	"net"
	"os"
	"runtime"
	"sync"
	"time"
)

var eventPool = &sync.Pool{
	New: func() interface{} {
		return &Event{
			buf: make([]byte, 0, 500),
		}
	},
}

// Event represents a log event. It is instantiated by one of the level
// methods of Logger and finalized by the Msg or Msgf method.
type Event struct {
	buf       []byte
	w         LevelWriter
	level     Level
	done      func(msg string)
	stack     bool   // enable error stack trace
	ch        []Hook // hooks from context
	skipFrame int    // the number of additional frames to skip when printing the caller
}

func putEvent(e *Event) {
	// Proper usage of a sync.Pool requires each entry to have approximately
	// the same memory cost. To obtain this property when the stored type
	// contains a variably-sized buffer, we add a hard limit on the maximum buffer
	// to place back in the pool.
	//
	// See https://golang.org/issue/23199
	const maxSize = 1 << 16 // 64KiB
	if cap(e.buf) > maxSize {
		return
	}
	eventPool.Put(e)
}

// LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface
// to be implemented by types used with Event/Context's Object methods.
type LogObjectMarshaler interface {
	MarshalZerologObject(e *Event)
}

// LogArrayMarshaler provides a strongly-typed and encoding-agnostic interface
// to be implemented by types used with Event/Context's Array methods.
type LogArrayMarshaler interface {
	MarshalZerologArray(a *Array)
}

func newEvent(w LevelWriter, level Level) *Event {
	e := eventPool.Get().(*Event)
	e.buf = e.buf[:0]
	e.ch = nil
	e.buf = enc.AppendBeginMarker(e.buf)
	e.w = w
	e.level = level
	e.stack = false
	e.skipFrame = 0
	return e
}

func (e *Event) write() (err error) {
	if e == nil {
		return nil
	}
	if e.level != Disabled {
		e.buf = enc.AppendEndMarker(e.buf)
		e.buf = enc.AppendLineBreak(e.buf)
		if e.w != nil {
			_, err = e.w.WriteLevel(e.level, e.buf)
		}
	}
	putEvent(e)
	return
}

// Enabled returns false if the *Event is going to be filtered out by
// log level or sampling.
func (e *Event) Enabled() bool {
	return e != nil && e.level != Disabled
}

// Discard disables the event so Msg(f) won't print it.
func (e *Event) Discard() *Event {
	if e == nil {
		return e
	}
	e.level = Disabled
	return nil
}

// Msg sends the *Event with msg added as the message field if not empty.
//
// NOTICE: once this method is called, the *Event should be disposed.
// Calling Msg twice can have unexpected results.
func (e *Event) Msg(msg string) {
	if e == nil {
		return
	}
	e.msg(msg)
}

// Send is equivalent to calling Msg("").
//
// NOTICE: once this method is called, the *Event should be disposed.
func (e *Event) Send() {
	if e == nil {
		return
	}
	e.msg("")
}

// Msgf sends the event with formatted msg added as the message field if not empty.
//
// NOTICE: once this method is called, the *Event should be disposed.
// Calling Msgf twice can have unexpected results.
func (e *Event) Msgf(format string, v ...interface{}) {
	if e == nil {
		return
	}
	e.msg(fmt.Sprintf(format, v...))
}
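
// A minimal sketch of the Event lifecycle above (hypothetical caller code,
// not part of this file): the level method borrows an Event from the pool,
// field methods append to its buffer, and Msg/Msgf/Send finalize it and
// return it to the pool:
//
//	logger := zerolog.New(os.Stdout)
//	logger.Info().Str("user", "anna").Int("attempt", 3).Msg("login")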

func (e *Event) msg(msg string) {
	for _, hook := range e.ch {
		hook.Run(e, e.level, msg)
	}
	if msg != "" {
		e.buf = enc.AppendString(enc.AppendKey(e.buf, MessageFieldName), msg)
	}
	if e.done != nil {
		defer e.done(msg)
	}
	if err := e.write(); err != nil {
		if ErrorHandler != nil {
			ErrorHandler(err)
		} else {
			fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v\n", err)
		}
	}
}

// Fields is a helper function to use a map or slice to set fields, using type
// assertion. Only map[string]interface{} and []interface{} are accepted;
// []interface{} must alternate string keys and arbitrary values, and
// extraneous ones are ignored.
func (e *Event) Fields(fields interface{}) *Event {
	if e == nil {
		return e
	}
	e.buf = appendFields(e.buf, fields)
	return e
}

// Dict adds the field key with a dict to the event context.
// Use zerolog.Dict() to create the dictionary.
func (e *Event) Dict(key string, dict *Event) *Event {
	if e == nil {
		return e
	}
	dict.buf = enc.AppendEndMarker(dict.buf)
	e.buf = append(enc.AppendKey(e.buf, key), dict.buf...)
	putEvent(dict)
	return e
}

// Dict creates an Event to be used with the *Event.Dict method.
// Call the usual field methods like Str, Int etc. to add fields to this
// event, then pass it as the argument to the *Event.Dict method.
func Dict() *Event {
	return newEvent(nil, 0)
}
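
// For instance (hypothetical caller code, not part of this file):
//
//	logger.Info().
//		Dict("request", zerolog.Dict().
//			Str("method", "GET").
//			Str("path", "/health")).
//		Msg("handled")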

// Array adds the field key with an array to the event context.
// Use zerolog.Arr() to create the array or pass a type that
// implements the LogArrayMarshaler interface.
func (e *Event) Array(key string, arr LogArrayMarshaler) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendKey(e.buf, key)
	var a *Array
	if aa, ok := arr.(*Array); ok {
		a = aa
	} else {
		a = Arr()
		arr.MarshalZerologArray(a)
	}
	e.buf = a.write(e.buf)
	return e
}

func (e *Event) appendObject(obj LogObjectMarshaler) {
	e.buf = enc.AppendBeginMarker(e.buf)
	obj.MarshalZerologObject(e)
	e.buf = enc.AppendEndMarker(e.buf)
}

// Object marshals an object that implements the LogObjectMarshaler interface.
func (e *Event) Object(key string, obj LogObjectMarshaler) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendKey(e.buf, key)
	if obj == nil {
		e.buf = enc.AppendNil(e.buf)
		return e
	}

	e.appendObject(obj)
	return e
}

// Func allows an anonymous func to run only if the event is enabled.
func (e *Event) Func(f func(e *Event)) *Event {
	if e != nil && e.Enabled() {
		f(e)
	}
	return e
}
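
// Func is handy for deferring expensive field computation until the event
// is known to be enabled (hypothetical caller code, not part of this file):
//
//	logger.Debug().
//		Func(func(e *zerolog.Event) {
//			e.Str("dump", buildExpensiveDump()) // buildExpensiveDump is hypothetical
//		}).
//		Msg("state")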
||||||
|
|
||||||
|
// EmbedObject marshals an object that implement the LogObjectMarshaler interface.
|
||||||
|
func (e *Event) EmbedObject(obj LogObjectMarshaler) *Event { |
||||||
|
if e == nil { |
||||||
|
return e |
||||||
|
} |
||||||
|
if obj == nil { |
||||||
|
return e |
||||||
|
} |
||||||
|
obj.MarshalZerologObject(e) |
||||||
|
return e |
||||||
|
} |
||||||
|
|
||||||
|
// Str adds the field key with val as a string to the *Event context.
|
||||||
|
func (e *Event) Str(key, val string) *Event { |
||||||
|
if e == nil { |
||||||
|
return e |
||||||
|
} |
||||||
|
e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val) |
||||||
|
return e |
||||||
|
} |
||||||
|
|
||||||
|
// Strs adds the field key with vals as a []string to the *Event context.
|
||||||
|
func (e *Event) Strs(key string, vals []string) *Event { |
||||||
|
if e == nil { |
||||||
|
return e |
||||||
|
} |
||||||
|
e.buf = enc.AppendStrings(enc.AppendKey(e.buf, key), vals) |
||||||
|
return e |
||||||
|
} |
||||||
|
|
||||||
|
// Stringer adds the field key with val.String() (or null if val is nil)
|
||||||
|
// to the *Event context.
|
||||||
|
func (e *Event) Stringer(key string, val fmt.Stringer) *Event { |
||||||
|
if e == nil { |
||||||
|
return e |
||||||
|
} |
||||||
|
e.buf = enc.AppendStringer(enc.AppendKey(e.buf, key), val) |
||||||
|
return e |
||||||
|
} |
||||||
|
|
||||||
|
// Stringers adds the field key with vals where each individual val
|
||||||
|
// is used as val.String() (or null if val is empty) to the *Event
|
||||||
|
// context.
|
||||||
|
func (e *Event) Stringers(key string, vals []fmt.Stringer) *Event { |
||||||
|
if e == nil { |
||||||
|
return e |
||||||
|
} |
||||||
|
e.buf = enc.AppendStringers(enc.AppendKey(e.buf, key), vals) |
||||||
|
return e |
||||||
|
} |
||||||
|
|
||||||
|
// Bytes adds the field key with val as a string to the *Event context.
//
// Runes outside of normal ASCII ranges will be hex-encoded in the resulting
// JSON.
func (e *Event) Bytes(key string, val []byte) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendBytes(enc.AppendKey(e.buf, key), val)
	return e
}

// Hex adds the field key with val as a hex string to the *Event context.
func (e *Event) Hex(key string, val []byte) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendHex(enc.AppendKey(e.buf, key), val)
	return e
}

// RawJSON adds already encoded JSON to the log line under key.
//
// No sanity check is performed on b; it must not contain carriage returns
// and must be valid JSON.
func (e *Event) RawJSON(key string, b []byte) *Event {
	if e == nil {
		return e
	}
	e.buf = appendJSON(enc.AppendKey(e.buf, key), b)
	return e
}

// AnErr adds the field key with serialized err to the *Event context.
// If err is nil, no field is added.
func (e *Event) AnErr(key string, err error) *Event {
	if e == nil {
		return e
	}
	switch m := ErrorMarshalFunc(err).(type) {
	case nil:
		return e
	case LogObjectMarshaler:
		return e.Object(key, m)
	case error:
		if m == nil || isNilValue(m) {
			return e
		}
		return e.Str(key, m.Error())
	case string:
		return e.Str(key, m)
	default:
		return e.Interface(key, m)
	}
}

// Errs adds the field key with errs as an array of serialized errors to the
// *Event context.
func (e *Event) Errs(key string, errs []error) *Event {
	if e == nil {
		return e
	}
	arr := Arr()
	for _, err := range errs {
		switch m := ErrorMarshalFunc(err).(type) {
		case LogObjectMarshaler:
			arr = arr.Object(m)
		case error:
			arr = arr.Err(m)
		case string:
			arr = arr.Str(m)
		default:
			arr = arr.Interface(m)
		}
	}

	return e.Array(key, arr)
}

// Err adds the field "error" with serialized err to the *Event context.
// If err is nil, no field is added.
//
// To customize the key name, change zerolog.ErrorFieldName.
//
// If Stack() has been called before and zerolog.ErrorStackMarshaler is
// defined, the err is passed to ErrorStackMarshaler and the result is
// appended under the zerolog.ErrorStackFieldName key.
func (e *Event) Err(err error) *Event {
	if e == nil {
		return e
	}
	if e.stack && ErrorStackMarshaler != nil {
		switch m := ErrorStackMarshaler(err).(type) {
		case nil:
		case LogObjectMarshaler:
			e.Object(ErrorStackFieldName, m)
		case error:
			if m != nil && !isNilValue(m) {
				e.Str(ErrorStackFieldName, m.Error())
			}
		case string:
			e.Str(ErrorStackFieldName, m)
		default:
			e.Interface(ErrorStackFieldName, m)
		}
	}
	return e.AnErr(ErrorFieldName, err)
}

// Stack enables stack trace printing for the error passed to Err().
//
// ErrorStackMarshaler must be set for this method to do something.
func (e *Event) Stack() *Event {
	if e != nil {
		e.stack = true
	}
	return e
}
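
// Illustrative usage sketch (not part of the original file): with a stack
// marshaler installed, Stack() must be called before Err() so the trace is
// captured alongside the error field. The pkgerrors package referenced below
// is the companion zerolog/pkgerrors helper:
//
//	zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
//	log.Error().Stack().Err(err).Msg("request failed")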

// Bool adds the field key with val as a bool to the *Event context.
func (e *Event) Bool(key string, b bool) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendBool(enc.AppendKey(e.buf, key), b)
	return e
}

// Bools adds the field key with val as a []bool to the *Event context.
func (e *Event) Bools(key string, b []bool) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendBools(enc.AppendKey(e.buf, key), b)
	return e
}

// Int adds the field key with i as an int to the *Event context.
func (e *Event) Int(key string, i int) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendInt(enc.AppendKey(e.buf, key), i)
	return e
}

// Ints adds the field key with i as a []int to the *Event context.
func (e *Event) Ints(key string, i []int) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendInts(enc.AppendKey(e.buf, key), i)
	return e
}

// Int8 adds the field key with i as an int8 to the *Event context.
func (e *Event) Int8(key string, i int8) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendInt8(enc.AppendKey(e.buf, key), i)
	return e
}

// Ints8 adds the field key with i as a []int8 to the *Event context.
func (e *Event) Ints8(key string, i []int8) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendInts8(enc.AppendKey(e.buf, key), i)
	return e
}

// Int16 adds the field key with i as an int16 to the *Event context.
func (e *Event) Int16(key string, i int16) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendInt16(enc.AppendKey(e.buf, key), i)
	return e
}

// Ints16 adds the field key with i as a []int16 to the *Event context.
func (e *Event) Ints16(key string, i []int16) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendInts16(enc.AppendKey(e.buf, key), i)
	return e
}

// Int32 adds the field key with i as an int32 to the *Event context.
func (e *Event) Int32(key string, i int32) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendInt32(enc.AppendKey(e.buf, key), i)
	return e
}

// Ints32 adds the field key with i as a []int32 to the *Event context.
func (e *Event) Ints32(key string, i []int32) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendInts32(enc.AppendKey(e.buf, key), i)
	return e
}

// Int64 adds the field key with i as an int64 to the *Event context.
func (e *Event) Int64(key string, i int64) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendInt64(enc.AppendKey(e.buf, key), i)
	return e
}

// Ints64 adds the field key with i as a []int64 to the *Event context.
func (e *Event) Ints64(key string, i []int64) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendInts64(enc.AppendKey(e.buf, key), i)
	return e
}

// Uint adds the field key with i as a uint to the *Event context.
func (e *Event) Uint(key string, i uint) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendUint(enc.AppendKey(e.buf, key), i)
	return e
}

// Uints adds the field key with i as a []uint to the *Event context.
func (e *Event) Uints(key string, i []uint) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendUints(enc.AppendKey(e.buf, key), i)
	return e
}

// Uint8 adds the field key with i as a uint8 to the *Event context.
func (e *Event) Uint8(key string, i uint8) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendUint8(enc.AppendKey(e.buf, key), i)
	return e
}

// Uints8 adds the field key with i as a []uint8 to the *Event context.
func (e *Event) Uints8(key string, i []uint8) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendUints8(enc.AppendKey(e.buf, key), i)
	return e
}

// Uint16 adds the field key with i as a uint16 to the *Event context.
func (e *Event) Uint16(key string, i uint16) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendUint16(enc.AppendKey(e.buf, key), i)
	return e
}

// Uints16 adds the field key with i as a []uint16 to the *Event context.
func (e *Event) Uints16(key string, i []uint16) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendUints16(enc.AppendKey(e.buf, key), i)
	return e
}

// Uint32 adds the field key with i as a uint32 to the *Event context.
func (e *Event) Uint32(key string, i uint32) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendUint32(enc.AppendKey(e.buf, key), i)
	return e
}

// Uints32 adds the field key with i as a []uint32 to the *Event context.
func (e *Event) Uints32(key string, i []uint32) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendUints32(enc.AppendKey(e.buf, key), i)
	return e
}

// Uint64 adds the field key with i as a uint64 to the *Event context.
func (e *Event) Uint64(key string, i uint64) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendUint64(enc.AppendKey(e.buf, key), i)
	return e
}

// Uints64 adds the field key with i as a []uint64 to the *Event context.
func (e *Event) Uints64(key string, i []uint64) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendUints64(enc.AppendKey(e.buf, key), i)
	return e
}

// Float32 adds the field key with f as a float32 to the *Event context.
func (e *Event) Float32(key string, f float32) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f)
	return e
}

// Floats32 adds the field key with f as a []float32 to the *Event context.
func (e *Event) Floats32(key string, f []float32) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f)
	return e
}

// Float64 adds the field key with f as a float64 to the *Event context.
func (e *Event) Float64(key string, f float64) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f)
	return e
}

// Floats64 adds the field key with f as a []float64 to the *Event context.
func (e *Event) Floats64(key string, f []float64) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f)
	return e
}

// Timestamp adds the current local time as a UNIX timestamp to the *Event
// context with the "time" key.
// To customize the key name, change zerolog.TimestampFieldName.
//
// NOTE: It won't dedupe the "time" key if the *Event (or *Context) has one
// already.
func (e *Event) Timestamp() *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendTime(enc.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat)
	return e
}

// Time adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
func (e *Event) Time(key string, t time.Time) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendTime(enc.AppendKey(e.buf, key), t, TimeFieldFormat)
	return e
}

// Times adds the field key with t formatted as strings using zerolog.TimeFieldFormat.
func (e *Event) Times(key string, t []time.Time) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendTimes(enc.AppendKey(e.buf, key), t, TimeFieldFormat)
	return e
}

// Dur adds the field key with duration d stored as zerolog.DurationFieldUnit.
// If zerolog.DurationFieldInteger is true, durations are rendered as an
// integer instead of a float.
func (e *Event) Dur(key string, d time.Duration) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
	return e
}

// Durs adds the field key with durations d stored as zerolog.DurationFieldUnit.
// If zerolog.DurationFieldInteger is true, durations are rendered as integers
// instead of floats.
func (e *Event) Durs(key string, d []time.Duration) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
	return e
}

// TimeDiff adds the field key with the positive duration between time t and
// start. If time t is not greater than start, the duration will be 0.
// The duration format follows the same principle as Dur().
func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event {
	if e == nil {
		return e
	}
	var d time.Duration
	if t.After(start) {
		d = t.Sub(start)
	}
	e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
	return e
}
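
// Illustrative sketch (not part of the original file), assuming a Logger
// named log: with the default DurationFieldUnit (time.Millisecond) and
// DurationFieldInteger=false, both fields below render as fractional
// milliseconds:
//
//	start := time.Now()
//	doWork() // hypothetical
//	log.Info().Dur("elapsed", time.Since(start)).Msg("done")
//	log.Info().TimeDiff("elapsed", time.Now(), start).Msg("done")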

// Interface adds the field key with i marshaled using reflection.
func (e *Event) Interface(key string, i interface{}) *Event {
	if e == nil {
		return e
	}
	if obj, ok := i.(LogObjectMarshaler); ok {
		return e.Object(key, obj)
	}
	e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), i)
	return e
}

// CallerSkipFrame instructs any future Caller calls to skip the specified
// number of frames. This includes those added via hooks from the context.
func (e *Event) CallerSkipFrame(skip int) *Event {
	if e == nil {
		return e
	}
	e.skipFrame += skip
	return e
}

// Caller adds the file:line of the caller with the zerolog.CallerFieldName key.
// The argument skip is the number of stack frames to ascend.
// If skip is not passed, the global variable CallerSkipFrameCount is used.
func (e *Event) Caller(skip ...int) *Event {
	sk := CallerSkipFrameCount
	if len(skip) > 0 {
		sk = skip[0] + CallerSkipFrameCount
	}
	return e.caller(sk)
}

func (e *Event) caller(skip int) *Event {
	if e == nil {
		return e
	}
	_, file, line, ok := runtime.Caller(skip + e.skipFrame)
	if !ok {
		return e
	}
	e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(file, line))
	return e
}
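
// Illustrative sketch (not part of the original file): when logging through a
// helper that wraps the logger, skip one extra frame so the caller field
// points at the helper's caller rather than the helper itself:
//
//	func logError(msg string) { // hypothetical wrapper
//		log.Error().CallerSkipFrame(1).Caller().Msg(msg)
//	}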

// IPAddr adds IPv4 or IPv6 Address to the event.
func (e *Event) IPAddr(key string, ip net.IP) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendIPAddr(enc.AppendKey(e.buf, key), ip)
	return e
}

// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the event.
func (e *Event) IPPrefix(key string, pfx net.IPNet) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendIPPrefix(enc.AppendKey(e.buf, key), pfx)
	return e
}

// MACAddr adds MAC address to the event.
func (e *Event) MACAddr(key string, ha net.HardwareAddr) *Event {
	if e == nil {
		return e
	}
	e.buf = enc.AppendMACAddr(enc.AppendKey(e.buf, key), ha)
	return e
}
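
// Illustrative sketch (not part of the original file), assuming a Logger
// named log; the network types render as their usual string forms:
//
//	log.Info().
//		IPAddr("ip", net.ParseIP("192.168.0.1")).
//		MACAddr("mac", net.HardwareAddr{0x00, 0x14, 0x22, 0x01, 0x23, 0x45}).
//		Msg("link up")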
@ -0,0 +1,277 @@
package zerolog

import (
	"encoding/json"
	"net"
	"sort"
	"time"
	"unsafe"
)

func isNilValue(i interface{}) bool {
	return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0
}
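
// Note (added for clarity): the cast above reads the interface's data word
// directly, so a typed-nil error stored in an interface, e.g. a nil
// (*MyError) assigned to an error variable, is detected as nil even though
// a plain == nil comparison on the interface would report false.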

func appendFields(dst []byte, fields interface{}) []byte {
	switch fields := fields.(type) {
	case []interface{}:
		if n := len(fields); n&0x1 == 1 { // odd number of key/value items: drop the last one
			fields = fields[:n-1]
		}
		dst = appendFieldList(dst, fields)
	case map[string]interface{}:
		keys := make([]string, 0, len(fields))
		for key := range fields {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		kv := make([]interface{}, 2)
		for _, key := range keys {
			kv[0], kv[1] = key, fields[key]
			dst = appendFieldList(dst, kv)
		}
	}
	return dst
}

func appendFieldList(dst []byte, kvList []interface{}) []byte {
	for i, n := 0, len(kvList); i < n; i += 2 {
		key, val := kvList[i], kvList[i+1]
		if key, ok := key.(string); ok {
			dst = enc.AppendKey(dst, key)
		} else {
			continue
		}
		if val, ok := val.(LogObjectMarshaler); ok {
			e := newEvent(nil, 0)
			e.buf = e.buf[:0]
			e.appendObject(val)
			dst = append(dst, e.buf...)
			putEvent(e)
			continue
		}
		switch val := val.(type) {
		case string:
			dst = enc.AppendString(dst, val)
		case []byte:
			dst = enc.AppendBytes(dst, val)
		case error:
			switch m := ErrorMarshalFunc(val).(type) {
			case LogObjectMarshaler:
				e := newEvent(nil, 0)
				e.buf = e.buf[:0]
				e.appendObject(m)
				dst = append(dst, e.buf...)
				putEvent(e)
			case error:
				if m == nil || isNilValue(m) {
					dst = enc.AppendNil(dst)
				} else {
					dst = enc.AppendString(dst, m.Error())
				}
			case string:
				dst = enc.AppendString(dst, m)
			default:
				dst = enc.AppendInterface(dst, m)
			}
		case []error:
			dst = enc.AppendArrayStart(dst)
			for i, err := range val {
				switch m := ErrorMarshalFunc(err).(type) {
				case LogObjectMarshaler:
					e := newEvent(nil, 0)
					e.buf = e.buf[:0]
					e.appendObject(m)
					dst = append(dst, e.buf...)
					putEvent(e)
				case error:
					if m == nil || isNilValue(m) {
						dst = enc.AppendNil(dst)
					} else {
						dst = enc.AppendString(dst, m.Error())
					}
				case string:
					dst = enc.AppendString(dst, m)
				default:
					dst = enc.AppendInterface(dst, m)
				}

				if i < (len(val) - 1) {
					// Fixed: the appended delimiter was previously discarded
					// because the result of AppendArrayDelim was not assigned.
					dst = enc.AppendArrayDelim(dst)
				}
			}
			dst = enc.AppendArrayEnd(dst)
		case bool:
			dst = enc.AppendBool(dst, val)
		case int:
			dst = enc.AppendInt(dst, val)
		case int8:
			dst = enc.AppendInt8(dst, val)
		case int16:
			dst = enc.AppendInt16(dst, val)
		case int32:
			dst = enc.AppendInt32(dst, val)
		case int64:
			dst = enc.AppendInt64(dst, val)
		case uint:
			dst = enc.AppendUint(dst, val)
		case uint8:
			dst = enc.AppendUint8(dst, val)
		case uint16:
			dst = enc.AppendUint16(dst, val)
		case uint32:
			dst = enc.AppendUint32(dst, val)
		case uint64:
			dst = enc.AppendUint64(dst, val)
		case float32:
			dst = enc.AppendFloat32(dst, val)
		case float64:
			dst = enc.AppendFloat64(dst, val)
		case time.Time:
			dst = enc.AppendTime(dst, val, TimeFieldFormat)
		case time.Duration:
			dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger)
		case *string:
			if val != nil {
				dst = enc.AppendString(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *bool:
			if val != nil {
				dst = enc.AppendBool(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *int:
			if val != nil {
				dst = enc.AppendInt(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *int8:
			if val != nil {
				dst = enc.AppendInt8(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *int16:
			if val != nil {
				dst = enc.AppendInt16(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *int32:
			if val != nil {
				dst = enc.AppendInt32(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *int64:
			if val != nil {
				dst = enc.AppendInt64(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *uint:
			if val != nil {
				dst = enc.AppendUint(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *uint8:
			if val != nil {
				dst = enc.AppendUint8(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *uint16:
			if val != nil {
				dst = enc.AppendUint16(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *uint32:
			if val != nil {
				dst = enc.AppendUint32(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *uint64:
			if val != nil {
				dst = enc.AppendUint64(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *float32:
			if val != nil {
				dst = enc.AppendFloat32(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *float64:
			if val != nil {
				dst = enc.AppendFloat64(dst, *val)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *time.Time:
			if val != nil {
				dst = enc.AppendTime(dst, *val, TimeFieldFormat)
			} else {
				dst = enc.AppendNil(dst)
			}
		case *time.Duration:
			if val != nil {
				dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger)
			} else {
				dst = enc.AppendNil(dst)
			}
		case []string:
			dst = enc.AppendStrings(dst, val)
		case []bool:
			dst = enc.AppendBools(dst, val)
		case []int:
			dst = enc.AppendInts(dst, val)
		case []int8:
			dst = enc.AppendInts8(dst, val)
		case []int16:
			dst = enc.AppendInts16(dst, val)
		case []int32:
			dst = enc.AppendInts32(dst, val)
		case []int64:
			dst = enc.AppendInts64(dst, val)
		case []uint:
			dst = enc.AppendUints(dst, val)
		// case []uint8:
		//	dst = enc.AppendUints8(dst, val)
		case []uint16:
			dst = enc.AppendUints16(dst, val)
		case []uint32:
			dst = enc.AppendUints32(dst, val)
		case []uint64:
			dst = enc.AppendUints64(dst, val)
		case []float32:
			dst = enc.AppendFloats32(dst, val)
		case []float64:
			dst = enc.AppendFloats64(dst, val)
		case []time.Time:
			dst = enc.AppendTimes(dst, val, TimeFieldFormat)
		case []time.Duration:
			dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger)
		case nil:
			dst = enc.AppendNil(dst)
		case net.IP:
			dst = enc.AppendIPAddr(dst, val)
		case net.IPNet:
			dst = enc.AppendIPPrefix(dst, val)
		case net.HardwareAddr:
			dst = enc.AppendMACAddr(dst, val)
		case json.RawMessage:
			dst = appendJSON(dst, val)
		default:
			dst = enc.AppendInterface(dst, val)
		}
	}
	return dst
}
@ -0,0 +1,138 @@
package zerolog

import (
	"encoding/json"
	"strconv"
	"sync/atomic"
	"time"
)

const (
	// TimeFormatUnix defines a time format that causes time fields to be
	// serialized as Unix timestamp integers.
	TimeFormatUnix = ""

	// TimeFormatUnixMs defines a time format that causes time fields to be
	// serialized as Unix timestamp integers in milliseconds.
	TimeFormatUnixMs = "UNIXMS"

	// TimeFormatUnixMicro defines a time format that causes time fields to be
	// serialized as Unix timestamp integers in microseconds.
	TimeFormatUnixMicro = "UNIXMICRO"
)

var (
	// TimestampFieldName is the field name used for the timestamp field.
	TimestampFieldName = "time"

	// LevelFieldName is the field name used for the level field.
	LevelFieldName = "level"

	// LevelTraceValue is the value used for the trace level field.
	LevelTraceValue = "trace"
	// LevelDebugValue is the value used for the debug level field.
	LevelDebugValue = "debug"
	// LevelInfoValue is the value used for the info level field.
	LevelInfoValue = "info"
	// LevelWarnValue is the value used for the warn level field.
	LevelWarnValue = "warn"
	// LevelErrorValue is the value used for the error level field.
	LevelErrorValue = "error"
	// LevelFatalValue is the value used for the fatal level field.
	LevelFatalValue = "fatal"
	// LevelPanicValue is the value used for the panic level field.
	LevelPanicValue = "panic"

	// LevelFieldMarshalFunc allows customization of global level field marshaling.
	LevelFieldMarshalFunc = func(l Level) string {
		return l.String()
	}

	// MessageFieldName is the field name used for the message field.
	MessageFieldName = "message"

	// ErrorFieldName is the field name used for error fields.
	ErrorFieldName = "error"

	// CallerFieldName is the field name used for the caller field.
	CallerFieldName = "caller"

	// CallerSkipFrameCount is the number of stack frames to skip to find the caller.
	CallerSkipFrameCount = 2

	// CallerMarshalFunc allows customization of global caller marshaling.
	CallerMarshalFunc = func(file string, line int) string {
		return file + ":" + strconv.Itoa(line)
	}

	// ErrorStackFieldName is the field name used for error stacks.
	ErrorStackFieldName = "stack"

	// ErrorStackMarshaler extracts the stack from err, if any.
	ErrorStackMarshaler func(err error) interface{}

	// ErrorMarshalFunc allows customization of global error marshaling.
	ErrorMarshalFunc = func(err error) interface{} {
		return err
	}

	// InterfaceMarshalFunc allows customization of interface marshaling.
	// Default: "encoding/json.Marshal".
	InterfaceMarshalFunc = json.Marshal

	// TimeFieldFormat defines the time format of the Time field type. If set
	// to TimeFormatUnix, TimeFormatUnixMs or TimeFormatUnixMicro, the time is
	// formatted as a UNIX timestamp integer.
	TimeFieldFormat = time.RFC3339

	// TimestampFunc defines the function called to generate a timestamp.
	TimestampFunc = time.Now

	// DurationFieldUnit defines the unit for time.Duration type fields added
	// using the Dur method.
	DurationFieldUnit = time.Millisecond

	// DurationFieldInteger renders Dur fields as integer instead of float if
	// set to true.
	DurationFieldInteger = false

	// ErrorHandler is called whenever zerolog fails to write an event on its
	// output. If not set, an error is printed on stderr. This handler must
	// be thread safe and non-blocking.
	ErrorHandler func(err error)

	// DefaultContextLogger is returned from Ctx() if there is no logger associated
	// with the context.
	DefaultContextLogger *Logger
)

var (
	gLevel          = new(int32)
	disableSampling = new(int32)
)

// SetGlobalLevel sets the global override for log level. If this
// value is raised, all Loggers will use at least this value.
//
// To globally disable logs, set GlobalLevel to Disabled.
func SetGlobalLevel(l Level) {
	atomic.StoreInt32(gLevel, int32(l))
}

// GlobalLevel returns the current global log level.
func GlobalLevel() Level {
	return Level(atomic.LoadInt32(gLevel))
}
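
// Illustrative sketch (not part of the original file): raising the global
// floor silences anything below Info on every Logger, regardless of each
// Logger's own level:
//
//	zerolog.SetGlobalLevel(zerolog.InfoLevel)
//	log.Debug().Msg("dropped") // below the global level, not written
//	log.Info().Msg("written")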

// DisableSampling will disable sampling in all Loggers if true.
func DisableSampling(v bool) {
	var i int32
	if v {
		i = 1
	}
	atomic.StoreInt32(disableSampling, i)
}

func samplingDisabled() bool {
	return atomic.LoadInt32(disableSampling) == 1
}
@ -0,0 +1,7 @@
// +build go1.12

package zerolog

// Since go 1.12, some auto generated init functions are hidden from
// runtime.Caller.
const contextCallerSkipFrameCount = 2
@ -0,0 +1,64 @@
package zerolog

// Hook defines an interface to a log hook.
type Hook interface {
	// Run runs the hook with the event.
	Run(e *Event, level Level, message string)
}

// HookFunc is an adaptor to allow the use of an ordinary function
// as a Hook.
type HookFunc func(e *Event, level Level, message string)

// Run implements the Hook interface.
func (h HookFunc) Run(e *Event, level Level, message string) {
	h(e, level, message)
}

// LevelHook applies a different hook for each level.
type LevelHook struct {
	NoLevelHook, TraceHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook
}

// Run implements the Hook interface.
func (h LevelHook) Run(e *Event, level Level, message string) {
	switch level {
	case TraceLevel:
		if h.TraceHook != nil {
			h.TraceHook.Run(e, level, message)
		}
	case DebugLevel:
		if h.DebugHook != nil {
			h.DebugHook.Run(e, level, message)
		}
	case InfoLevel:
		if h.InfoHook != nil {
			h.InfoHook.Run(e, level, message)
		}
	case WarnLevel:
		if h.WarnHook != nil {
			h.WarnHook.Run(e, level, message)
		}
	case ErrorLevel:
		if h.ErrorHook != nil {
			h.ErrorHook.Run(e, level, message)
		}
	case FatalLevel:
		if h.FatalHook != nil {
			h.FatalHook.Run(e, level, message)
		}
	case PanicLevel:
		if h.PanicHook != nil {
			h.PanicHook.Run(e, level, message)
		}
	case NoLevel:
		if h.NoLevelHook != nil {
			h.NoLevelHook.Run(e, level, message)
		}
	}
}

// NewLevelHook returns a new LevelHook.
func NewLevelHook() LevelHook {
	return LevelHook{}
}
@ -0,0 +1,56 @@
## Reference

CBOR encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049).

## Comparison of JSON vs CBOR

The two main areas of reduction are:

1. CPU usage to write a log message
2. Size (in bytes) of log messages

CPU usage savings are shown below:
```
name                                JSON time/op  CBOR time/op  delta
Info-32                             15.3ns ± 1%   11.7ns ± 3%   -23.78%  (p=0.000 n=9+10)
ContextFields-32                    16.2ns ± 2%   12.3ns ± 3%   -23.97%  (p=0.000 n=9+9)
ContextAppend-32                    6.70ns ± 0%   6.20ns ± 0%    -7.44%  (p=0.000 n=9+9)
LogFields-32                        66.4ns ± 0%   24.6ns ± 2%   -62.89%  (p=0.000 n=10+9)
LogArrayObject-32                    911ns ±11%    768ns ± 6%   -15.64%  (p=0.000 n=10+10)
LogFieldType/Floats-32              70.3ns ± 2%   29.5ns ± 1%   -57.98%  (p=0.000 n=10+10)
LogFieldType/Err-32                 14.0ns ± 3%   12.1ns ± 8%   -13.20%  (p=0.000 n=8+10)
LogFieldType/Dur-32                 17.2ns ± 2%   13.1ns ± 1%   -24.27%  (p=0.000 n=10+9)
LogFieldType/Object-32              54.3ns ±11%   52.3ns ± 7%       ~    (p=0.239 n=10+10)
LogFieldType/Ints-32                20.3ns ± 2%   15.1ns ± 2%   -25.50%  (p=0.000 n=9+10)
LogFieldType/Interfaces-32           642ns ±11%    621ns ± 9%       ~    (p=0.118 n=10+10)
LogFieldType/Interface(Objects)-32   635ns ±13%    632ns ± 9%       ~    (p=0.592 n=10+10)
LogFieldType/Times-32                294ns ± 0%     27ns ± 1%   -90.71%  (p=0.000 n=10+9)
LogFieldType/Durs-32                 121ns ± 0%     33ns ± 2%   -72.44%  (p=0.000 n=9+9)
LogFieldType/Interface(Object)-32   56.6ns ± 8%   52.3ns ± 8%    -7.54%  (p=0.007 n=10+10)
LogFieldType/Errs-32                17.8ns ± 3%   16.1ns ± 2%    -9.71%  (p=0.000 n=10+9)
LogFieldType/Time-32                40.5ns ± 1%   12.7ns ± 6%   -68.66%  (p=0.000 n=8+9)
LogFieldType/Bool-32                12.0ns ± 5%   10.2ns ± 2%   -15.18%  (p=0.000 n=10+8)
LogFieldType/Bools-32               17.2ns ± 2%   12.6ns ± 4%   -26.63%  (p=0.000 n=10+10)
LogFieldType/Int-32                 12.3ns ± 2%   11.2ns ± 4%    -9.27%  (p=0.000 n=9+10)
LogFieldType/Float-32               16.7ns ± 1%   12.6ns ± 2%   -24.42%  (p=0.000 n=7+9)
LogFieldType/Str-32                 12.7ns ± 7%   11.3ns ± 7%   -10.88%  (p=0.000 n=10+9)
LogFieldType/Strs-32                20.3ns ± 3%   18.2ns ± 3%   -10.25%  (p=0.000 n=9+10)
LogFieldType/Interface-32            183ns ±12%    175ns ± 9%       ~    (p=0.078 n=10+10)
```

Log message size savings depend heavily on the number and type of fields in the log message.
Assume this log message (with an integer, a timestamp, and a string, in addition to the level):

`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}`

Two measurements were taken of the log file sizes: one without any compression, and a second
using [compress/zlib](https://golang.org/pkg/compress/zlib/).

Results for 10,000 log messages:

| Log Format | Plain File Size (in KB) | Compressed File Size (in KB) |
| :--- | :---: | :---: |
| JSON | 920 | 28 |
| CBOR | 550 | 28 |

The example used to calculate the above data is available in [Examples](examples).
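
As a concrete sketch of where the decoder in this package fits: a console or syslog
writer receives one log line that may be either JSON or CBOR, and normalizes it to
JSON before printing. This is illustrative only; `readLogLine` is a hypothetical
stand-in for however the encoded event is obtained, and the call is shown
unqualified as it would appear inside this package.

```go
// Illustrative sketch: normalize a possibly-binary log line to JSON.
line := readLogLine() // hypothetical source of one encoded event

// If the first byte is above 0x7F the line is treated as CBOR and decoded;
// a plain JSON line is returned unchanged.
fmt.Println(DecodeIfBinaryToString(line))
```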
@ -0,0 +1,19 @@
package cbor

// JSONMarshalFunc is used to marshal an interface value to a JSON encoded
// byte slice. Making it package level instead of embedding it in Encoder
// takes some extra effort at import time, but avoids copying the value
// whenever Encoder's functions are invoked.
// DO REMEMBER to set this variable when importing, or
// you might get a nil pointer dereference panic at runtime.
var JSONMarshalFunc func(v interface{}) ([]byte, error)

type Encoder struct{}

// AppendKey adds a key (string) to the binary encoded log message.
func (e Encoder) AppendKey(dst []byte, key string) []byte {
	if len(dst) < 1 {
		dst = e.AppendBeginMarker(dst)
	}
	return e.AppendString(dst, key)
}
@ -0,0 +1,100 @@
// Package cbor provides primitives for storing different data
// in the CBOR (binary) format. CBOR is defined in RFC7049.
package cbor

import "time"

const (
	majorOffset   = 5
	additionalMax = 23

	// Non Values.
	additionalTypeBoolFalse byte = 20
	additionalTypeBoolTrue  byte = 21
	additionalTypeNull      byte = 22

	// Integer (+ve and -ve) Sub-types.
	additionalTypeIntUint8  byte = 24
	additionalTypeIntUint16 byte = 25
	additionalTypeIntUint32 byte = 26
	additionalTypeIntUint64 byte = 27

	// Float Sub-types.
	additionalTypeFloat16 byte = 25
	additionalTypeFloat32 byte = 26
	additionalTypeFloat64 byte = 27
	additionalTypeBreak   byte = 31

	// Tag Sub-types.
	additionalTypeTimestamp byte = 01

	// Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml
	additionalTypeTagNetworkAddr   uint16 = 260
	additionalTypeTagNetworkPrefix uint16 = 261
	additionalTypeEmbeddedJSON     uint16 = 262
	additionalTypeTagHexString     uint16 = 263

	// Unspecified number of elements.
	additionalTypeInfiniteCount byte = 31
)
const (
	majorTypeUnsignedInt    byte = iota << majorOffset // Major type 0
	majorTypeNegativeInt                               // Major type 1
	majorTypeByteString                                // Major type 2
	majorTypeUtf8String                                // Major type 3
	majorTypeArray                                     // Major type 4
	majorTypeMap                                       // Major type 5
	majorTypeTags                                      // Major type 6
	majorTypeSimpleAndFloat                            // Major type 7
)

const (
	maskOutAdditionalType byte = (7 << majorOffset)
	maskOutMajorType      byte = 31
)

const (
	float32Nan         = "\xfa\x7f\xc0\x00\x00"
	float32PosInfinity = "\xfa\x7f\x80\x00\x00"
	float32NegInfinity = "\xfa\xff\x80\x00\x00"
	float64Nan         = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"
	float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"
	float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"
)

// IntegerTimeFieldFormat indicates the format of timestamp decoded
// from an integer (time in seconds).
var IntegerTimeFieldFormat = time.RFC3339

// NanoTimeFieldFormat indicates the format of timestamp decoded
// from a float value (time in seconds and nanoseconds).
var NanoTimeFieldFormat = time.RFC3339Nano

func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte {
	byteCount := 8
	var minor byte
	switch {
	case number < 256:
		byteCount = 1
		minor = additionalTypeIntUint8

	case number < 65536:
		byteCount = 2
		minor = additionalTypeIntUint16

	case number < 4294967296:
		byteCount = 4
		minor = additionalTypeIntUint32

	default:
		byteCount = 8
		minor = additionalTypeIntUint64
	}
	dst = append(dst, byte(major|minor))
	byteCount--
	for ; byteCount >= 0; byteCount-- {
		dst = append(dst, byte(number>>(uint(byteCount)*8)))
	}
	return dst
}
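
// Worked example (added for clarity): a UTF-8 string of length 300 is
// prefixed with appendCborTypePrefix(dst, majorTypeUtf8String, 300), which
// emits 0x79 0x01 0x2c: 0x60|0x19 (major type 3, uint16 length follows),
// then 300 big-endian in two bytes.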
@ -0,0 +1,614 @@
package cbor

// This file contains code to decode a stream of CBOR Data into JSON.

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"math"
	"net"
	"runtime"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"
)

var decodeTimeZone *time.Location

const hexTable = "0123456789abcdef"

const isFloat32 = 4
const isFloat64 = 8

func readNBytes(src *bufio.Reader, n int) []byte {
	ret := make([]byte, n)
	for i := 0; i < n; i++ {
		ch, e := src.ReadByte()
		if e != nil {
			panic(fmt.Errorf("Tried to read %d bytes, but hit end of file", n))
		}
		ret[i] = ch
	}
	return ret
}

func readByte(src *bufio.Reader) byte {
	b, e := src.ReadByte()
	if e != nil {
		panic(fmt.Errorf("Tried to read 1 byte, but hit end of file"))
	}
	return b
}

func decodeIntAdditonalType(src *bufio.Reader, minor byte) int64 {
	val := int64(0)
	if minor <= 23 {
		val = int64(minor)
	} else {
		bytesToRead := 0
		switch minor {
		case additionalTypeIntUint8:
			bytesToRead = 1
		case additionalTypeIntUint16:
			bytesToRead = 2
		case additionalTypeIntUint32:
			bytesToRead = 4
		case additionalTypeIntUint64:
			bytesToRead = 8
		default:
			panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor))
		}
		pb := readNBytes(src, bytesToRead)
		for i := 0; i < bytesToRead; i++ {
			val = val * 256
			val += int64(pb[i])
		}
	}
	return val
}

func decodeInteger(src *bufio.Reader) int64 {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeUnsignedInt && major != majorTypeNegativeInt {
		panic(fmt.Errorf("Major type is: %d in decodeInteger (expected 0 or 1)", major))
	}
	val := decodeIntAdditonalType(src, minor)
	if major == 0 {
		return val
	}
	return (-1 - val)
}
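
// Worked example (added for clarity): the byte 0x19 (major 0, minor 25)
// followed by 0x01 0x2c decodes to 300; the byte 0x38 (major 1, minor 24)
// followed by 0x63 decodes to -1-99 = -100, per the major-type-1 rule above.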

func decodeFloat(src *bufio.Reader) (float64, int) {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeSimpleAndFloat {
		panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major))
	}

	switch minor {
	case additionalTypeFloat16:
		panic(fmt.Errorf("float16 is not supported in decodeFloat"))

	case additionalTypeFloat32:
		pb := readNBytes(src, 4)
		switch string(pb) {
		case float32Nan:
			return math.NaN(), isFloat32
		case float32PosInfinity:
			return math.Inf(0), isFloat32
		case float32NegInfinity:
			return math.Inf(-1), isFloat32
		}
		n := uint32(0)
		for i := 0; i < 4; i++ {
			n = n * 256
			n += uint32(pb[i])
		}
		val := math.Float32frombits(n)
		return float64(val), isFloat32
	case additionalTypeFloat64:
		pb := readNBytes(src, 8)
		switch string(pb) {
		case float64Nan:
			return math.NaN(), isFloat64
		case float64PosInfinity:
			return math.Inf(0), isFloat64
		case float64NegInfinity:
			return math.Inf(-1), isFloat64
		}
		n := uint64(0)
		for i := 0; i < 8; i++ {
			n = n * 256
			n += uint64(pb[i])
		}
		val := math.Float64frombits(n)
		return val, isFloat64
	}
	panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor))
}

func decodeStringComplex(dst []byte, s string, pos uint) []byte {
	i := int(pos)
	start := 0

	for i < len(s) {
		b := s[i]
		if b >= utf8.RuneSelf {
			r, size := utf8.DecodeRuneInString(s[i:])
			if r == utf8.RuneError && size == 1 {
				// In case of error, first append previous simple characters to
				// the byte slice if any and append a replacement character code
				// in place of the invalid sequence.
				if start < i {
					dst = append(dst, s[start:i]...)
				}
				dst = append(dst, `\ufffd`...)
				i += size
				start = i
				continue
			}
			i += size
			continue
		}
		if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' {
			i++
			continue
		}
		// We encountered a character that needs to be encoded.
		// Let's append the previous simple characters to the byte slice
		// and switch our operation to read and encode the remainder
		// characters byte-by-byte.
		if start < i {
			dst = append(dst, s[start:i]...)
		}
		switch b {
		case '"', '\\':
			dst = append(dst, '\\', b)
		case '\b':
			dst = append(dst, '\\', 'b')
		case '\f':
			dst = append(dst, '\\', 'f')
		case '\n':
			dst = append(dst, '\\', 'n')
		case '\r':
			dst = append(dst, '\\', 'r')
		case '\t':
			dst = append(dst, '\\', 't')
		default:
			dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF])
		}
		i++
		start = i
	}
	if start < len(s) {
		dst = append(dst, s[start:]...)
	}
	return dst
}

func decodeString(src *bufio.Reader, noQuotes bool) []byte {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeByteString {
		panic(fmt.Errorf("Major type is: %d in decodeString", major))
	}
	result := []byte{}
	if !noQuotes {
		result = append(result, '"')
	}
	length := decodeIntAdditonalType(src, minor)
	len := int(length)
	pbs := readNBytes(src, len)
	result = append(result, pbs...)
	if noQuotes {
		return result
	}
	return append(result, '"')
}

func decodeUTF8String(src *bufio.Reader) []byte {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeUtf8String {
		panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major))
	}
	result := []byte{'"'}
	length := decodeIntAdditonalType(src, minor)
	len := int(length)
	pbs := readNBytes(src, len)

	for i := 0; i < len; i++ {
		// Check if the character needs encoding. Control characters, slashes,
		// and the double quote need JSON encoding. Bytes above the ASCII
		// boundary need UTF-8 encoding.
		if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' {
			// We encountered a character that needs to be encoded. Switch
			// to the complex version of the algorithm.
			dst := []byte{'"'}
			dst = decodeStringComplex(dst, string(pbs), uint(i))
			return append(dst, '"')
		}
	}
	// The string has no need for encoding and therefore is directly
	// appended to the byte slice.
	result = append(result, pbs...)
	return append(result, '"')
}

func array2Json(src *bufio.Reader, dst io.Writer) {
	dst.Write([]byte{'['})
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeArray {
		panic(fmt.Errorf("Major type is: %d in array2Json", major))
	}
	len := 0
	unSpecifiedCount := false
	if minor == additionalTypeInfiniteCount {
		unSpecifiedCount = true
	} else {
		length := decodeIntAdditonalType(src, minor)
		len = int(length)
	}
	for i := 0; unSpecifiedCount || i < len; i++ {
		if unSpecifiedCount {
			pb, e := src.Peek(1)
			if e != nil {
				panic(e)
			}
			if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
				readByte(src)
				break
			}
		}
		cbor2JsonOneObject(src, dst)
		if unSpecifiedCount {
			pb, e := src.Peek(1)
			if e != nil {
				panic(e)
			}
			if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
				readByte(src)
				break
			}
			dst.Write([]byte{','})
		} else if i+1 < len {
			dst.Write([]byte{','})
		}
	}
	dst.Write([]byte{']'})
}

func map2Json(src *bufio.Reader, dst io.Writer) {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeMap {
		panic(fmt.Errorf("Major type is: %d in map2Json", major))
	}
	len := 0
	unSpecifiedCount := false
	if minor == additionalTypeInfiniteCount {
		unSpecifiedCount = true
	} else {
		length := decodeIntAdditonalType(src, minor)
		len = int(length)
	}
	dst.Write([]byte{'{'})
	for i := 0; unSpecifiedCount || i < len; i++ {
		if unSpecifiedCount {
			pb, e := src.Peek(1)
			if e != nil {
				panic(e)
			}
			if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
				readByte(src)
				break
			}
		}
		cbor2JsonOneObject(src, dst)
		if i%2 == 0 {
			// Even position values are keys.
			dst.Write([]byte{':'})
		} else {
			if unSpecifiedCount {
				pb, e := src.Peek(1)
				if e != nil {
					panic(e)
				}
				if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
					readByte(src)
					break
				}
				dst.Write([]byte{','})
			} else if i+1 < len {
				dst.Write([]byte{','})
			}
		}
	}
	dst.Write([]byte{'}'})
}

func decodeTagData(src *bufio.Reader) []byte {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeTags {
		panic(fmt.Errorf("Major type is: %d in decodeTagData", major))
	}
	switch minor {
	case additionalTypeTimestamp:
		return decodeTimeStamp(src)

	// Tag value is larger than 256 (so uint16).
	case additionalTypeIntUint16:
		val := decodeIntAdditonalType(src, minor)

		switch uint16(val) {
		case additionalTypeEmbeddedJSON:
			pb := readByte(src)
			dataMajor := pb & maskOutAdditionalType
			if dataMajor != majorTypeByteString {
				panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor))
			}
			src.UnreadByte()
			return decodeString(src, true)

		case additionalTypeTagNetworkAddr:
			octets := decodeString(src, true)
			ss := []byte{'"'}
			switch len(octets) {
			case 6: // MAC address.
				ha := net.HardwareAddr(octets)
				ss = append(append(ss, ha.String()...), '"')
			case 4: // IPv4 address.
				fallthrough
			case 16: // IPv6 address.
				ip := net.IP(octets)
				ss = append(append(ss, ip.String()...), '"')
			default:
				panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4, 6 or 16)", len(octets)))
			}
			return ss

		case additionalTypeTagNetworkPrefix:
			pb := readByte(src)
			if pb != byte(majorTypeMap|0x1) {
				panic(fmt.Errorf("IP Prefix is NOT a map of 1 element as expected"))
			}
			octets := decodeString(src, true)
			val := decodeInteger(src)
			ip := net.IP(octets)
			var mask net.IPMask
			pfxLen := int(val)
			if len(octets) == 4 {
				mask = net.CIDRMask(pfxLen, 32)
			} else {
				mask = net.CIDRMask(pfxLen, 128)
			}
			ipPfx := net.IPNet{IP: ip, Mask: mask}
			ss := []byte{'"'}
			ss = append(append(ss, ipPfx.String()...), '"')
			return ss

		case additionalTypeTagHexString:
			octets := decodeString(src, true)
			ss := []byte{'"'}
			for _, v := range octets {
				ss = append(ss, hexTable[v>>4], hexTable[v&0x0f])
			}
			return append(ss, '"')

		default:
			panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val))
		}
	}
	panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor))
}

func decodeTimeStamp(src *bufio.Reader) []byte {
	pb := readByte(src)
	src.UnreadByte()
	tsMajor := pb & maskOutAdditionalType
	if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt {
		n := decodeInteger(src)
		t := time.Unix(n, 0)
		if decodeTimeZone != nil {
			t = t.In(decodeTimeZone)
		} else {
			t = t.In(time.UTC)
		}
		tsb := []byte{}
		tsb = append(tsb, '"')
		tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat)
		tsb = append(tsb, '"')
		return tsb
	} else if tsMajor == majorTypeSimpleAndFloat {
		n, _ := decodeFloat(src)
		secs := int64(n)
		n -= float64(secs)
		n *= float64(1e9)
		t := time.Unix(secs, int64(n))
		if decodeTimeZone != nil {
			t = t.In(decodeTimeZone)
		} else {
			t = t.In(time.UTC)
		}
		tsb := []byte{}
		tsb = append(tsb, '"')
		tsb = t.AppendFormat(tsb, NanoTimeFieldFormat)
		tsb = append(tsb, '"')
		return tsb
	}
	panic(fmt.Errorf("TS format is neither int nor float: %d", tsMajor))
}

func decodeSimpleFloat(src *bufio.Reader) []byte {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeSimpleAndFloat {
		panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major))
	}
	switch minor {
	case additionalTypeBoolTrue:
		return []byte("true")
	case additionalTypeBoolFalse:
		return []byte("false")
	case additionalTypeNull:
		return []byte("null")
	case additionalTypeFloat16:
		fallthrough
	case additionalTypeFloat32:
		fallthrough
	case additionalTypeFloat64:
		src.UnreadByte()
		v, bc := decodeFloat(src)
		ba := []byte{}
		switch {
		case math.IsNaN(v):
			return []byte("\"NaN\"")
		case math.IsInf(v, 1):
			return []byte("\"+Inf\"")
		case math.IsInf(v, -1):
			return []byte("\"-Inf\"")
		}
		if bc == isFloat32 {
			ba = strconv.AppendFloat(ba, v, 'f', -1, 32)
		} else if bc == isFloat64 {
			ba = strconv.AppendFloat(ba, v, 'f', -1, 64)
		} else {
			panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc))
		}
		return ba
	default:
		panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor))
	}
}

func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) {
	pb, e := src.Peek(1)
	if e != nil {
		panic(e)
	}
	major := (pb[0] & maskOutAdditionalType)

	switch major {
	case majorTypeUnsignedInt:
		fallthrough
	case majorTypeNegativeInt:
		n := decodeInteger(src)
		dst.Write([]byte(strconv.Itoa(int(n))))

	case majorTypeByteString:
		s := decodeString(src, false)
		dst.Write(s)

	case majorTypeUtf8String:
		s := decodeUTF8String(src)
		dst.Write(s)

	case majorTypeArray:
		array2Json(src, dst)

	case majorTypeMap:
		map2Json(src, dst)

	case majorTypeTags:
		s := decodeTagData(src)
		dst.Write(s)

	case majorTypeSimpleAndFloat:
		s := decodeSimpleFloat(src)
		dst.Write(s)
	}
}

func moreBytesToRead(src *bufio.Reader) bool {
	_, e := src.ReadByte()
	if e == nil {
		src.UnreadByte()
		return true
	}
	return false
}

// Cbor2JsonManyObjects decodes all the CBOR objects read from the src
// reader. It keeps decoding until the reader returns EOF (an error when
// reading). The decoded string is written to dst, and a newline is written
// to the output stream at the end of every CBOR object.
//
// Returns any error that was encountered during decoding.
// The child functions panic when an error is encountered; this function
// recovers non-runtime errors and returns the panic reason as the error.
func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			err = r.(error)
		}
	}()
	bufRdr := bufio.NewReader(src)
	for moreBytesToRead(bufRdr) {
		cbor2JsonOneObject(bufRdr, dst)
		dst.Write([]byte("\n"))
	}
	return nil
}
||||||
|
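// Hypothetical usage sketch, assuming the map decoder referenced above
// emits standard JSON, and that "bytes" and "fmt" are imported:
//
//	// 0xbf ... 0xff is the indefinite-length CBOR map {"level":"info"}.
//	in := []byte{0xbf, 0x65, 'l', 'e', 'v', 'e', 'l', 0x64, 'i', 'n', 'f', 'o', 0xff}
//	var out bytes.Buffer
//	if err := Cbor2JsonManyObjects(bytes.NewReader(in), &out); err == nil {
//		fmt.Print(out.String()) // {"level":"info"} followed by a newline
//	}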
|
||||||
|
// binaryFmt detects whether the bytes to be printed are binary or not.
|
||||||
|
func binaryFmt(p []byte) bool { |
||||||
|
if len(p) > 0 && p[0] > 0x7F { |
||||||
|
return true |
||||||
|
} |
||||||
|
return false |
||||||
|
} |
||||||
|
|
||||||
|
func getReader(str string) *bufio.Reader { |
||||||
|
return bufio.NewReader(strings.NewReader(str)) |
||||||
|
} |
||||||
|
|
||||||
|
// DecodeIfBinaryToString converts a binary-formatted log message to a
|
||||||
|
// JSON-formatted string log message - suitable for printing to console/syslog.
|
||||||
|
func DecodeIfBinaryToString(in []byte) string { |
||||||
|
if binaryFmt(in) { |
||||||
|
var b bytes.Buffer |
||||||
|
Cbor2JsonManyObjects(strings.NewReader(string(in)), &b) |
||||||
|
return b.String() |
||||||
|
} |
||||||
|
return string(in) |
||||||
|
} |
||||||
|
|
||||||
|
// DecodeObjectToStr checks if the input is a binary format, if so,
|
||||||
|
// it will decode a single Object and return the decoded string.
|
||||||
|
func DecodeObjectToStr(in []byte) string { |
||||||
|
if binaryFmt(in) { |
||||||
|
var b bytes.Buffer |
||||||
|
cbor2JsonOneObject(getReader(string(in)), &b) |
||||||
|
return b.String() |
||||||
|
} |
||||||
|
return string(in) |
||||||
|
} |
||||||
|
|
||||||
|
// DecodeIfBinaryToBytes checks if the input is a binary format, if so,
|
||||||
|
// it will decode all Objects and return the decoded string as byte array.
|
||||||
|
func DecodeIfBinaryToBytes(in []byte) []byte { |
||||||
|
if binaryFmt(in) { |
||||||
|
var b bytes.Buffer |
||||||
|
Cbor2JsonManyObjects(bytes.NewReader(in), &b) |
||||||
|
return b.Bytes() |
||||||
|
} |
||||||
|
return in |
||||||
|
} |
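// Hypothetical illustration of the dispatch above: the first byte decides
// whether the payload is treated as CBOR or passed through unchanged.
//
//	DecodeIfBinaryToString([]byte(`{"plain":"json"}`)) // '{' is 0x7b <= 0x7f: returned as-is
//	DecodeIfBinaryToString([]byte{0xbf /* CBOR map bytes ... */}) // 0xbf > 0x7f: decoded to JSON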
@ -0,0 +1,95 @@ |
|||||||
|
package cbor |
||||||
|
|
||||||
|
import "fmt" |
||||||
|
|
||||||
|
// AppendStrings encodes and adds an array of strings to the dst byte array.
|
||||||
|
func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendString(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
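// Hypothetical byte-level sketch, assuming additionalMax is 23 as in
// canonical CBOR short counts:
//
//	var e Encoder
//	e.AppendStrings(nil, []string{"a", "b"})
//	// -> 0x82 (array of 2), 0x61 'a' (1-byte string), 0x61 'b'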
|
||||||
|
// AppendString encodes and adds a string to the dst byte array.
|
||||||
|
func (Encoder) AppendString(dst []byte, s string) []byte { |
||||||
|
major := majorTypeUtf8String |
||||||
|
|
||||||
|
l := len(s) |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l)) |
||||||
|
} |
||||||
|
return append(dst, s...) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendStringers encodes and adds an array of Stringer values
|
||||||
|
// to the dst byte array.
|
||||||
|
func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
dst = e.AppendArrayStart(dst) |
||||||
|
dst = e.AppendStringer(dst, vals[0]) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = e.AppendStringer(dst, val) |
||||||
|
} |
||||||
|
} |
||||||
|
return e.AppendArrayEnd(dst) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendStringer encodes and adds the Stringer value to the dst
|
||||||
|
// byte array.
|
||||||
|
func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte { |
||||||
|
if val == nil { |
||||||
|
return e.AppendNil(dst) |
||||||
|
} |
||||||
|
return e.AppendString(dst, val.String()) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendBytes encodes and adds an array of bytes to the dst byte array.
|
||||||
|
func (Encoder) AppendBytes(dst, s []byte) []byte { |
||||||
|
major := majorTypeByteString |
||||||
|
|
||||||
|
l := len(s) |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
return append(dst, s...) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendEmbeddedJSON adds a tag and embeds input JSON as such.
|
||||||
|
func AppendEmbeddedJSON(dst, s []byte) []byte { |
||||||
|
major := majorTypeTags |
||||||
|
minor := additionalTypeEmbeddedJSON |
||||||
|
|
||||||
|
// Append the TAG to indicate this is Embedded JSON.
|
||||||
|
dst = append(dst, byte(major|additionalTypeIntUint16)) |
||||||
|
dst = append(dst, byte(minor>>8)) |
||||||
|
dst = append(dst, byte(minor&0xff)) |
||||||
|
|
||||||
|
// Append the JSON Object as Byte String.
|
||||||
|
major = majorTypeByteString |
||||||
|
|
||||||
|
l := len(s) |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
return append(dst, s...) |
||||||
|
} |
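// Hypothetical byte-level sketch, assuming the standard CBOR encodings
// (tag major type 0xc0 OR'd with the uint16 marker 25 gives 0xd9):
//
//	AppendEmbeddedJSON(nil, []byte(`{"a":1}`))
//	// -> 0xd9, two big-endian bytes of additionalTypeEmbeddedJSON,
//	//    0x47 (byte string of length 7), then the raw JSON bytes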
@ -0,0 +1,93 @@ |
|||||||
|
package cbor |
||||||
|
|
||||||
|
import ( |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
func appendIntegerTimestamp(dst []byte, t time.Time) []byte { |
||||||
|
major := majorTypeTags |
||||||
|
minor := additionalTypeTimestamp |
||||||
|
dst = append(dst, byte(major|minor)) |
||||||
|
secs := t.Unix() |
||||||
|
var val uint64 |
||||||
|
if secs < 0 { |
||||||
|
major = majorTypeNegativeInt |
||||||
|
val = uint64(-secs - 1) |
||||||
|
} else { |
||||||
|
major = majorTypeUnsignedInt |
||||||
|
val = uint64(secs) |
||||||
|
} |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(val)) |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte { |
||||||
|
major := majorTypeTags |
||||||
|
minor := additionalTypeTimestamp |
||||||
|
dst = append(dst, byte(major|minor)) |
||||||
|
secs := t.Unix() |
||||||
|
nanos := t.Nanosecond() |
||||||
|
var val float64 |
||||||
|
val = float64(secs) + float64(nanos)*1e-9 |
||||||
|
return e.AppendFloat64(dst, val) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendTime encodes and adds a timestamp to the dst byte array.
|
||||||
|
func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte { |
||||||
|
utc := t.UTC() |
||||||
|
if utc.Nanosecond() == 0 { |
||||||
|
return appendIntegerTimestamp(dst, utc) |
||||||
|
} |
||||||
|
return e.appendFloatTimestamp(dst, utc) |
||||||
|
} |
||||||
|
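// Hypothetical sketch of the two paths (epoch-tag semantics as in RFC 8949 tag 1):
//
//	var e Encoder
//	e.AppendTime(nil, time.Unix(1600000000, 0), "")         // whole seconds: integer timestamp
//	e.AppendTime(nil, time.Unix(1600000000, 500000000), "") // fractional seconds: float timestamp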
|
||||||
|
// AppendTimes encodes and adds an array of timestamps to the dst byte array.
|
||||||
|
func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
|
||||||
|
for _, t := range vals { |
||||||
|
dst = e.AppendTime(dst, t, unused) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendDuration encodes and adds a duration to the dst byte array.
|
||||||
|
// useInt indicates whether to store the duration as an integer count of
|
||||||
|
// units or as a floating point count of units (e.g. seconds+nanoseconds).
|
||||||
|
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { |
||||||
|
if useInt { |
||||||
|
return e.AppendInt64(dst, int64(d/unit)) |
||||||
|
} |
||||||
|
return e.AppendFloat64(dst, float64(d)/float64(unit)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendDurations encodes and adds an array of durations to the dst byte array.
|
||||||
|
// useInt indicates whether to store the duration as an integer count of
|
||||||
|
// units or as a floating point count of units (e.g. seconds+nanoseconds).
|
||||||
|
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, d := range vals { |
||||||
|
dst = e.AppendDuration(dst, d, unit, useInt) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
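// Hypothetical sketch of the useInt switch:
//
//	var e Encoder
//	e.AppendDuration(nil, 1500*time.Millisecond, time.Second, true)  // integer: 1
//	e.AppendDuration(nil, 1500*time.Millisecond, time.Second, false) // float: 1.5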
@ -0,0 +1,477 @@ |
|||||||
|
package cbor |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"math" |
||||||
|
"net" |
||||||
|
) |
||||||
|
|
||||||
|
// AppendNil inserts a 'Nil' object into the dst byte array.
|
||||||
|
func (Encoder) AppendNil(dst []byte) []byte { |
||||||
|
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeNull)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendBeginMarker inserts a map start into the dst byte array.
|
||||||
|
func (Encoder) AppendBeginMarker(dst []byte) []byte { |
||||||
|
return append(dst, byte(majorTypeMap|additionalTypeInfiniteCount)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendEndMarker inserts a map end into the dst byte array.
|
||||||
|
func (Encoder) AppendEndMarker(dst []byte) []byte { |
||||||
|
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendObjectData takes an object in the form of a byte array and appends it to dst.
|
||||||
|
func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { |
||||||
|
// The BeginMarker is already present in dst, so the leading
|
||||||
|
// marker of o should not be copied when appending to existing data.
|
||||||
|
return append(dst, o[1:]...) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendArrayStart adds markers to indicate the start of an array.
|
||||||
|
func (Encoder) AppendArrayStart(dst []byte) []byte { |
||||||
|
return append(dst, byte(majorTypeArray|additionalTypeInfiniteCount)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendArrayEnd adds markers to indicate the end of an array.
|
||||||
|
func (Encoder) AppendArrayEnd(dst []byte) []byte { |
||||||
|
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendArrayDelim adds markers to indicate end of a particular array element.
|
||||||
|
func (Encoder) AppendArrayDelim(dst []byte) []byte { |
||||||
|
// No delimiters are needed in CBOR.
|
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendLineBreak is a no-op that keeps API compatibility with the JSON encoder.
|
||||||
|
func (Encoder) AppendLineBreak(dst []byte) []byte { |
||||||
|
// No line breaks needed in binary format.
|
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendBool encodes and inserts a boolean value into the dst byte array.
|
||||||
|
func (Encoder) AppendBool(dst []byte, val bool) []byte { |
||||||
|
b := additionalTypeBoolFalse |
||||||
|
if val { |
||||||
|
b = additionalTypeBoolTrue |
||||||
|
} |
||||||
|
return append(dst, byte(majorTypeSimpleAndFloat|b)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendBools encodes and inserts an array of boolean values into the dst byte array.
|
||||||
|
func (e Encoder) AppendBools(dst []byte, vals []bool) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendBool(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInt encodes and inserts an integer value into the dst byte array.
|
||||||
|
func (Encoder) AppendInt(dst []byte, val int) []byte { |
||||||
|
major := majorTypeUnsignedInt |
||||||
|
contentVal := val |
||||||
|
if val < 0 { |
||||||
|
major = majorTypeNegativeInt |
||||||
|
contentVal = -val - 1 |
||||||
|
} |
||||||
|
if contentVal <= additionalMax { |
||||||
|
lb := byte(contentVal) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(contentVal)) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
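// Hypothetical byte-level sketch (negative values use CBOR major type 1
// with content -val-1; the 0x39 prefix assumes appendCborTypePrefix emits
// the canonical uint16 form):
//
//	var e Encoder
//	e.AppendInt(nil, 10)   // -> 0x0a (major 0, short count 10)
//	e.AppendInt(nil, -500) // -> content value 499: 0x39 0x01 0xf3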
|
||||||
|
// AppendInts encodes and inserts an array of integer values into the dst byte array.
|
||||||
|
func (e Encoder) AppendInts(dst []byte, vals []int) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendInt(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInt8 encodes and inserts an int8 value into the dst byte array.
|
||||||
|
func (e Encoder) AppendInt8(dst []byte, val int8) []byte { |
||||||
|
return e.AppendInt(dst, int(val)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInts8 encodes and inserts an array of integer values into the dst byte array.
|
||||||
|
func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendInt(dst, int(v)) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInt16 encodes and inserts an int16 value into the dst byte array.
|
||||||
|
func (e Encoder) AppendInt16(dst []byte, val int16) []byte { |
||||||
|
return e.AppendInt(dst, int(val)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInts16 encodes and inserts an array of int16 values into the dst byte array.
|
||||||
|
func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendInt(dst, int(v)) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInt32 encodes and inserts an int32 value into the dst byte array.
|
||||||
|
func (e Encoder) AppendInt32(dst []byte, val int32) []byte { |
||||||
|
return e.AppendInt(dst, int(val)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInts32 encodes and inserts an array of int32 values into the dst byte array.
|
||||||
|
func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendInt(dst, int(v)) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInt64 encodes and inserts an int64 value into the dst byte array.
|
||||||
|
func (Encoder) AppendInt64(dst []byte, val int64) []byte { |
||||||
|
major := majorTypeUnsignedInt |
||||||
|
contentVal := val |
||||||
|
if val < 0 { |
||||||
|
major = majorTypeNegativeInt |
||||||
|
contentVal = -val - 1 |
||||||
|
} |
||||||
|
if contentVal <= additionalMax { |
||||||
|
lb := byte(contentVal) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(contentVal)) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInts64 encodes and inserts an array of int64 values into the dst byte array.
|
||||||
|
func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendInt64(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUint encodes and inserts an unsigned integer value into the dst byte array.
|
||||||
|
func (e Encoder) AppendUint(dst []byte, val uint) []byte { |
||||||
|
return e.AppendInt64(dst, int64(val)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array.
|
||||||
|
func (e Encoder) AppendUints(dst []byte, vals []uint) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendUint(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUint8 encodes and inserts an unsigned int8 value into the dst byte array.
|
||||||
|
func (e Encoder) AppendUint8(dst []byte, val uint8) []byte { |
||||||
|
return e.AppendUint(dst, uint(val)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array.
|
||||||
|
func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendUint8(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUint16 encodes and inserts a uint16 value into the dst byte array.
|
||||||
|
func (e Encoder) AppendUint16(dst []byte, val uint16) []byte { |
||||||
|
return e.AppendUint(dst, uint(val)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array.
|
||||||
|
func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendUint16(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUint32 encodes and inserts a uint32 value into the dst byte array.
|
||||||
|
func (e Encoder) AppendUint32(dst []byte, val uint32) []byte { |
||||||
|
return e.AppendUint(dst, uint(val)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array.
|
||||||
|
func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendUint32(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUint64 encodes and inserts a uint64 value into the dst byte array.
|
||||||
|
func (Encoder) AppendUint64(dst []byte, val uint64) []byte { |
||||||
|
major := majorTypeUnsignedInt |
||||||
|
contentVal := val |
||||||
|
if contentVal <= additionalMax { |
||||||
|
lb := byte(contentVal) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(contentVal)) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array.
|
||||||
|
func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendUint64(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendFloat32 encodes and inserts a single precision float value into the dst byte array.
|
||||||
|
func (Encoder) AppendFloat32(dst []byte, val float32) []byte { |
||||||
|
switch { |
||||||
|
case math.IsNaN(float64(val)): |
||||||
|
return append(dst, "\xfa\x7f\xc0\x00\x00"...) |
||||||
|
case math.IsInf(float64(val), 1): |
||||||
|
return append(dst, "\xfa\x7f\x80\x00\x00"...) |
||||||
|
case math.IsInf(float64(val), -1): |
||||||
|
return append(dst, "\xfa\xff\x80\x00\x00"...) |
||||||
|
} |
||||||
|
major := majorTypeSimpleAndFloat |
||||||
|
subType := additionalTypeFloat32 |
||||||
|
n := math.Float32bits(val) |
||||||
|
var buf [4]byte |
||||||
|
for i := uint(0); i < 4; i++ { |
||||||
|
buf[i] = byte(n >> ((3 - i) * 8)) |
||||||
|
} |
||||||
|
return append(append(dst, byte(major|subType)), buf[0], buf[1], buf[2], buf[3]) |
||||||
|
} |
||||||
|
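// Hypothetical byte-level sketch (big-endian IEEE 754, as in the loop above):
//
//	var e Encoder
//	e.AppendFloat32(nil, 1.5) // -> 0xfa 0x3f 0xc0 0x00 0x00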
|
||||||
|
// AppendFloats32 encodes and inserts an array of single precision float values into the dst byte array.
|
||||||
|
func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendFloat32(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendFloat64 encodes and inserts a double precision float value into the dst byte array.
|
||||||
|
func (Encoder) AppendFloat64(dst []byte, val float64) []byte { |
||||||
|
switch { |
||||||
|
case math.IsNaN(val): |
||||||
|
return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...) |
||||||
|
case math.IsInf(val, 1): |
||||||
|
return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...) |
||||||
|
case math.IsInf(val, -1): |
||||||
|
return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...) |
||||||
|
} |
||||||
|
major := majorTypeSimpleAndFloat |
||||||
|
subType := additionalTypeFloat64 |
||||||
|
n := math.Float64bits(val) |
||||||
|
dst = append(dst, byte(major|subType)) |
||||||
|
for i := uint(1); i <= 8; i++ { |
||||||
|
b := byte(n >> ((8 - i) * 8)) |
||||||
|
dst = append(dst, b) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array.
|
||||||
|
func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte { |
||||||
|
major := majorTypeArray |
||||||
|
l := len(vals) |
||||||
|
if l == 0 { |
||||||
|
return e.AppendArrayEnd(e.AppendArrayStart(dst)) |
||||||
|
} |
||||||
|
if l <= additionalMax { |
||||||
|
lb := byte(l) |
||||||
|
dst = append(dst, byte(major|lb)) |
||||||
|
} else { |
||||||
|
dst = appendCborTypePrefix(dst, major, uint64(l)) |
||||||
|
} |
||||||
|
for _, v := range vals { |
||||||
|
dst = e.AppendFloat64(dst, v) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInterface takes an arbitrary object, converts it to JSON, and embeds it in dst.
|
||||||
|
func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { |
||||||
|
marshaled, err := JSONMarshalFunc(i) |
||||||
|
if err != nil { |
||||||
|
return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) |
||||||
|
} |
||||||
|
return AppendEmbeddedJSON(dst, marshaled) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6).
|
||||||
|
func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { |
||||||
|
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) |
||||||
|
dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) |
||||||
|
dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) |
||||||
|
return e.AppendBytes(dst, ip) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length).
|
||||||
|
func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { |
||||||
|
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) |
||||||
|
dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8)) |
||||||
|
dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff)) |
||||||
|
|
||||||
|
// Prefix is a tuple (aka MAP of 1 pair of elements) -
|
||||||
|
// the first element is the prefix, the second is the mask length.
|
||||||
|
dst = append(dst, byte(majorTypeMap|0x1)) |
||||||
|
dst = e.AppendBytes(dst, pfx.IP) |
||||||
|
maskLen, _ := pfx.Mask.Size() |
||||||
|
return e.AppendUint8(dst, uint8(maskLen)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendMACAddr encodes and inserts a hardware (MAC) address.
|
||||||
|
func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { |
||||||
|
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) |
||||||
|
dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) |
||||||
|
dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) |
||||||
|
return e.AppendBytes(dst, ha) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendHex adds a TAG and inserts the hex bytes as a string.
|
||||||
|
func (e Encoder) AppendHex(dst []byte, val []byte) []byte { |
||||||
|
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) |
||||||
|
dst = append(dst, byte(additionalTypeTagHexString>>8)) |
||||||
|
dst = append(dst, byte(additionalTypeTagHexString&0xff)) |
||||||
|
return e.AppendBytes(dst, val) |
||||||
|
} |
@ -0,0 +1,19 @@ |
|||||||
|
package json |
||||||
|
|
||||||
|
// JSONMarshalFunc is used to marshal an interface to a JSON-encoded byte slice.
|
||||||
|
// Making it package level instead of embedding it in Encoder requires
|
||||||
|
// some extra effort when importing, but avoids a value copy when the
|
||||||
|
// methods of Encoder are invoked.
|
||||||
|
// DO REMEMBER to set this variable when importing, or
|
||||||
|
// you might get a nil pointer dereference panic at runtime.
|
||||||
|
var JSONMarshalFunc func(v interface{}) ([]byte, error) |
||||||
|
|
||||||
|
type Encoder struct{} |
||||||
|
|
||||||
|
// AppendKey appends a new key to the output JSON.
|
||||||
|
func (e Encoder) AppendKey(dst []byte, key string) []byte { |
||||||
|
if dst[len(dst)-1] != '{' { |
||||||
|
dst = append(dst, ',') |
||||||
|
} |
||||||
|
return append(e.AppendString(dst, key), ':') |
||||||
|
} |
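// Hypothetical call sequence (AppendBeginMarker and AppendString are
// defined elsewhere in this package):
//
//	var e Encoder
//	dst := e.AppendBeginMarker(nil)   // {
//	dst = e.AppendKey(dst, "level")   // {"level":
//	dst = e.AppendString(dst, "info") // {"level":"info"  (close with AppendEndMarker)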
@ -0,0 +1,85 @@ |
|||||||
|
package json |
||||||
|
|
||||||
|
import "unicode/utf8" |
||||||
|
|
||||||
|
// AppendBytes is a mirror of appendString with a []byte arg.
|
||||||
|
func (Encoder) AppendBytes(dst, s []byte) []byte { |
||||||
|
dst = append(dst, '"') |
||||||
|
for i := 0; i < len(s); i++ { |
||||||
|
if !noEscapeTable[s[i]] { |
||||||
|
dst = appendBytesComplex(dst, s, i) |
||||||
|
return append(dst, '"') |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, s...) |
||||||
|
return append(dst, '"') |
||||||
|
} |
||||||
|
|
||||||
|
// AppendHex encodes the input bytes to a hex string and appends
|
||||||
|
// the encoded string to the input byte slice.
|
||||||
|
//
|
||||||
|
// The operation loops through each byte and encodes it as hex using
|
||||||
|
// the hex lookup table.
|
||||||
|
func (Encoder) AppendHex(dst, s []byte) []byte { |
||||||
|
dst = append(dst, '"') |
||||||
|
for _, v := range s { |
||||||
|
dst = append(dst, hex[v>>4], hex[v&0x0f]) |
||||||
|
} |
||||||
|
return append(dst, '"') |
||||||
|
} |
||||||
|
|
||||||
|
// appendBytesComplex is a mirror of the appendStringComplex
|
||||||
|
// with []byte arg
|
||||||
|
func appendBytesComplex(dst, s []byte, i int) []byte { |
||||||
|
start := 0 |
||||||
|
for i < len(s) { |
||||||
|
b := s[i] |
||||||
|
if b >= utf8.RuneSelf { |
||||||
|
r, size := utf8.DecodeRune(s[i:]) |
||||||
|
if r == utf8.RuneError && size == 1 { |
||||||
|
if start < i { |
||||||
|
dst = append(dst, s[start:i]...) |
||||||
|
} |
||||||
|
dst = append(dst, `\ufffd`...) |
||||||
|
i += size |
||||||
|
start = i |
||||||
|
continue |
||||||
|
} |
||||||
|
i += size |
||||||
|
continue |
||||||
|
} |
||||||
|
if noEscapeTable[b] { |
||||||
|
i++ |
||||||
|
continue |
||||||
|
} |
||||||
|
// We encountered a character that needs to be encoded.
|
||||||
|
// Let's append the previous simple characters to the byte slice
|
||||||
|
// and switch our operation to read and encode the remainder
|
||||||
|
// characters byte-by-byte.
|
||||||
|
if start < i { |
||||||
|
dst = append(dst, s[start:i]...) |
||||||
|
} |
||||||
|
switch b { |
||||||
|
case '"', '\\': |
||||||
|
dst = append(dst, '\\', b) |
||||||
|
case '\b': |
||||||
|
dst = append(dst, '\\', 'b') |
||||||
|
case '\f': |
||||||
|
dst = append(dst, '\\', 'f') |
||||||
|
case '\n': |
||||||
|
dst = append(dst, '\\', 'n') |
||||||
|
case '\r': |
||||||
|
dst = append(dst, '\\', 'r') |
||||||
|
case '\t': |
||||||
|
dst = append(dst, '\\', 't') |
||||||
|
default: |
||||||
|
dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) |
||||||
|
} |
||||||
|
i++ |
||||||
|
start = i |
||||||
|
} |
||||||
|
if start < len(s) { |
||||||
|
dst = append(dst, s[start:]...) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
@ -0,0 +1,149 @@ |
|||||||
|
package json |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"unicode/utf8" |
||||||
|
) |
||||||
|
|
||||||
|
const hex = "0123456789abcdef" |
||||||
|
|
||||||
|
var noEscapeTable = [256]bool{} |
||||||
|
|
||||||
|
func init() { |
||||||
|
for i := 0; i <= 0x7e; i++ { |
||||||
|
noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"' |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// AppendStrings encodes the input strings to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = e.AppendString(dst, vals[0]) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = e.AppendString(append(dst, ','), val) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendString encodes the input string to json and appends
|
||||||
|
// the encoded string to the input byte slice.
|
||||||
|
//
|
||||||
|
// The operation loops though each byte in the string looking
|
||||||
|
// for characters that need json or utf8 encoding. If the string
|
||||||
|
// does not need encoding, then the string is appended in it's
|
||||||
|
// entirety to the byte slice.
|
||||||
|
// If we encounter a byte that does need encoding, switch up
|
||||||
|
// the operation and perform a byte-by-byte read-encode-append.
|
||||||
|
func (Encoder) AppendString(dst []byte, s string) []byte { |
||||||
|
// Start with a double quote.
|
||||||
|
dst = append(dst, '"') |
||||||
|
// Loop through each character in the string.
|
||||||
|
for i := 0; i < len(s); i++ { |
||||||
|
// Check if the character needs encoding. Control characters, slashes,
|
||||||
|
// and the double quote need json encoding. Bytes above the ASCII
|
||||||
|
// boundary need utf8 encoding.
|
||||||
|
if !noEscapeTable[s[i]] { |
||||||
|
// We encountered a character that needs to be encoded. Switch
|
||||||
|
// to complex version of the algorithm.
|
||||||
|
dst = appendStringComplex(dst, s, i) |
||||||
|
return append(dst, '"') |
||||||
|
} |
||||||
|
} |
||||||
|
// The string needs no encoding and is therefore directly
|
||||||
|
// appended to the byte slice.
|
||||||
|
dst = append(dst, s...) |
||||||
|
// End with a double quote
|
||||||
|
return append(dst, '"') |
||||||
|
} |
||||||
|
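// Hypothetical sketch of the fast/slow path split:
//
//	var e Encoder
//	e.AppendString(nil, "plain")    // no escapes needed -> "plain" (copied whole)
//	e.AppendString(nil, `say "hi"`) // '"' triggers appendStringComplex -> "say \"hi\""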
|
||||||
|
// AppendStringers encodes the provided Stringer list to json and
|
||||||
|
// appends the encoded Stringer list to the input byte slice.
|
||||||
|
func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = e.AppendStringer(dst, vals[0]) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = e.AppendStringer(append(dst, ','), val) |
||||||
|
} |
||||||
|
} |
||||||
|
return append(dst, ']') |
||||||
|
} |
||||||
|
|
||||||
|
// AppendStringer encodes the input Stringer to json and appends the
|
||||||
|
// encoded Stringer value to the input byte slice.
|
||||||
|
func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte { |
||||||
|
if val == nil { |
||||||
|
return e.AppendInterface(dst, nil) |
||||||
|
} |
||||||
|
return e.AppendString(dst, val.String()) |
||||||
|
} |
||||||
|
|
||||||
|
// appendStringComplex is used by appendString to take over an in-
|
||||||
|
// progress JSON string encoding that encountered a character that needs
|
||||||
|
// to be encoded.
|
||||||
|
func appendStringComplex(dst []byte, s string, i int) []byte { |
||||||
|
start := 0 |
||||||
|
for i < len(s) { |
||||||
|
b := s[i] |
||||||
|
if b >= utf8.RuneSelf { |
||||||
|
r, size := utf8.DecodeRuneInString(s[i:]) |
||||||
|
if r == utf8.RuneError && size == 1 { |
||||||
|
// In case of error, first append previous simple characters to
|
||||||
|
// the byte slice if any and append a replacement character code
|
||||||
|
// in place of the invalid sequence.
|
||||||
|
if start < i { |
||||||
|
dst = append(dst, s[start:i]...) |
||||||
|
} |
||||||
|
dst = append(dst, `\ufffd`...) |
||||||
|
i += size |
||||||
|
start = i |
||||||
|
continue |
||||||
|
} |
||||||
|
i += size |
||||||
|
continue |
||||||
|
} |
||||||
|
if noEscapeTable[b] { |
||||||
|
i++ |
||||||
|
continue |
||||||
|
} |
||||||
|
// We encountered a character that needs to be encoded.
|
||||||
|
// Let's append the previous simple characters to the byte slice
|
||||||
|
// and switch our operation to read and encode the remainder
|
||||||
|
// characters byte-by-byte.
|
||||||
|
if start < i { |
||||||
|
dst = append(dst, s[start:i]...) |
||||||
|
} |
||||||
|
switch b { |
||||||
|
case '"', '\\': |
||||||
|
dst = append(dst, '\\', b) |
||||||
|
case '\b': |
||||||
|
dst = append(dst, '\\', 'b') |
||||||
|
case '\f': |
||||||
|
dst = append(dst, '\\', 'f') |
||||||
|
case '\n': |
||||||
|
dst = append(dst, '\\', 'n') |
||||||
|
case '\r': |
||||||
|
dst = append(dst, '\\', 'r') |
||||||
|
case '\t': |
||||||
|
dst = append(dst, '\\', 't') |
||||||
|
default: |
||||||
|
dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) |
||||||
|
} |
||||||
|
i++ |
||||||
|
start = i |
||||||
|
} |
||||||
|
if start < len(s) { |
||||||
|
dst = append(dst, s[start:]...) |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
@ -0,0 +1,106 @@ |
|||||||
|
package json |
||||||
|
|
||||||
|
import ( |
||||||
|
"strconv" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
const ( |
||||||
|
// Import from zerolog/global.go
|
||||||
|
timeFormatUnix = "" |
||||||
|
timeFormatUnixMs = "UNIXMS" |
||||||
|
timeFormatUnixMicro = "UNIXMICRO" |
||||||
|
) |
||||||
|
|
||||||
|
// AppendTime formats the input time with the given format
|
||||||
|
// and appends the encoded string to the input byte slice.
|
||||||
|
func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte { |
||||||
|
switch format { |
||||||
|
case timeFormatUnix: |
||||||
|
return e.AppendInt64(dst, t.Unix()) |
||||||
|
case timeFormatUnixMs: |
||||||
|
return e.AppendInt64(dst, t.UnixNano()/1000000) |
||||||
|
case timeFormatUnixMicro: |
||||||
|
return e.AppendInt64(dst, t.UnixNano()/1000) |
||||||
|
} |
||||||
|
return append(t.AppendFormat(append(dst, '"'), format), '"') |
||||||
|
} |
||||||
|
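// Hypothetical sketch of the format switch (timeFormatUnix is the empty
// string, per the constants above):
//
//	var e Encoder
//	e.AppendTime(nil, time.Unix(1494567715, 0), timeFormatUnix) // -> 1494567715 (bare integer)
//	e.AppendTime(nil, time.Now(), time.RFC3339)                 // -> a quoted RFC 3339 string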
|
||||||
|
// AppendTimes converts the input times with the given format
|
||||||
|
// and appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte { |
||||||
|
switch format { |
||||||
|
case timeFormatUnix: |
||||||
|
return appendUnixTimes(dst, vals) |
||||||
|
case timeFormatUnixMs: |
||||||
|
return appendUnixMsTimes(dst, vals) |
||||||
|
} |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"') |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, t := range vals[1:] { |
||||||
|
dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"') |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
func appendUnixTimes(dst []byte, vals []time.Time) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendInt(dst, vals[0].Unix(), 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, t := range vals[1:] { |
||||||
|
dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
func appendUnixMsTimes(dst []byte, vals []time.Time) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendInt(dst, vals[0].UnixNano()/1000000, 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, t := range vals[1:] { |
||||||
|
dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/1000000, 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendDuration formats the input duration with the given unit & format
|
||||||
|
// and appends the encoded string to the input byte slice.
|
||||||
|
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { |
||||||
|
if useInt { |
||||||
|
return strconv.AppendInt(dst, int64(d/unit), 10) |
||||||
|
} |
||||||
|
return e.AppendFloat64(dst, float64(d)/float64(unit)) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendDurations formats the input durations with the given unit & format
|
||||||
|
// and appends the encoded string list to the input byte slice.
|
||||||
|
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = e.AppendDuration(dst, vals[0], unit, useInt) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, d := range vals[1:] { |
||||||
|
dst = e.AppendDuration(append(dst, ','), d, unit, useInt) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
@ -0,0 +1,405 @@ |
|||||||
|
package json |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"math" |
||||||
|
"net" |
||||||
|
"strconv" |
||||||
|
) |
||||||
|
|
||||||
|
// AppendNil inserts a 'Nil' object into the dst byte array.
|
||||||
|
func (Encoder) AppendNil(dst []byte) []byte { |
||||||
|
return append(dst, "null"...) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendBeginMarker inserts a map start into the dst byte array.
|
||||||
|
func (Encoder) AppendBeginMarker(dst []byte) []byte { |
||||||
|
return append(dst, '{') |
||||||
|
} |
||||||
|
|
||||||
|
// AppendEndMarker inserts a map end into the dst byte array.
|
||||||
|
func (Encoder) AppendEndMarker(dst []byte) []byte { |
||||||
|
return append(dst, '}') |
||||||
|
} |
||||||
|
|
||||||
|
// AppendLineBreak appends a line break.
|
||||||
|
func (Encoder) AppendLineBreak(dst []byte) []byte { |
||||||
|
return append(dst, '\n') |
||||||
|
} |
||||||
|
|
||||||
|
// AppendArrayStart adds markers to indicate the start of an array.
|
||||||
|
func (Encoder) AppendArrayStart(dst []byte) []byte { |
||||||
|
return append(dst, '[') |
||||||
|
} |
||||||
|
|
||||||
|
// AppendArrayEnd adds markers to indicate the end of an array.
|
||||||
|
func (Encoder) AppendArrayEnd(dst []byte) []byte { |
||||||
|
return append(dst, ']') |
||||||
|
} |
||||||
|
|
||||||
|
// AppendArrayDelim adds markers to indicate end of a particular array element.
|
||||||
|
func (Encoder) AppendArrayDelim(dst []byte) []byte { |
||||||
|
if len(dst) > 0 { |
||||||
|
return append(dst, ',') |
||||||
|
} |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendBool converts the input bool to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendBool(dst []byte, val bool) []byte { |
||||||
|
return strconv.AppendBool(dst, val) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendBools encodes the input bools to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendBools(dst []byte, vals []bool) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendBool(dst, vals[0]) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendBool(append(dst, ','), val) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInt converts the input int to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendInt(dst []byte, val int) []byte { |
||||||
|
return strconv.AppendInt(dst, int64(val), 10) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInts encodes the input ints to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendInts(dst []byte, vals []int) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendInt(dst, int64(vals[0]), 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendInt(append(dst, ','), int64(val), 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInt8 converts the input int8 to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendInt8(dst []byte, val int8) []byte { |
||||||
|
return strconv.AppendInt(dst, int64(val), 10) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInts8 encodes the input int8s to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendInts8(dst []byte, vals []int8) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendInt(dst, int64(vals[0]), 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendInt(append(dst, ','), int64(val), 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInt16 converts the input int16 to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendInt16(dst []byte, val int16) []byte { |
||||||
|
return strconv.AppendInt(dst, int64(val), 10) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInts16 encodes the input int16s to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendInts16(dst []byte, vals []int16) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendInt(dst, int64(vals[0]), 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendInt(append(dst, ','), int64(val), 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInt32 converts the input int32 to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendInt32(dst []byte, val int32) []byte { |
||||||
|
return strconv.AppendInt(dst, int64(val), 10) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInts32 encodes the input int32s to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendInts32(dst []byte, vals []int32) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendInt(dst, int64(vals[0]), 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendInt(append(dst, ','), int64(val), 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInt64 converts the input int64 to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendInt64(dst []byte, val int64) []byte { |
||||||
|
return strconv.AppendInt(dst, val, 10) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInts64 encodes the input int64s to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendInts64(dst []byte, vals []int64) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendInt(dst, vals[0], 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendInt(append(dst, ','), val, 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUint converts the input uint to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendUint(dst []byte, val uint) []byte { |
||||||
|
return strconv.AppendUint(dst, uint64(val), 10) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUints encodes the input uints to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendUints(dst []byte, vals []uint) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendUint(dst, uint64(vals[0]), 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUint8 converts the input uint8 to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendUint8(dst []byte, val uint8) []byte { |
||||||
|
return strconv.AppendUint(dst, uint64(val), 10) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUints8 encodes the input uint8s to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendUint(dst, uint64(vals[0]), 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUint16 converts the input uint16 to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendUint16(dst []byte, val uint16) []byte { |
||||||
|
return strconv.AppendUint(dst, uint64(val), 10) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUints16 encodes the input uint16s to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendUint(dst, uint64(vals[0]), 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUint32 converts the input uint32 to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendUint32(dst []byte, val uint32) []byte { |
||||||
|
return strconv.AppendUint(dst, uint64(val), 10) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUints32 encodes the input uint32s to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendUint(dst, uint64(vals[0]), 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUint64 converts the input uint64 to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendUint64(dst []byte, val uint64) []byte { |
||||||
|
return strconv.AppendUint(dst, val, 10) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendUints64 encodes the input uint64s to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = strconv.AppendUint(dst, vals[0], 10) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = strconv.AppendUint(append(dst, ','), val, 10) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
func appendFloat(dst []byte, val float64, bitSize int) []byte { |
||||||
|
// JSON does not permit NaN or Infinity. A typical JSON encoder would fail
|
||||||
|
// with an error, but a logging library wants the data to get through, so we
|
||||||
|
// make a tradeoff and store those values as strings.
|
||||||
|
switch { |
||||||
|
case math.IsNaN(val): |
||||||
|
return append(dst, `"NaN"`...) |
||||||
|
case math.IsInf(val, 1): |
||||||
|
return append(dst, `"+Inf"`...) |
||||||
|
case math.IsInf(val, -1): |
||||||
|
return append(dst, `"-Inf"`...) |
||||||
|
} |
||||||
|
return strconv.AppendFloat(dst, val, 'f', -1, bitSize) |
||||||
|
} |
||||||
|
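// Hypothetical sketch of the non-finite handling:
//
//	appendFloat(nil, math.NaN(), 64)  // -> "NaN" (quoted string, not a JSON number)
//	appendFloat(nil, math.Inf(1), 64) // -> "+Inf"
//	appendFloat(nil, 1.5, 64)         // -> 1.5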
|
||||||
|
// AppendFloat32 converts the input float32 to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendFloat32(dst []byte, val float32) []byte { |
||||||
|
return appendFloat(dst, float64(val), 32) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendFloats32 encodes the input float32s to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = appendFloat(dst, float64(vals[0]), 32) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = appendFloat(append(dst, ','), float64(val), 32) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendFloat64 converts the input float64 to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (Encoder) AppendFloat64(dst []byte, val float64) []byte { |
||||||
|
return appendFloat(dst, val, 64) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendFloats64 encodes the input float64s to json and
|
||||||
|
// appends the encoded string list to the input byte slice.
|
||||||
|
func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte { |
||||||
|
if len(vals) == 0 { |
||||||
|
return append(dst, '[', ']') |
||||||
|
} |
||||||
|
dst = append(dst, '[') |
||||||
|
dst = appendFloat(dst, vals[0], 64) |
||||||
|
if len(vals) > 1 { |
||||||
|
for _, val := range vals[1:] { |
||||||
|
dst = appendFloat(append(dst, ','), val, 64) |
||||||
|
} |
||||||
|
} |
||||||
|
dst = append(dst, ']') |
||||||
|
return dst |
||||||
|
} |
||||||
|
|
||||||
|
// AppendInterface marshals the input interface to a string and
|
||||||
|
// appends the encoded string to the input byte slice.
|
||||||
|
func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { |
||||||
|
marshaled, err := JSONMarshalFunc(i) |
||||||
|
if err != nil { |
||||||
|
return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) |
||||||
|
} |
||||||
|
return append(dst, marshaled...) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendObjectData takes in an object that is already in a byte array
|
||||||
|
// and adds it to the dst.
|
||||||
|
func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { |
||||||
|
// Three conditions apply here:
|
||||||
|
// 1. new content starts with '{' and dst is still empty - the '{' is dropped OR
|
||||||
|
// 2. new content starts with '{' and dst already has fields - the '{' is
|
||||||
|
//    replaced with ',' to separate it from the existing content OR
|
||||||
|
// 3. new content has no leading '{' but dst already has fields - a ',' is inserted
|
||||||
|
if o[0] == '{' { |
||||||
|
if len(dst) > 1 { |
||||||
|
dst = append(dst, ',') |
||||||
|
} |
||||||
|
o = o[1:] |
||||||
|
} else if len(dst) > 1 { |
||||||
|
dst = append(dst, ',') |
||||||
|
} |
||||||
|
return append(dst, o...) |
||||||
|
} |
||||||
|
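// Hypothetical merge example:
//
//	var e Encoder
//	dst := []byte(`{"a":1`)
//	e.AppendObjectData(dst, []byte(`{"b":2}`)) // -> {"a":1,"b":2}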
|
||||||
|
// AppendIPAddr adds IPv4 or IPv6 address to dst.
|
||||||
|
func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { |
||||||
|
return e.AppendString(dst, ip.String()) |
||||||
|
} |
||||||
|
|
||||||
|
// AppendIPPrefix adds IPv4 or IPv6 Prefix (address & mask) to dst.
|
||||||
|
func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { |
||||||
|
return e.AppendString(dst, pfx.String()) |
||||||
|
|
||||||
|
} |
||||||
|
|
||||||
|
// AppendMACAddr adds MAC address to dst.
|
||||||
|
func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { |
||||||
|
return e.AppendString(dst, ha.String()) |
||||||
|
} |
@ -0,0 +1,457 @@ |
|||||||
|
// Package zerolog provides a lightweight logging library dedicated to JSON logging.
|
||||||
|
//
|
||||||
|
// A global Logger can be used for simple logging:
|
||||||
|
//
|
||||||
|
// import "github.com/rs/zerolog/log"
|
||||||
|
//
|
||||||
|
// log.Info().Msg("hello world")
|
||||||
|
// // Output: {"time":1494567715,"level":"info","message":"hello world"}
|
||||||
|
//
|
||||||
|
// NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log".
|
||||||
|
//
|
||||||
|
// Fields can be added to log messages:
|
||||||
|
//
|
||||||
|
// log.Info().Str("foo", "bar").Msg("hello world")
|
||||||
|
// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"}
|
||||||
|
//
|
||||||
|
// Create logger instance to manage different outputs:
|
||||||
|
//
|
||||||
|
// logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
|
||||||
|
// logger.Info().
|
||||||
|
// Str("foo", "bar").
|
||||||
|
// Msg("hello world")
|
||||||
|
// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"}
|
||||||
|
//
|
||||||
|
// Sub-loggers let you chain loggers with additional context:
|
||||||
|
//
|
||||||
|
// sublogger := log.With().Str("component", "foo").Logger()
|
||||||
|
// sublogger.Info().Msg("hello world")
|
||||||
|
// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"}
|
||||||
|
//
|
||||||
|
// Level logging
|
||||||
|
//
|
||||||
|
// zerolog.SetGlobalLevel(zerolog.InfoLevel)
|
||||||
|
//
|
||||||
|
// log.Debug().Msg("filtered out message")
|
||||||
|
// log.Info().Msg("routed message")
|
||||||
|
//
|
||||||
|
// if e := log.Debug(); e.Enabled() {
|
||||||
|
// // Compute log output only if enabled.
|
||||||
|
// value := compute()
|
||||||
|
// e.Str("foo": value).Msg("some debug message")
|
||||||
|
// }
|
||||||
|
// // Output: {"level":"info","time":1494567715,"routed message"}
|
||||||
|
//
|
||||||
|
// Customize automatic field names:
|
||||||
|
//
|
||||||
|
// zerolog.TimestampFieldName = "t"
|
||||||
|
// zerolog.LevelFieldName = "p"
|
||||||
|
// zerolog.MessageFieldName = "m"
|
||||||
|
//
|
||||||
|
// log.Info().Msg("hello world")
|
||||||
|
// // Output: {"t":1494567715,"p":"info","m":"hello world"}
|
||||||
|
//
|
||||||
|
// Log with no level and message:
|
||||||
|
//
|
||||||
|
// log.Log().Str("foo","bar").Msg("")
|
||||||
|
// // Output: {"time":1494567715,"foo":"bar"}
|
||||||
|
//
|
||||||
|
// Add contextual fields to global Logger:
|
||||||
|
//
|
||||||
|
// log.Logger = log.With().Str("foo", "bar").Logger()
|
||||||
|
//
|
||||||
|
// Sample logs:
|
||||||
|
//
|
||||||
|
// sampled := log.Sample(&zerolog.BasicSampler{N: 10})
|
||||||
|
// sampled.Info().Msg("will be logged every 10 messages")
|
||||||
|
//
|
||||||
|
// Log with contextual hooks:
|
||||||
|
//
|
||||||
|
// // Create the hook:
|
||||||
|
// type SeverityHook struct{}
|
||||||
|
//
|
||||||
|
// func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
|
||||||
|
// if level != zerolog.NoLevel {
|
||||||
|
// e.Str("severity", level.String())
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // And use it:
|
||||||
|
// var h SeverityHook
|
||||||
|
// log := zerolog.New(os.Stdout).Hook(h)
|
||||||
|
// log.Warn().Msg("")
|
||||||
|
// // Output: {"level":"warn","severity":"warn"}
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Caveats
|
||||||
|
//
|
||||||
|
// There is no field deduplication out-of-the-box.
|
||||||
|
// Using the same key multiple times creates a new key in the final JSON each time.
|
||||||
|
//
|
||||||
|
// logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
|
||||||
|
// logger.Info().
|
||||||
|
// Timestamp().
|
||||||
|
// Msg("dup")
|
||||||
|
// // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
|
||||||
|
//
|
||||||
|
// In this case, many consumers will take the last value,
|
||||||
|
// but this is not guaranteed; check your consumer if in doubt.
|
||||||
|
package zerolog |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"io/ioutil" |
||||||
|
"os" |
||||||
|
"strconv" |
||||||
|
) |
||||||
|
|
||||||
|
// Level defines log levels.
|
||||||
|
type Level int8 |
||||||
|
|
||||||
|
const ( |
||||||
|
// DebugLevel defines debug log level.
|
||||||
|
DebugLevel Level = iota |
||||||
|
// InfoLevel defines info log level.
|
||||||
|
InfoLevel |
||||||
|
// WarnLevel defines warn log level.
|
||||||
|
WarnLevel |
||||||
|
// ErrorLevel defines error log level.
|
||||||
|
ErrorLevel |
||||||
|
// FatalLevel defines fatal log level.
|
||||||
|
FatalLevel |
||||||
|
// PanicLevel defines panic log level.
|
||||||
|
PanicLevel |
||||||
|
// NoLevel defines an absent log level.
|
||||||
|
NoLevel |
||||||
|
// Disabled disables the logger.
|
||||||
|
Disabled |
||||||
|
|
||||||
|
// TraceLevel defines trace log level.
|
||||||
|
TraceLevel Level = -1 |
||||||
|
// Values less than TraceLevel are handled as numbers.
|
||||||
|
) |
||||||
|
|
||||||
|
func (l Level) String() string { |
||||||
|
switch l { |
||||||
|
case TraceLevel: |
||||||
|
return LevelTraceValue |
||||||
|
case DebugLevel: |
||||||
|
return LevelDebugValue |
||||||
|
case InfoLevel: |
||||||
|
return LevelInfoValue |
||||||
|
case WarnLevel: |
||||||
|
return LevelWarnValue |
||||||
|
case ErrorLevel: |
||||||
|
return LevelErrorValue |
||||||
|
case FatalLevel: |
||||||
|
return LevelFatalValue |
||||||
|
case PanicLevel: |
||||||
|
return LevelPanicValue |
||||||
|
case Disabled: |
||||||
|
return "disabled" |
||||||
|
case NoLevel: |
||||||
|
return "" |
||||||
|
} |
||||||
|
return strconv.Itoa(int(l)) |
||||||
|
} |
||||||
|
|
||||||
|
// ParseLevel converts a level string into a zerolog Level value.
|
||||||
|
// It returns an error if the input string does not match known values.
|
||||||
|
func ParseLevel(levelStr string) (Level, error) { |
||||||
|
switch levelStr { |
||||||
|
case LevelFieldMarshalFunc(TraceLevel): |
||||||
|
return TraceLevel, nil |
||||||
|
case LevelFieldMarshalFunc(DebugLevel): |
||||||
|
return DebugLevel, nil |
||||||
|
case LevelFieldMarshalFunc(InfoLevel): |
||||||
|
return InfoLevel, nil |
||||||
|
case LevelFieldMarshalFunc(WarnLevel): |
||||||
|
return WarnLevel, nil |
||||||
|
case LevelFieldMarshalFunc(ErrorLevel): |
||||||
|
return ErrorLevel, nil |
||||||
|
case LevelFieldMarshalFunc(FatalLevel): |
||||||
|
return FatalLevel, nil |
||||||
|
case LevelFieldMarshalFunc(PanicLevel): |
||||||
|
return PanicLevel, nil |
||||||
|
case LevelFieldMarshalFunc(Disabled): |
||||||
|
return Disabled, nil |
||||||
|
case LevelFieldMarshalFunc(NoLevel): |
||||||
|
return NoLevel, nil |
||||||
|
} |
||||||
|
i, err := strconv.Atoi(levelStr) |
||||||
|
if err != nil { |
||||||
|
return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr) |
||||||
|
} |
||||||
|
if i > 127 || i < -128 { |
||||||
|
return NoLevel, fmt.Errorf("Out-Of-Bounds Level: '%d', defaulting to NoLevel", i) |
||||||
|
} |
||||||
|
return Level(i), nil |
||||||
|
} |
||||||
|
|
||||||
|
// A Logger represents an active logging object that generates lines
|
||||||
|
// of JSON output to an io.Writer. Each logging operation makes a single
|
||||||
|
// call to the Writer's Write method. There is no guarantee on access
|
||||||
|
// serialization to the Writer. If your Writer is not thread safe,
|
||||||
|
// you may consider a sync wrapper.
|
||||||
|
type Logger struct { |
||||||
|
w LevelWriter |
||||||
|
level Level |
||||||
|
sampler Sampler |
||||||
|
context []byte |
||||||
|
hooks []Hook |
||||||
|
stack bool |
||||||
|
} |
||||||
|
|
||||||
|
// New creates a root logger with given output writer. If the output writer implements
|
||||||
|
// the LevelWriter interface, the WriteLevel method will be called instead of the Write
|
||||||
|
// one.
|
||||||
|
//
|
||||||
|
// Each logging operation makes a single call to the Writer's Write method. There is no
|
||||||
|
// guarantee on access serialization to the Writer. If your Writer is not thread safe,
|
||||||
|
// you may consider using a sync wrapper.
|
||||||
|
func New(w io.Writer) Logger { |
||||||
|
if w == nil { |
||||||
|
w = ioutil.Discard |
||||||
|
} |
||||||
|
lw, ok := w.(LevelWriter) |
||||||
|
if !ok { |
||||||
|
lw = levelWriterAdapter{w} |
||||||
|
} |
||||||
|
return Logger{w: lw, level: TraceLevel} |
||||||
|
} |
||||||
|
|
||||||
|
// Nop returns a disabled logger for which all operations are no-op.
|
||||||
|
func Nop() Logger { |
||||||
|
return New(nil).Level(Disabled) |
||||||
|
} |
||||||
|
|
||||||
|
// Output duplicates the current logger and sets w as its output.
|
||||||
|
func (l Logger) Output(w io.Writer) Logger { |
||||||
|
l2 := New(w) |
||||||
|
l2.level = l.level |
||||||
|
l2.sampler = l.sampler |
||||||
|
l2.stack = l.stack |
||||||
|
if len(l.hooks) > 0 { |
||||||
|
l2.hooks = append(l2.hooks, l.hooks...) |
||||||
|
} |
||||||
|
if l.context != nil { |
||||||
|
l2.context = make([]byte, len(l.context), cap(l.context)) |
||||||
|
copy(l2.context, l.context) |
||||||
|
} |
||||||
|
return l2 |
||||||
|
} |
||||||
|
|
||||||
|
// With creates a child logger with the field added to its context.
|
||||||
|
func (l Logger) With() Context { |
||||||
|
context := l.context |
||||||
|
l.context = make([]byte, 0, 500) |
||||||
|
if context != nil { |
||||||
|
l.context = append(l.context, context...) |
||||||
|
} else { |
||||||
|
// This is needed for AppendKey to not check len of input
|
||||||
|
// thus making it inlinable
|
||||||
|
l.context = enc.AppendBeginMarker(l.context) |
||||||
|
} |
||||||
|
return Context{l} |
||||||
|
} |
||||||
|
|
||||||
|
// UpdateContext updates the internal logger's context.
|
||||||
|
//
|
||||||
|
// Use this method with caution. If unsure, prefer the With method.
|
||||||
|
func (l *Logger) UpdateContext(update func(c Context) Context) { |
||||||
|
if l == disabledLogger { |
||||||
|
return |
||||||
|
} |
||||||
|
if cap(l.context) == 0 { |
||||||
|
l.context = make([]byte, 0, 500) |
||||||
|
} |
||||||
|
if len(l.context) == 0 { |
||||||
|
l.context = enc.AppendBeginMarker(l.context) |
||||||
|
} |
||||||
|
c := update(Context{*l}) |
||||||
|
l.context = c.l.context |
||||||
|
} |
||||||
|
|
||||||
|
// Level creates a child logger with the minimum accepted level set to level.
|
||||||
|
func (l Logger) Level(lvl Level) Logger { |
||||||
|
l.level = lvl |
||||||
|
return l |
||||||
|
} |
||||||
|
|
||||||
|
// GetLevel returns the current Level of l.
|
||||||
|
func (l Logger) GetLevel() Level { |
||||||
|
return l.level |
||||||
|
} |
||||||
|
|
||||||
|
// Sample returns a logger with the s sampler.
|
||||||
|
func (l Logger) Sample(s Sampler) Logger { |
||||||
|
l.sampler = s |
||||||
|
return l |
||||||
|
} |
||||||
|
|
||||||
|
// Hook returns a logger with the h Hook.
|
||||||
|
func (l Logger) Hook(h Hook) Logger { |
||||||
|
l.hooks = append(l.hooks, h) |
||||||
|
return l |
||||||
|
} |
||||||
|
|
||||||
|
// Trace starts a new message with trace level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func (l *Logger) Trace() *Event { |
||||||
|
return l.newEvent(TraceLevel, nil) |
||||||
|
} |
||||||
|
|
||||||
|
// Debug starts a new message with debug level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func (l *Logger) Debug() *Event { |
||||||
|
return l.newEvent(DebugLevel, nil) |
||||||
|
} |
||||||
|
|
||||||
|
// Info starts a new message with info level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func (l *Logger) Info() *Event { |
||||||
|
return l.newEvent(InfoLevel, nil) |
||||||
|
} |
||||||
|
|
||||||
|
// Warn starts a new message with warn level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func (l *Logger) Warn() *Event { |
||||||
|
return l.newEvent(WarnLevel, nil) |
||||||
|
} |
||||||
|
|
||||||
|
// Error starts a new message with error level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func (l *Logger) Error() *Event { |
||||||
|
return l.newEvent(ErrorLevel, nil) |
||||||
|
} |
||||||
|
|
||||||
|
// Err starts a new message with error level with err as a field if not nil or
|
||||||
|
// with info level if err is nil.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func (l *Logger) Err(err error) *Event { |
||||||
|
if err != nil { |
||||||
|
return l.Error().Err(err) |
||||||
|
} |
||||||
|
|
||||||
|
return l.Info() |
||||||
|
} |
||||||
|
|
||||||
|
// Fatal starts a new message with fatal level. The os.Exit(1) function
|
||||||
|
// is called by the Msg method, which terminates the program immediately.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func (l *Logger) Fatal() *Event { |
||||||
|
return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) }) |
||||||
|
} |
||||||
|
|
||||||
|
// Panic starts a new message with panic level. The panic() function
|
||||||
|
// is called by the Msg method, which stops the ordinary flow of a goroutine.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func (l *Logger) Panic() *Event { |
||||||
|
return l.newEvent(PanicLevel, func(msg string) { panic(msg) }) |
||||||
|
} |
||||||
|
|
||||||
|
// WithLevel starts a new message with level. Unlike Fatal and Panic
|
||||||
|
// methods, WithLevel does not terminate the program or stop the ordinary
|
||||||
|
// flow of a goroutine when used with their respective levels.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func (l *Logger) WithLevel(level Level) *Event { |
||||||
|
switch level { |
||||||
|
case TraceLevel: |
||||||
|
return l.Trace() |
||||||
|
case DebugLevel: |
||||||
|
return l.Debug() |
||||||
|
case InfoLevel: |
||||||
|
return l.Info() |
||||||
|
case WarnLevel: |
||||||
|
return l.Warn() |
||||||
|
case ErrorLevel: |
||||||
|
return l.Error() |
||||||
|
case FatalLevel: |
||||||
|
return l.newEvent(FatalLevel, nil) |
||||||
|
case PanicLevel: |
||||||
|
return l.newEvent(PanicLevel, nil) |
||||||
|
case NoLevel: |
||||||
|
return l.Log() |
||||||
|
case Disabled: |
||||||
|
return nil |
||||||
|
default: |
||||||
|
return l.newEvent(level, nil) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Log starts a new message with no level. Setting GlobalLevel to Disabled
|
||||||
|
// will still disable events produced by this method.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func (l *Logger) Log() *Event { |
||||||
|
return l.newEvent(NoLevel, nil) |
||||||
|
} |
||||||
|
|
||||||
|
// Print sends a log event using debug level and no extra field.
|
||||||
|
// Arguments are handled in the manner of fmt.Print.
|
||||||
|
func (l *Logger) Print(v ...interface{}) { |
||||||
|
if e := l.Debug(); e.Enabled() { |
||||||
|
e.CallerSkipFrame(1).Msg(fmt.Sprint(v...)) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Printf sends a log event using debug level and no extra field.
|
||||||
|
// Arguments are handled in the manner of fmt.Printf.
|
||||||
|
func (l *Logger) Printf(format string, v ...interface{}) { |
||||||
|
if e := l.Debug(); e.Enabled() { |
||||||
|
e.CallerSkipFrame(1).Msg(fmt.Sprintf(format, v...)) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Write implements the io.Writer interface. This is useful to set as a writer
|
||||||
|
// for the standard library log.
|
||||||
|
func (l Logger) Write(p []byte) (n int, err error) { |
||||||
|
n = len(p) |
||||||
|
if n > 0 && p[n-1] == '\n' { |
||||||
|
// Trim trailing newline added by stdlog.
|
||||||
|
p = p[0 : n-1] |
||||||
|
} |
||||||
|
l.Log().CallerSkipFrame(1).Msg(string(p)) |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func (l *Logger) newEvent(level Level, done func(string)) *Event { |
||||||
|
enabled := l.should(level) |
||||||
|
if !enabled { |
||||||
|
return nil |
||||||
|
} |
||||||
|
e := newEvent(l.w, level) |
||||||
|
e.done = done |
||||||
|
e.ch = l.hooks |
||||||
|
if level != NoLevel && LevelFieldName != "" { |
||||||
|
e.Str(LevelFieldName, LevelFieldMarshalFunc(level)) |
||||||
|
} |
||||||
|
if l.context != nil && len(l.context) > 1 { |
||||||
|
e.buf = enc.AppendObjectData(e.buf, l.context) |
||||||
|
} |
||||||
|
if l.stack { |
||||||
|
e.Stack() |
||||||
|
} |
||||||
|
return e |
||||||
|
} |
||||||
|
|
||||||
|
// should returns true if the log event should be logged.
|
||||||
|
func (l *Logger) should(lvl Level) bool { |
||||||
|
if lvl < l.level || lvl < GlobalLevel() { |
||||||
|
return false |
||||||
|
} |
||||||
|
if l.sampler != nil && !samplingDisabled() { |
||||||
|
return l.sampler.Sample(lvl) |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
@ -0,0 +1,131 @@ |
|||||||
|
// Package log provides a global logger for zerolog.
|
||||||
|
package log |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
|
||||||
|
"github.com/rs/zerolog" |
||||||
|
) |
||||||
|
|
||||||
|
// Logger is the global logger.
|
||||||
|
var Logger = zerolog.New(os.Stderr).With().Timestamp().Logger() |
||||||
|
|
||||||
|
// Output duplicates the global logger and sets w as its output.
|
||||||
|
func Output(w io.Writer) zerolog.Logger { |
||||||
|
return Logger.Output(w) |
||||||
|
} |
||||||
|
|
||||||
|
// With creates a child logger with the field added to its context.
|
||||||
|
func With() zerolog.Context { |
||||||
|
return Logger.With() |
||||||
|
} |
||||||
|
|
||||||
|
// Level creates a child logger with the minimum accepted level set to level.
|
||||||
|
func Level(level zerolog.Level) zerolog.Logger { |
||||||
|
return Logger.Level(level) |
||||||
|
} |
||||||
|
|
||||||
|
// Sample returns a logger with the s sampler.
|
||||||
|
func Sample(s zerolog.Sampler) zerolog.Logger { |
||||||
|
return Logger.Sample(s) |
||||||
|
} |
||||||
|
|
||||||
|
// Hook returns a logger with the h Hook.
|
||||||
|
func Hook(h zerolog.Hook) zerolog.Logger { |
||||||
|
return Logger.Hook(h) |
||||||
|
} |
||||||
|
|
||||||
|
// Err starts a new message with error level with err as a field if not nil or
|
||||||
|
// with info level if err is nil.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func Err(err error) *zerolog.Event { |
||||||
|
return Logger.Err(err) |
||||||
|
} |
||||||
|
|
||||||
|
// Trace starts a new message with trace level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func Trace() *zerolog.Event { |
||||||
|
return Logger.Trace() |
||||||
|
} |
||||||
|
|
||||||
|
// Debug starts a new message with debug level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func Debug() *zerolog.Event { |
||||||
|
return Logger.Debug() |
||||||
|
} |
||||||
|
|
||||||
|
// Info starts a new message with info level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func Info() *zerolog.Event { |
||||||
|
return Logger.Info() |
||||||
|
} |
||||||
|
|
||||||
|
// Warn starts a new message with warn level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func Warn() *zerolog.Event { |
||||||
|
return Logger.Warn() |
||||||
|
} |
||||||
|
|
||||||
|
// Error starts a new message with error level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func Error() *zerolog.Event { |
||||||
|
return Logger.Error() |
||||||
|
} |
||||||
|
|
||||||
|
// Fatal starts a new message with fatal level. The os.Exit(1) function
|
||||||
|
// is called by the Msg method.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func Fatal() *zerolog.Event { |
||||||
|
return Logger.Fatal() |
||||||
|
} |
||||||
|
|
||||||
|
// Panic starts a new message with panic level. The message is also sent
|
||||||
|
// to the panic function.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func Panic() *zerolog.Event { |
||||||
|
return Logger.Panic() |
||||||
|
} |
||||||
|
|
||||||
|
// WithLevel starts a new message with level.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func WithLevel(level zerolog.Level) *zerolog.Event { |
||||||
|
return Logger.WithLevel(level) |
||||||
|
} |
||||||
|
|
||||||
|
// Log starts a new message with no level. Setting zerolog.GlobalLevel to
|
||||||
|
// zerolog.Disabled will still disable events produced by this method.
|
||||||
|
//
|
||||||
|
// You must call Msg on the returned event in order to send the event.
|
||||||
|
func Log() *zerolog.Event { |
||||||
|
return Logger.Log() |
||||||
|
} |
||||||
|
|
||||||
|
// Print sends a log event using debug level and no extra field.
|
||||||
|
// Arguments are handled in the manner of fmt.Print.
|
||||||
|
func Print(v ...interface{}) { |
||||||
|
Logger.Debug().CallerSkipFrame(1).Msg(fmt.Sprint(v...)) |
||||||
|
} |
||||||
|
|
||||||
|
// Printf sends a log event using debug level and no extra field.
|
||||||
|
// Arguments are handled in the manner of fmt.Printf.
|
||||||
|
func Printf(format string, v ...interface{}) { |
||||||
|
Logger.Debug().CallerSkipFrame(1).Msgf(format, v...) |
||||||
|
} |
||||||
|
|
||||||
|
// Ctx returns the Logger associated with the ctx. If no logger
|
||||||
|
// is associated, a disabled logger is returned.
|
||||||
|
func Ctx(ctx context.Context) *zerolog.Logger { |
||||||
|
return zerolog.Ctx(ctx) |
||||||
|
} |
@ -0,0 +1,5 @@ |
|||||||
|
// +build !go1.12
|
||||||
|
|
||||||
|
package zerolog |
||||||
|
|
||||||
|
const contextCallerSkipFrameCount = 3 |
(binary image file added: 82 KiB) |
@ -0,0 +1,134 @@ |
|||||||
|
package zerolog |
||||||
|
|
||||||
|
import ( |
||||||
|
"math/rand" |
||||||
|
"sync/atomic" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
var ( |
||||||
|
// Often samples the log roughly once every 10 events.
|
||||||
|
Often = RandomSampler(10) |
||||||
|
// Sometimes samples the log roughly once every 100 events.
|
||||||
|
Sometimes = RandomSampler(100) |
||||||
|
// Rarely samples the log roughly once every 1000 events.
|
||||||
|
Rarely = RandomSampler(1000) |
||||||
|
) |
||||||
|
|
||||||
|
// Sampler defines an interface to a log sampler.
|
||||||
|
type Sampler interface { |
||||||
|
// Sample returns true if the event should be part of the sample, false if
|
||||||
|
// the event should be dropped.
|
||||||
|
Sample(lvl Level) bool |
||||||
|
} |
||||||
|
|
||||||
|
// RandomSampler uses a PRNG to randomly sample an event out of N events,
|
||||||
|
// regardless of their level.
|
||||||
|
type RandomSampler uint32 |
||||||
|
|
||||||
|
// Sample implements the Sampler interface.
|
||||||
|
func (s RandomSampler) Sample(lvl Level) bool { |
||||||
|
if s <= 0 { |
||||||
|
return false |
||||||
|
} |
||||||
|
if rand.Intn(int(s)) != 0 { |
||||||
|
return false |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// BasicSampler is a sampler that will send every Nth event, regardless of
|
||||||
|
// their level.
|
||||||
|
type BasicSampler struct { |
||||||
|
N uint32 |
||||||
|
counter uint32 |
||||||
|
} |
||||||
|
|
||||||
|
// Sample implements the Sampler interface.
|
||||||
|
func (s *BasicSampler) Sample(lvl Level) bool { |
||||||
|
n := s.N |
||||||
|
if n == 1 { |
||||||
|
return true |
||||||
|
} |
||||||
|
c := atomic.AddUint32(&s.counter, 1) |
||||||
|
return c%n == 1 |
||||||
|
} |
||||||
|
|
||||||
|
// BurstSampler lets Burst events pass per Period, then passes the decision to
|
||||||
|
// NextSampler. If NextSampler is not set, all subsequent events are rejected.
|
||||||
|
type BurstSampler struct { |
||||||
|
// Burst is the maximum number of events per period allowed before calling
|
||||||
|
// NextSampler.
|
||||||
|
Burst uint32 |
||||||
|
// Period defines the burst period. If 0, NextSampler is always called.
|
||||||
|
Period time.Duration |
||||||
|
// NextSampler is the sampler used after the burst is reached. If nil,
|
||||||
|
// events are always rejected after the burst.
|
||||||
|
NextSampler Sampler |
||||||
|
|
||||||
|
counter uint32 |
||||||
|
resetAt int64 |
||||||
|
} |
||||||
|
|
||||||
|
// Sample implements the Sampler interface.
|
||||||
|
func (s *BurstSampler) Sample(lvl Level) bool { |
||||||
|
if s.Burst > 0 && s.Period > 0 { |
||||||
|
if s.inc() <= s.Burst { |
||||||
|
return true |
||||||
|
} |
||||||
|
} |
||||||
|
if s.NextSampler == nil { |
||||||
|
return false |
||||||
|
} |
||||||
|
return s.NextSampler.Sample(lvl) |
||||||
|
} |
||||||
|
|
||||||
|
func (s *BurstSampler) inc() uint32 { |
||||||
|
now := time.Now().UnixNano() |
||||||
|
resetAt := atomic.LoadInt64(&s.resetAt) |
||||||
|
var c uint32 |
||||||
|
if now > resetAt { |
||||||
|
c = 1 |
||||||
|
atomic.StoreUint32(&s.counter, c) |
||||||
|
newResetAt := now + s.Period.Nanoseconds() |
||||||
|
reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt) |
||||||
|
if !reset { |
||||||
|
// Lost the race with another goroutine trying to reset.
|
||||||
|
c = atomic.AddUint32(&s.counter, 1) |
||||||
|
} |
||||||
|
} else { |
||||||
|
c = atomic.AddUint32(&s.counter, 1) |
||||||
|
} |
||||||
|
return c |
||||||
|
} |
||||||
|
|
||||||
|
// LevelSampler applies a different sampler for each level.
|
||||||
|
type LevelSampler struct { |
||||||
|
TraceSampler, DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler |
||||||
|
} |
||||||
|
|
||||||
|
func (s LevelSampler) Sample(lvl Level) bool { |
||||||
|
switch lvl { |
||||||
|
case TraceLevel: |
||||||
|
if s.TraceSampler != nil { |
||||||
|
return s.TraceSampler.Sample(lvl) |
||||||
|
} |
||||||
|
case DebugLevel: |
||||||
|
if s.DebugSampler != nil { |
||||||
|
return s.DebugSampler.Sample(lvl) |
||||||
|
} |
||||||
|
case InfoLevel: |
||||||
|
if s.InfoSampler != nil { |
||||||
|
return s.InfoSampler.Sample(lvl) |
||||||
|
} |
||||||
|
case WarnLevel: |
||||||
|
if s.WarnSampler != nil { |
||||||
|
return s.WarnSampler.Sample(lvl) |
||||||
|
} |
||||||
|
case ErrorLevel: |
||||||
|
if s.ErrorSampler != nil { |
||||||
|
return s.ErrorSampler.Sample(lvl) |
||||||
|
} |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
@ -0,0 +1,80 @@ |
|||||||
|
// +build !windows
|
||||||
|
// +build !binary_log
|
||||||
|
|
||||||
|
package zerolog |
||||||
|
|
||||||
|
import ( |
||||||
|
"io" |
||||||
|
) |
||||||
|
|
||||||
|
// See http://cee.mitre.org/language/1.0-beta1/clt.html#syslog
|
||||||
|
// or https://www.rsyslog.com/json-elasticsearch/
|
||||||
|
const ceePrefix = "@cee:" |
||||||
|
|
||||||
|
// SyslogWriter is an interface matching a syslog.Writer struct.
|
||||||
|
type SyslogWriter interface { |
||||||
|
io.Writer |
||||||
|
Debug(m string) error |
||||||
|
Info(m string) error |
||||||
|
Warning(m string) error |
||||||
|
Err(m string) error |
||||||
|
Emerg(m string) error |
||||||
|
Crit(m string) error |
||||||
|
} |
||||||
|
|
||||||
|
type syslogWriter struct { |
||||||
|
w SyslogWriter |
||||||
|
prefix string |
||||||
|
} |
||||||
|
|
||||||
|
// SyslogLevelWriter wraps a SyslogWriter and calls the right syslog level
|
||||||
|
// method matching the zerolog level.
|
||||||
|
func SyslogLevelWriter(w SyslogWriter) LevelWriter { |
||||||
|
return syslogWriter{w, ""} |
||||||
|
} |
||||||
|
|
||||||
|
// SyslogCEEWriter wraps a SyslogWriter with a SyslogLevelWriter that adds a
|
||||||
|
// MITRE CEE prefix for JSON syslog entries, compatible with rsyslog
|
||||||
|
// and syslog-ng JSON logging support.
|
||||||
|
// See https://www.rsyslog.com/json-elasticsearch/
|
||||||
|
func SyslogCEEWriter(w SyslogWriter) LevelWriter { |
||||||
|
return syslogWriter{w, ceePrefix} |
||||||
|
} |
||||||
|
|
||||||
|
func (sw syslogWriter) Write(p []byte) (n int, err error) { |
||||||
|
var pn int |
||||||
|
if sw.prefix != "" { |
||||||
|
pn, err = sw.w.Write([]byte(sw.prefix)) |
||||||
|
if err != nil { |
||||||
|
return pn, err |
||||||
|
} |
||||||
|
} |
||||||
|
n, err = sw.w.Write(p) |
||||||
|
return pn + n, err |
||||||
|
} |
||||||
|
|
||||||
|
// WriteLevel implements LevelWriter interface.
|
||||||
|
func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) { |
||||||
|
switch level { |
||||||
|
case TraceLevel: |
||||||
|
case DebugLevel: |
||||||
|
err = sw.w.Debug(sw.prefix + string(p)) |
||||||
|
case InfoLevel: |
||||||
|
err = sw.w.Info(sw.prefix + string(p)) |
||||||
|
case WarnLevel: |
||||||
|
err = sw.w.Warning(sw.prefix + string(p)) |
||||||
|
case ErrorLevel: |
||||||
|
err = sw.w.Err(sw.prefix + string(p)) |
||||||
|
case FatalLevel: |
||||||
|
err = sw.w.Emerg(sw.prefix + string(p)) |
||||||
|
case PanicLevel: |
||||||
|
err = sw.w.Crit(sw.prefix + string(p)) |
||||||
|
case NoLevel: |
||||||
|
err = sw.w.Info(sw.prefix + string(p)) |
||||||
|
default: |
||||||
|
panic("invalid level") |
||||||
|
} |
||||||
|
// Any CEE prefix is not part of the message, so we don't include its length
|
||||||
|
n = len(p) |
||||||
|
return |
||||||
|
} |
@ -0,0 +1,154 @@ |
|||||||
|
package zerolog |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"io" |
||||||
|
"path" |
||||||
|
"runtime" |
||||||
|
"strconv" |
||||||
|
"strings" |
||||||
|
"sync" |
||||||
|
) |
||||||
|
|
||||||
|
// LevelWriter defines an interface that a writer may implement in order
|
||||||
|
// to receive level information with payload.
|
||||||
|
type LevelWriter interface { |
||||||
|
io.Writer |
||||||
|
WriteLevel(level Level, p []byte) (n int, err error) |
||||||
|
} |
||||||
|
|
||||||
|
type levelWriterAdapter struct { |
||||||
|
io.Writer |
||||||
|
} |
||||||
|
|
||||||
|
func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) { |
||||||
|
return lw.Write(p) |
||||||
|
} |
||||||
|
|
||||||
|
type syncWriter struct { |
||||||
|
mu sync.Mutex |
||||||
|
lw LevelWriter |
||||||
|
} |
||||||
|
|
||||||
|
// SyncWriter wraps w so that each call to Write is synchronized with a mutex.
|
||||||
|
// This syncer can be used to wrap the call to the writer's Write method if it is
|
||||||
|
// not thread safe. Note that you do not need this wrapper for os.File Write
|
||||||
|
// operations on POSIX and Windows systems as they are already thread-safe.
|
||||||
|
func SyncWriter(w io.Writer) io.Writer { |
||||||
|
if lw, ok := w.(LevelWriter); ok { |
||||||
|
return &syncWriter{lw: lw} |
||||||
|
} |
||||||
|
return &syncWriter{lw: levelWriterAdapter{w}} |
||||||
|
} |
||||||
|
|
||||||
|
// Write implements the io.Writer interface.
|
||||||
|
func (s *syncWriter) Write(p []byte) (n int, err error) { |
||||||
|
s.mu.Lock() |
||||||
|
defer s.mu.Unlock() |
||||||
|
return s.lw.Write(p) |
||||||
|
} |
||||||
|
|
||||||
|
// WriteLevel implements the LevelWriter interface.
|
||||||
|
func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) { |
||||||
|
s.mu.Lock() |
||||||
|
defer s.mu.Unlock() |
||||||
|
return s.lw.WriteLevel(l, p) |
||||||
|
} |
||||||
|
|
||||||
|
type multiLevelWriter struct { |
||||||
|
writers []LevelWriter |
||||||
|
} |
||||||
|
|
||||||
|
func (t multiLevelWriter) Write(p []byte) (n int, err error) { |
||||||
|
for _, w := range t.writers { |
||||||
|
if _n, _err := w.Write(p); err == nil { |
||||||
|
n = _n |
||||||
|
if _err != nil { |
||||||
|
err = _err |
||||||
|
} else if _n != len(p) { |
||||||
|
err = io.ErrShortWrite |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) { |
||||||
|
for _, w := range t.writers { |
||||||
|
if _n, _err := w.WriteLevel(l, p); err == nil { |
||||||
|
n = _n |
||||||
|
if _err != nil { |
||||||
|
err = _err |
||||||
|
} else if _n != len(p) { |
||||||
|
err = io.ErrShortWrite |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// MultiLevelWriter creates a writer that duplicates its writes to all the
|
||||||
|
// provided writers, similar to the Unix tee(1) command. If some writers
|
||||||
|
// implement LevelWriter, their WriteLevel method will be used instead of Write.
|
||||||
|
func MultiLevelWriter(writers ...io.Writer) LevelWriter { |
||||||
|
lwriters := make([]LevelWriter, 0, len(writers)) |
||||||
|
for _, w := range writers { |
||||||
|
if lw, ok := w.(LevelWriter); ok { |
||||||
|
lwriters = append(lwriters, lw) |
||||||
|
} else { |
||||||
|
lwriters = append(lwriters, levelWriterAdapter{w}) |
||||||
|
} |
||||||
|
} |
||||||
|
return multiLevelWriter{lwriters} |
||||||
|
} |
||||||
|
|
||||||
|
// TestingLog is the logging interface of testing.TB.
|
||||||
|
type TestingLog interface { |
||||||
|
Log(args ...interface{}) |
||||||
|
Logf(format string, args ...interface{}) |
||||||
|
Helper() |
||||||
|
} |
||||||
|
|
||||||
|
// TestWriter is a writer that writes to testing.TB.
|
||||||
|
type TestWriter struct { |
||||||
|
T TestingLog |
||||||
|
|
||||||
|
// Frame skips caller frames to capture the original file and line numbers.
|
||||||
|
Frame int |
||||||
|
} |
||||||
|
|
||||||
|
// NewTestWriter creates a writer that logs to the testing.TB.
|
||||||
|
func NewTestWriter(t TestingLog) TestWriter { |
||||||
|
return TestWriter{T: t} |
||||||
|
} |
||||||
|
|
||||||
|
// Write to testing.TB.
|
||||||
|
func (t TestWriter) Write(p []byte) (n int, err error) { |
||||||
|
t.T.Helper() |
||||||
|
|
||||||
|
n = len(p) |
||||||
|
|
||||||
|
// Strip trailing newline because t.Log always adds one.
|
||||||
|
p = bytes.TrimRight(p, "\n") |
||||||
|
|
||||||
|
// Try to correct the log file and line number to the caller.
|
||||||
|
if t.Frame > 0 { |
||||||
|
_, origFile, origLine, _ := runtime.Caller(1) |
||||||
|
_, frameFile, frameLine, ok := runtime.Caller(1 + t.Frame) |
||||||
|
if ok { |
||||||
|
erase := strings.Repeat("\b", len(path.Base(origFile))+len(strconv.Itoa(origLine))+3) |
||||||
|
t.T.Logf("%s%s:%d: %s", erase, path.Base(frameFile), frameLine, p) |
||||||
|
return n, err |
||||||
|
} |
||||||
|
} |
||||||
|
t.T.Log(string(p)) |
||||||
|
|
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// ConsoleTestWriter creates an option that correctly sets the file frame depth for testing.TB log.
|
||||||
|
func ConsoleTestWriter(t TestingLog) func(w *ConsoleWriter) { |
||||||
|
return func(w *ConsoleWriter) { |
||||||
|
w.Out = TestWriter{T: t, Frame: 6} |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,34 @@ |
|||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects) |
||||||
|
*.o |
||||||
|
*.a |
||||||
|
*.so |
||||||
|
|
||||||
|
# Folders |
||||||
|
_obj |
||||||
|
_test |
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes |
||||||
|
*.[568vq] |
||||||
|
[568vq].out |
||||||
|
|
||||||
|
*.cgo1.go |
||||||
|
*.cgo2.c |
||||||
|
_cgo_defun.c |
||||||
|
_cgo_gotypes.go |
||||||
|
_cgo_export.* |
||||||
|
|
||||||
|
_testmain.go |
||||||
|
|
||||||
|
*.exe |
||||||
|
*.test |
||||||
|
*.prof |
||||||
|
|
||||||
|
.idea |
||||||
|
.DS_Store |
||||||
|
coverage.txt |
||||||
|
|
||||||
|
# Terraform artifacts |
||||||
|
*.zip |
||||||
|
.terraform* |
||||||
|
terraform* |
||||||
|
/examples/awslambdaechobot/awslambdaechobot |
@ -0,0 +1,13 @@ |
|||||||
|
language: go |
||||||
|
|
||||||
|
go: |
||||||
|
- 1.13.x |
||||||
|
|
||||||
|
install: |
||||||
|
- go get -t -v |
||||||
|
|
||||||
|
script: |
||||||
|
- go test -coverprofile=coverage.txt -covermode=atomic |
||||||
|
|
||||||
|
after_success: |
||||||
|
- if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then bash <(curl -s https://codecov.io/bash); fi |
@ -0,0 +1,22 @@ |
|||||||
|
The MIT License (MIT) |
||||||
|
|
||||||
|
Copyright (c) 2015 Ilya Kowalewski |
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||||
|
of this software and associated documentation files (the "Software"), to deal |
||||||
|
in the Software without restriction, including without limitation the rights |
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||||
|
copies of the Software, and to permit persons to whom the Software is |
||||||
|
furnished to do so, subject to the following conditions: |
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all |
||||||
|
copies or substantial portions of the Software. |
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
||||||
|
SOFTWARE. |
||||||
|
|
@ -0,0 +1,474 @@ |
|||||||
|
# Telebot |
||||||
|
>"I never knew creating Telegram bots could be so _sexy_!" |
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/gopkg.in/tucnak/telebot.v2?status.svg)](https://godoc.org/gopkg.in/tucnak/telebot.v2) |
||||||
|
[![Travis](https://travis-ci.org/tucnak/telebot.svg?branch=v2)](https://travis-ci.org/tucnak/telebot) |
||||||
|
[![codecov.io](https://codecov.io/gh/tucnak/telebot/coverage.svg?branch=develop)](https://codecov.io/gh/tucnak/telebot) |
||||||
|
[![Discuss on Telegram](https://img.shields.io/badge/telegram-discuss-0088cc.svg)](https://t.me/go_telebot) |
||||||
|
|
||||||
|
```bash |
||||||
|
go get -u gopkg.in/tucnak/telebot.v2 |
||||||
|
``` |
||||||
|
|
||||||
|
* [Overview](#overview) |
||||||
|
* [Getting Started](#getting-started) |
||||||
|
- [Poller](#poller) |
||||||
|
- [Commands](#commands) |
||||||
|
- [Files](#files) |
||||||
|
- [Sendable](#sendable) |
||||||
|
- [Editable](#editable) |
||||||
|
- [Keyboards](#keyboards) |
||||||
|
- [Inline mode](#inline-mode) |
||||||
|
* [Contributing](#contributing) |
||||||
|
* [Donate](#donate) |
||||||
|
* [License](#license) |
||||||
|
|
||||||
|
# Overview |
||||||
|
Telebot is a bot framework for [Telegram Bot API](https://core.telegram.org/bots/api). |
||||||
|
This package provides a best-of-its-kind API for command routing, inline query requests and keyboards, as well |
||||||
|
as callbacks. Actually, I went a couple steps further, so instead of making a 1:1 API wrapper I chose to focus on |
||||||
|
the beauty of the API and performance. Some of the strong sides of telebot are: |
||||||
|
|
||||||
|
* Real concise API |
||||||
|
* Command routing |
||||||
|
* Middleware |
||||||
|
* Transparent File API |
||||||
|
* Effortless bot callbacks |
||||||
|
|
||||||
|
All the methods of the telebot API are _extremely_ easy to memorize and get used to. Also, consider Telebot a |
||||||
|
highload-ready solution. I'll test and benchmark the most popular actions and if necessary, optimize |
||||||
|
against them without sacrificing API quality. |
||||||
|
|
||||||
|
# Getting Started |
||||||
|
Let's take a look at the minimal telebot setup: |
||||||
|
```go |
||||||
|
package main |
||||||
|
|
||||||
|
import ( |
||||||
|
"log" |
||||||
|
"time" |
||||||
|
|
||||||
|
tb "gopkg.in/tucnak/telebot.v2" |
||||||
|
) |
||||||
|
|
||||||
|
func main() { |
||||||
|
b, err := tb.NewBot(tb.Settings{ |
||||||
|
// You can also set custom API URL. |
||||||
|
// If this field is empty, it defaults to "https://api.telegram.org". |
||||||
|
URL: "http://195.129.111.17:8012", |
||||||
|
|
||||||
|
Token: "TOKEN_HERE", |
||||||
|
Poller: &tb.LongPoller{Timeout: 10 * time.Second}, |
||||||
|
}) |
||||||
|
|
||||||
|
if err != nil { |
||||||
|
log.Fatal(err) |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
b.Handle("/hello", func(m *tb.Message) { |
||||||
|
b.Send(m.Sender, "Hello World!") |
||||||
|
}) |
||||||
|
|
||||||
|
b.Start() |
||||||
|
} |
||||||
|
|
||||||
|
``` |
||||||
|
|
||||||
|
Simple, innit? Telebot's routing system takes care of delivering updates |
||||||
|
to their endpoints, so in order to get to handle any meaningful event, |
||||||
|
all you got to do is just plug your function into one of the Telebot-provided |
||||||
|
endpoints. You can find the full list |
||||||
|
[here](https://godoc.org/gopkg.in/tucnak/telebot.v2#pkg-constants). |
||||||
|
|
||||||
|
```go |
||||||
|
b, _ := tb.NewBot(settings) |
||||||
|
|
||||||
|
b.Handle(tb.OnText, func(m *tb.Message) { |
||||||
|
// all the text messages that weren't |
||||||
|
// captured by existing handlers |
||||||
|
}) |
||||||
|
|
||||||
|
b.Handle(tb.OnPhoto, func(m *tb.Message) { |
||||||
|
// photos only |
||||||
|
}) |
||||||
|
|
||||||
|
b.Handle(tb.OnChannelPost, func (m *tb.Message) { |
||||||
|
// channel posts only |
||||||
|
}) |
||||||
|
|
||||||
|
b.Handle(tb.OnQuery, func (q *tb.Query) { |
||||||
|
// incoming inline queries |
||||||
|
}) |
||||||
|
``` |
||||||
|
|
||||||
|
There are dozens of supported endpoints (see package consts). Let me know |
||||||
|
if you'd like to see some endpoint or endpoint idea implemented. This system |
||||||
|
is completely extensible, so I can introduce them without breaking |
||||||
|
backwards-compatibility. |
||||||
|
|
||||||
|
## Poller |
||||||
|
Telebot doesn't really care how you provide it with incoming updates, as long |
||||||
|
as you set it up with a Poller, or call ProcessUpdate for each update (see |
||||||
|
[examples/awslambdaechobot](examples/awslambdaechobot)): |
||||||
|
|
||||||
|
```go |
||||||
|
// Poller is a provider of Updates. |
||||||
|
// |
||||||
|
// All pollers must implement Poll(), which accepts bot |
||||||
|
// pointer and subscription channel and starts polling |
||||||
|
// synchronously straight away. |
||||||
|
type Poller interface { |
||||||
|
// Poll is supposed to take the bot object |
||||||
|
// subscription channel and start polling |
||||||
|
// for Updates immediately. |
||||||
|
// |
||||||
|
// Poller must listen for stop constantly and close |
||||||
|
// it as soon as it's done polling. |
||||||
|
Poll(b *Bot, updates chan Update, stop chan struct{}) |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
Telegram Bot API supports long polling and webhook integration. Poller means you |
||||||
|
can plug telebot into whatever existing bot infrastructure (load balancers?) you |
||||||
|
need. Another great thing about pollers is that you can chain |
||||||
|
them, making some sort of middleware: |
||||||
|
```go |
||||||
|
poller := &tb.LongPoller{Timeout: 15 * time.Second} |
||||||
|
spamProtected := tb.NewMiddlewarePoller(poller, func(upd *tb.Update) bool { |
||||||
|
if upd.Message == nil { |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
if strings.Contains(upd.Message.Text, "spam") { |
||||||
|
return false |
||||||
|
} |
||||||
|
|
||||||
|
return true |
||||||
|
}) |
||||||
|
|
||||||
|
bot, _ := tb.NewBot(tb.Settings{ |
||||||
|
// ... |
||||||
|
Poller: spamProtected, |
||||||
|
}) |
||||||
|
|
||||||
|
// graceful shutdown |
||||||
|
time.AfterFunc(N * time.Second, b.Stop) |
||||||
|
|
||||||
|
// blocks until shutdown |
||||||
|
bot.Start() |
||||||
|
|
||||||
|
fmt.Println(poller.LastUpdateID) // 134237 |
||||||
|
``` |
||||||
|
|
||||||
|
## Commands |
||||||
|
When handling commands, Telebot supports both direct (`/command`) and group-like |
||||||
|
syntax (`/command@botname`) and will never deliver messages addressed to some |
||||||
|
other bot, even if [privacy mode](https://core.telegram.org/bots#privacy-mode) is off. |
||||||
|
For simplified deep-linking, telebot also extracts payload: |
||||||
|
```go |
||||||
|
// Command: /start <PAYLOAD> |
||||||
|
b.Handle("/start", func(m *tb.Message) { |
||||||
|
if !m.Private() { |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
fmt.Println(m.Payload) // <PAYLOAD> |
||||||
|
}) |
||||||
|
``` |
||||||
|
|
||||||
|
## Files |
||||||
|
>Telegram allows files up to 20 MB in size. |
||||||
|
|
||||||
|
Telebot allows you to both upload (from disk or by URL) and download (from Telegram) |
||||||
|
files in the bot's scope. Also, sending any kind of media with a File created |
||||||
|
from disk will upload the file to Telegram automatically: |
||||||
|
```go |
||||||
|
a := &tb.Audio{File: tb.FromDisk("file.ogg")} |
||||||
|
|
||||||
|
fmt.Println(a.OnDisk()) // true |
||||||
|
fmt.Println(a.InCloud()) // false |
||||||
|
|
||||||
|
// Will upload the file from disk and send it to recipient |
||||||
|
bot.Send(recipient, a) |
||||||
|
|
||||||
|
// Next time you'll be sending this very *Audio, Telebot won't |
||||||
|
// re-upload the same file but rather utilize its Telegram FileID |
||||||
|
bot.Send(otherRecipient, a) |
||||||
|
|
||||||
|
fmt.Println(a.OnDisk()) // true |
||||||
|
fmt.Println(a.InCloud()) // true |
||||||
|
fmt.Println(a.FileID) // <telegram file id: ABC-DEF1234ghIkl-zyx57W2v1u123ew11> |
||||||
|
``` |
||||||
|
|
||||||
|
You might want to save certain `File`s in order to avoid re-uploading. Feel free |
||||||
|
to marshal them into whatever format; `File` only contains public fields, so no |
||||||
|
data will ever be lost. |
||||||
|
|
||||||
|
## Sendable |
||||||
|
Send is undoubtedly the most important method in Telebot. `Send()` accepts a |
||||||
|
`Recipient` (could be a user, a group or a channel) and a `Sendable`. All of |
||||||
|
the telebot-provided media types (`Photo`, `Audio`, `Video`, etc.) are `Sendable`. |
||||||
|
If you create composite types of your own, and they satisfy the `Sendable` interface, |
||||||
|
Telebot will be able to send them out. |
||||||
|
|
||||||
|
```go |
||||||
|
// Sendable is any object that can send itself. |
||||||
|
// |
||||||
|
// This is pretty cool, since it lets bots implement |
||||||
|
// custom Sendables for complex kinds of media or |
||||||
|
// chat objects spanning across multiple messages. |
||||||
|
type Sendable interface { |
||||||
|
Send(*Bot, Recipient, *SendOptions) (*Message, error) |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
The only type, at the time of writing, that doesn't fit `Send()` is `Album` and there is a reason |
||||||
|
for that. Albums were added not so long ago, so they are slightly quirky for backwards |
||||||
|
compatibility's sake. In fact, an `Album` can be sent, but never received. Instead, |
||||||
|
Telegram returns a `[]Message`, one for each media object in the album: |
||||||
|
```go |
||||||
|
p := &tb.Photo{File: tb.FromDisk("chicken.jpg")} |
||||||
|
v := &tb.Video{File: tb.FromURL("http://video.mp4")} |
||||||
|
|
||||||
|
msgs, err := b.SendAlbum(user, tb.Album{p, v}) |
||||||
|
``` |
||||||
|
|
||||||
|
### Send options |
||||||
|
Send options are objects and flags you can pass to `Send()`, `Edit()` and friends |
||||||
|
as optional arguments (following the recipient and the text/media). The most |
||||||
|
important one is called `SendOptions`, it lets you control _all_ the properties of |
||||||
|
the message supported by Telegram. The only drawback is that it's rather |
||||||
|
inconvenient to use at times, so `Send()` supports multiple shorthands: |
||||||
|
```go |
||||||
|
// regular send options |
||||||
|
b.Send(user, "text", &tb.SendOptions{ |
||||||
|
// ... |
||||||
|
}) |
||||||
|
|
||||||
|
// ReplyMarkup is a part of SendOptions, |
||||||
|
// but often it's the only option you need |
||||||
|
b.Send(user, "text", &tb.ReplyMarkup{ |
||||||
|
// ... |
||||||
|
}) |
||||||
|
|
||||||
|
// flags: no notification && no web link preview |
||||||
|
b.Send(user, "text", tb.Silent, tb.NoPreview) |
||||||
|
``` |
||||||
|
|
||||||
|
Full list of supported option-flags you can find |
||||||
|
[here](https://github.com/tucnak/telebot/blob/v2/options.go#L9). |
||||||
|
|
||||||
|
## Editable |
||||||
|
If you want to edit some existing message, you don't really need to store the |
||||||
|
original `*Message` object. In fact, upon edit, Telegram only requires `chat_id` |
||||||
|
and `message_id`. So you don't really need the Message as a whole. Also, you |
||||||
|
might want to store references to certain messages in the database, so I thought |
||||||
|
it made sense for *any* Go struct to be editable as a Telegram message, to implement |
||||||
|
`Editable`: |
||||||
|
```go |
||||||
|
// Editable is an interface for all objects that |
||||||
|
// provide "message signature", a pair of 32-bit |
||||||
|
// message ID and 64-bit chat ID, both required |
||||||
|
// for edit operations. |
||||||
|
// |
||||||
|
// Use case: DB model struct for messages to-be |
||||||
|
// edited with, say two columns: msg_id,chat_id |
||||||
|
// could easily implement MessageSig() making |
||||||
|
// instances of stored messages editable. |
||||||
|
type Editable interface { |
||||||
|
// MessageSig is a "message signature". |
||||||
|
// |
||||||
|
// For inline messages, return chatID = 0. |
||||||
|
MessageSig() (messageID string, chatID int64) |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
For example, `Message` type is Editable. Here is the implementation of `StoredMessage` |
||||||
|
type, provided by telebot: |
||||||
|
```go |
||||||
|
// StoredMessage is an example struct suitable for being |
||||||
|
// stored in the database as-is or being embedded into |
||||||
|
// a larger struct, which is often the case (you might |
||||||
|
// want to store some metadata alongside, or might not.) |
||||||
|
type StoredMessage struct { |
||||||
|
MessageID string `sql:"message_id" json:"message_id"` |
||||||
|
ChatID int64 `sql:"chat_id" json:"chat_id"` |
||||||
|
} |
||||||
|
|
||||||
|
func (x StoredMessage) MessageSig() (string, int64) { |
||||||
|
return x.MessageID, x.ChatID |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
Why bother at all? Well, it allows you to do things like this: |
||||||
|
```go |
||||||
|
// just two integer columns in the database |
||||||
|
var msgs []tb.StoredMessage |
||||||
|
db.Find(&msgs) // gorm syntax |
||||||
|
|
||||||
|
for _, msg := range msgs { |
||||||
|
bot.Edit(&msg, "Updated text") |
||||||
|
// or |
||||||
|
bot.Delete(&msg) |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
I find it incredibly neat. Worth noting, at this point in time there exists |
||||||
|
another method in the Edit family, `EditCaption()`, which is of pretty |
||||||
|
rare use, so I didn't bother including it in `Edit()`, just like I did with |
||||||
|
`SendAlbum()` as it would inevitably lead to unnecessary complications. |
||||||
|
```go |
||||||
|
var m *Message |
||||||
|
|
||||||
|
// change caption of a photo, audio, etc. |
||||||
|
bot.EditCaption(m, "new caption") |
||||||
|
``` |
||||||
|
|
||||||
|
## Keyboards |
||||||
|
Telebot supports both kinds of keyboards Telegram provides: reply and inline |
||||||
|
keyboards. Any button can also act as an endpoint for `Handle()`. |
||||||
|
|
||||||
|
In `v2.2` we're introducing a more convenient way of building keyboards. |
||||||
|
The main goal is to avoid a lot of boilerplate and to make code clearer. |
||||||
|
|
||||||
|
```go |
||||||
|
func main() { |
||||||
|
b, _ := tb.NewBot(tb.Settings{...}) |
||||||
|
|
||||||
|
var ( |
||||||
|
// Universal markup builders. |
||||||
|
menu = &tb.ReplyMarkup{ResizeReplyKeyboard: true} |
||||||
|
selector = &tb.ReplyMarkup{} |
||||||
|
|
||||||
|
// Reply buttons. |
||||||
|
btnHelp = menu.Text("ℹ Help") |
||||||
|
btnSettings = menu.Text("⚙ Settings") |
||||||
|
|
||||||
|
// Inline buttons. |
||||||
|
// |
||||||
|
// Pressing it will cause the client to |
||||||
|
// send the bot a callback. |
||||||
|
// |
||||||
|
// Make sure Unique stays unique as per button kind, |
||||||
|
// as it has to be for callback routing to work. |
||||||
|
// |
||||||
|
btnPrev = selector.Data("⬅", "prev", ...) |
||||||
|
btnNext = selector.Data("➡", "next", ...) |
||||||
|
) |
||||||
|
|
||||||
|
menu.Reply( |
||||||
|
menu.Row(btnHelp), |
||||||
|
menu.Row(btnSettings), |
||||||
|
) |
||||||
|
selector.Inline( |
||||||
|
selector.Row(btnPrev, btnNext), |
||||||
|
) |
||||||
|
|
||||||
|
// Command: /start <PAYLOAD> |
||||||
|
b.Handle("/start", func(m *tb.Message) { |
||||||
|
if !m.Private() { |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
b.Send(m.Sender, "Hello!", menu) |
||||||
|
}) |
||||||
|
|
||||||
|
// On reply button pressed (message) |
||||||
|
b.Handle(&btnHelp, func(m *tb.Message) {...}) |
||||||
|
|
||||||
|
// On inline button pressed (callback) |
||||||
|
b.Handle(&btnPrev, func(c *tb.Callback) { |
||||||
|
// ... |
||||||
|
// Always respond! |
||||||
|
b.Respond(c, &tb.CallbackResponse{...}) |
||||||
|
}) |
||||||
|
|
||||||
|
b.Start() |
||||||
|
} |
||||||
|
``` |
||||||
|
|
||||||
|
You can use the markup constructor for every possible type of button: |
||||||
|
```go |
||||||
|
r := &tb.ReplyMarkup{} |
||||||
|
|
||||||
|
// Reply buttons: |
||||||
|
r.Text("Hello!") |
||||||
|
r.Contact("Send phone number") |
||||||
|
r.Location("Send location") |
||||||
|
r.Poll(tb.PollQuiz) |
||||||
|
|
||||||
|
// Inline buttons: |
||||||
|
r.Data("Show help", "help") // data is optional |
||||||
|
r.Data("Delete item", "delete", item.ID) |
||||||
|
r.URL("Visit", "https://google.com") |
||||||
|
r.Query("Search", query) |
||||||
|
r.QueryChat("Share", query) |
||||||
|
r.Login("Login", &tb.Login{...}) |
||||||
|
``` |
||||||
|
|
||||||
|
## Inline mode |
||||||
|
So if you want to handle incoming inline queries, you'd better plug the `tb.OnQuery` |
||||||
|
endpoint and then use the `Answer()` method to send a list of inline queries |
||||||
|
back. I think at the time of writing, telebot supports all of the provided result |
||||||
|
types (but not the cached ones). This is what it looks like: |
||||||
|
|
||||||
|
```go |
||||||
|
b.Handle(tb.OnQuery, func(q *tb.Query) { |
||||||
|
urls := []string{ |
||||||
|
"http://photo.jpg", |
||||||
|
"http://photo2.jpg", |
||||||
|
} |
||||||
|
|
||||||
|
results := make(tb.Results, len(urls)) // []tb.Result |
||||||
|
for i, url := range urls { |
||||||
|
result := &tb.PhotoResult{ |
||||||
|
URL: url, |
||||||
|
|
||||||
|
// required for photos |
||||||
|
ThumbURL: url, |
||||||
|
} |
||||||
|
|
||||||
|
results[i] = result |
||||||
|
// needed to set a unique string ID for each result |
||||||
|
results[i].SetResultID(strconv.Itoa(i)) |
||||||
|
} |
||||||
|
|
||||||
|
err := b.Answer(q, &tb.QueryResponse{ |
||||||
|
Results: results, |
||||||
|
CacheTime: 60, // a minute |
||||||
|
}) |
||||||
|
|
||||||
|
if err != nil { |
||||||
|
log.Println(err) |
||||||
|
} |
||||||
|
}) |
||||||
|
``` |
||||||
|
|
||||||
|
There's not much to talk about really. It also supports some form of authentication |
||||||
|
through deep-linking. For that, use fields `SwitchPMText` and `SwitchPMParameter` |
||||||
|
of `QueryResponse`. |
||||||
|
|
||||||
|
# Contributing |
||||||
|
|
||||||
|
1. Fork it |
||||||
|
2. Clone develop: `git clone -b develop https://github.com/tucnak/telebot` |
||||||
|
3. Create your feature branch: `git checkout -b new-feature` |
||||||
|
4. Make changes and add them: `git add .` |
||||||
|
5. Commit: `git commit -m "Add some feature"` |
||||||
|
6. Push: `git push origin new-feature` |
||||||
|
7. Pull request |
||||||
|
|
||||||
|
# Donate |
||||||
|
|
||||||
|
I do coding for fun but I also try to search for interesting solutions and |
||||||
|
optimize them as much as possible. |
||||||
|
If you feel like it's a good piece of software, I wouldn't mind a tip! |
||||||
|
|
||||||
|
Litecoin: `ltc1qskt5ltrtyg7esfjm0ftx6jnacwffhpzpqmerus` |
||||||
|
|
||||||
|
Ethereum: `0xB78A2Ac1D83a0aD0b993046F9fDEfC5e619efCAB` |
||||||
|
|
||||||
|
# License

Telebot is distributed under the MIT license.

@ -0,0 +1,275 @@

package telebot

import (
    "encoding/json"
    "strconv"
    "time"
)

// ChatInviteLink object represents an invite link for a chat.
type ChatInviteLink struct {
    // The invite link.
    InviteLink string `json:"invite_link"`

    // The creator of the link.
    Creator *User `json:"creator"`

    // If the link is primary.
    IsPrimary bool `json:"is_primary"`

    // If the link is revoked.
    IsRevoked bool `json:"is_revoked"`

    // (Optional) Point in time when the link will expire; use
    // ChatInviteLink.ExpireDate() to get time.Time.
    ExpireUnixtime int64 `json:"expire_date,omitempty"`

    // (Optional) Maximum number of users that can be members of
    // the chat simultaneously.
    MemberLimit int `json:"member_limit,omitempty"`
}

// ExpireDate returns the moment of the link expiration in local time.
func (c *ChatInviteLink) ExpireDate() time.Time {
    return time.Unix(c.ExpireUnixtime, 0)
}

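// A usage sketch (hypothetical link value): a zero ExpireUnixtime means the
// link never expires, so check it before calling ExpireDate:
//
//	if link.ExpireUnixtime != 0 {
//		log.Println("expires at:", link.ExpireDate())
//	}
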
// ChatMemberUpdated object represents changes in the status of a chat member.
type ChatMemberUpdated struct {
    // Chat where the user belongs to.
    Chat Chat `json:"chat"`

    // From which user the action was triggered.
    From User `json:"from"`

    // Unixtime; use ChatMemberUpdated.Time() to get time.Time.
    Unixtime int64 `json:"date"`

    // Previous information about the chat member.
    OldChatMember *ChatMember `json:"old_chat_member"`

    // New information about the chat member.
    NewChatMember *ChatMember `json:"new_chat_member"`

    // (Optional) InviteLink which was used by the user to
    // join the chat; for joining by invite link events only.
    InviteLink *ChatInviteLink `json:"invite_link"`
}

// Time returns the moment of the change in local time.
func (c *ChatMemberUpdated) Time() time.Time {
    return time.Unix(c.Unixtime, 0)
}

// Rights is a list of privileges available to chat members.
type Rights struct {
    CanBeEdited         bool `json:"can_be_edited"`
    CanChangeInfo       bool `json:"can_change_info"`
    CanPostMessages     bool `json:"can_post_messages"`
    CanEditMessages     bool `json:"can_edit_messages"`
    CanDeleteMessages   bool `json:"can_delete_messages"`
    CanInviteUsers      bool `json:"can_invite_users"`
    CanRestrictMembers  bool `json:"can_restrict_members"`
    CanPinMessages      bool `json:"can_pin_messages"`
    CanPromoteMembers   bool `json:"can_promote_members"`
    CanSendMessages     bool `json:"can_send_messages"`
    CanSendMedia        bool `json:"can_send_media_messages"`
    CanSendPolls        bool `json:"can_send_polls"`
    CanSendOther        bool `json:"can_send_other_messages"`
    CanAddPreviews      bool `json:"can_add_web_page_previews"`
    CanManageVoiceChats bool `json:"can_manage_voice_chats"`
    CanManageChat       bool `json:"can_manage_chat"`
}

// NoRights is the default Rights{}.
func NoRights() Rights { return Rights{} }

// NoRestrictions should be used when un-restricting or
// un-promoting a user:
//
//	member.Rights = tb.NoRestrictions()
//	bot.Restrict(chat, member)
//
func NoRestrictions() Rights {
    return Rights{
        CanBeEdited:         true,
        CanChangeInfo:       false,
        CanPostMessages:     false,
        CanEditMessages:     false,
        CanDeleteMessages:   false,
        CanInviteUsers:      false,
        CanRestrictMembers:  false,
        CanPinMessages:      false,
        CanPromoteMembers:   false,
        CanSendMessages:     true,
        CanSendMedia:        true,
        CanSendPolls:        true,
        CanSendOther:        true,
        CanAddPreviews:      true,
        CanManageVoiceChats: false,
        CanManageChat:       false,
    }
}

// AdminRights could be used to promote a user to admin.
func AdminRights() Rights {
    return Rights{
        CanBeEdited:         true,
        CanChangeInfo:       true,
        CanPostMessages:     true,
        CanEditMessages:     true,
        CanDeleteMessages:   true,
        CanInviteUsers:      true,
        CanRestrictMembers:  true,
        CanPinMessages:      true,
        CanPromoteMembers:   true,
        CanSendMessages:     true,
        CanSendMedia:        true,
        CanSendPolls:        true,
        CanSendOther:        true,
        CanAddPreviews:      true,
        CanManageVoiceChats: true,
        CanManageChat:       true,
    }
}

// Forever returns an ExpireUnixtime for a "forever" ban:
// Telegram treats any until_date more than 366 days ahead as permanent.
func Forever() int64 {
    return time.Now().Add(367 * 24 * time.Hour).Unix()
}

// Ban will ban the user from the chat until member.RestrictedUntil.
func (b *Bot) Ban(chat *Chat, member *ChatMember, revokeMessages ...bool) error {
    params := map[string]string{
        "chat_id":    chat.Recipient(),
        "user_id":    member.User.Recipient(),
        "until_date": strconv.FormatInt(member.RestrictedUntil, 10),
    }
    if len(revokeMessages) > 0 {
        params["revoke_messages"] = strconv.FormatBool(revokeMessages[0])
    }

    _, err := b.Raw("kickChatMember", params)
    return err
}

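// A usage sketch (hypothetical chat and user values): ban a member forever
// and revoke their messages:
//
//	member := &tb.ChatMember{User: user, RestrictedUntil: tb.Forever()}
//	if err := b.Ban(chat, member, true); err != nil {
//		log.Println(err)
//	}
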
// Unban will unban the user from the chat.
// The optional forBanned parameter maps to only_if_banned: if true,
// the call does nothing when the user is not actually banned.
func (b *Bot) Unban(chat *Chat, user *User, forBanned ...bool) error {
    params := map[string]string{
        "chat_id": chat.Recipient(),
        "user_id": user.Recipient(),
    }

    if len(forBanned) > 0 {
        params["only_if_banned"] = strconv.FormatBool(forBanned[0])
    }

    _, err := b.Raw("unbanChatMember", params)
    return err
}

// Restrict lets you restrict a subset of member's rights until
// member.RestrictedUntil, such as:
//
// * can send messages
// * can send media
// * can send other
// * can add web page previews
//
func (b *Bot) Restrict(chat *Chat, member *ChatMember) error {
    prv, until := member.Rights, member.RestrictedUntil

    params := map[string]interface{}{
        "chat_id":    chat.Recipient(),
        "user_id":    member.User.Recipient(),
        "until_date": strconv.FormatInt(until, 10),
    }
    embedRights(params, prv)

    _, err := b.Raw("restrictChatMember", params)
    return err
}

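// A usage sketch (hypothetical chat and member values): mute a member for a
// day by starting from NoRestrictions and revoking the right to send messages:
//
//	member.Rights = tb.NoRestrictions()
//	member.Rights.CanSendMessages = false
//	member.RestrictedUntil = time.Now().Add(24 * time.Hour).Unix()
//	if err := b.Restrict(chat, member); err != nil {
//		log.Println(err)
//	}
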
// Promote lets you update member's admin rights, such as:
//
// * can change info
// * can post messages
// * can edit messages
// * can delete messages
// * can invite users
// * can restrict members
// * can pin messages
// * can promote members
//
func (b *Bot) Promote(chat *Chat, member *ChatMember) error {
    prv := member.Rights

    params := map[string]interface{}{
        "chat_id":      chat.Recipient(),
        "user_id":      member.User.Recipient(),
        "is_anonymous": member.Anonymous,
    }
    embedRights(params, prv)

    _, err := b.Raw("promoteChatMember", params)
    return err
}

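// A usage sketch (hypothetical chat and member values): granting the full
// admin set defined by AdminRights above:
//
//	member.Rights = tb.AdminRights()
//	if err := b.Promote(chat, member); err != nil {
//		log.Println(err)
//	}
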
// AdminsOf returns a member list of chat admins.
//
// On success, returns an Array of ChatMember objects that
// contains information about all chat administrators except other bots.
// If the chat is a group or a supergroup and
// no administrators were appointed, only the creator will be returned.
func (b *Bot) AdminsOf(chat *Chat) ([]ChatMember, error) {
    params := map[string]string{
        "chat_id": chat.Recipient(),
    }

    data, err := b.Raw("getChatAdministrators", params)
    if err != nil {
        return nil, err
    }

    var resp struct {
        Result []ChatMember
    }
    if err := json.Unmarshal(data, &resp); err != nil {
        return nil, wrapError(err)
    }
    return resp.Result, nil
}

// Len returns the number of members in a chat.
func (b *Bot) Len(chat *Chat) (int, error) {
    params := map[string]string{
        "chat_id": chat.Recipient(),
    }

    data, err := b.Raw("getChatMembersCount", params)
    if err != nil {
        return 0, err
    }

    var resp struct {
        Result int
    }
    if err := json.Unmarshal(data, &resp); err != nil {
        return 0, wrapError(err)
    }
    return resp.Result, nil
}

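// A usage sketch (hypothetical chat value) combining the two lookups above:
//
//	admins, _ := b.AdminsOf(chat)
//	count, _ := b.Len(chat)
//	log.Printf("%d members, %d admins", count, len(admins))
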
// SetAdminTitle sets a custom title for an administrator.
// A title should be 0-16 characters long; emoji are not allowed.
func (b *Bot) SetAdminTitle(chat *Chat, user *User, title string) error {
    params := map[string]string{
        "chat_id":      chat.Recipient(),
        "user_id":      user.Recipient(),
        "custom_title": title,
    }

    _, err := b.Raw("setChatAdministratorCustomTitle", params)
    return err
}

@ -0,0 +1,232 @@

package telebot

import (
    "bytes"
    "encoding/json"
    "io"
    "io/ioutil"
    "log"
    "mime/multipart"
    "net/http"
    "os"
    "strconv"
    "strings"
    "time"

    "github.com/pkg/errors"
)

// Raw lets you call any method of the Bot API manually.
// It also handles API errors, so you only need to unwrap
// the result field from the JSON data.
func (b *Bot) Raw(method string, payload interface{}) ([]byte, error) {
    url := b.URL + "/bot" + b.Token + "/" + method

    var buf bytes.Buffer
    if err := json.NewEncoder(&buf).Encode(payload); err != nil {
        return nil, err
    }

    resp, err := b.client.Post(url, "application/json", &buf)
    if err != nil {
        return nil, wrapError(err)
    }
    resp.Close = true
    defer resp.Body.Close()

    data, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, wrapError(err)
    }

    if b.verbose {
        body, _ := json.Marshal(payload)
        body = bytes.ReplaceAll(body, []byte(`\"`), []byte(`"`))
        body = bytes.ReplaceAll(body, []byte(`"{`), []byte(`{`))
        body = bytes.ReplaceAll(body, []byte(`}"`), []byte(`}`))

        indent := func(b []byte) string {
            buf.Reset()
            json.Indent(&buf, b, "", "\t")
            return buf.String()
        }

        log.Printf("[verbose] telebot: sent request\n"+
            "Method: %v\nParams: %v\nResponse: %v",
            method, indent(body), indent(data))
    }

    // returning data as well
    return data, extractOk(data)
}

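// A usage sketch (hypothetical chat value): calling a Bot API method that
// telebot does not wrap, here sendChatAction, straight through Raw:
//
//	_, err := b.Raw("sendChatAction", map[string]string{
//		"chat_id": chat.Recipient(),
//		"action":  "typing",
//	})
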
func (b *Bot) sendFiles(method string, files map[string]File, params map[string]string) ([]byte, error) {
    rawFiles := map[string]interface{}{}
    for name, f := range files {
        switch {
        case f.InCloud():
            params[name] = f.FileID
        case f.FileURL != "":
            params[name] = f.FileURL
        case f.OnDisk():
            rawFiles[name] = f.FileLocal
        case f.FileReader != nil:
            rawFiles[name] = f.FileReader
        default:
            return nil, errors.Errorf("telebot: File for field %s doesn't exist", name)
        }
    }

    if len(rawFiles) == 0 {
        return b.Raw(method, params)
    }

    pipeReader, pipeWriter := io.Pipe()
    writer := multipart.NewWriter(pipeWriter)

    go func() {
        defer pipeWriter.Close()

        for field, file := range rawFiles {
            if err := addFileToWriter(writer, files[field].fileName, field, file); err != nil {
                pipeWriter.CloseWithError(err)
                return
            }
        }
        for field, value := range params {
            if err := writer.WriteField(field, value); err != nil {
                pipeWriter.CloseWithError(err)
                return
            }
        }
        if err := writer.Close(); err != nil {
            pipeWriter.CloseWithError(err)
            return
        }
    }()

    url := b.URL + "/bot" + b.Token + "/" + method

    resp, err := b.client.Post(url, writer.FormDataContentType(), pipeReader)
    if err != nil {
        err = wrapError(err)
        pipeReader.CloseWithError(err)
        return nil, err
    }
    resp.Close = true
    defer resp.Body.Close()

    if resp.StatusCode == http.StatusInternalServerError {
        return nil, ErrInternal
    }

    data, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, wrapError(err)
    }

    return data, extractOk(data)
}

func addFileToWriter(writer *multipart.Writer, filename, field string, file interface{}) error {
    var reader io.Reader
    if r, ok := file.(io.Reader); ok {
        reader = r
    } else if path, ok := file.(string); ok {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer f.Close()
        reader = f
    } else {
        return errors.Errorf("telebot: File for field %v should be an io.Reader or string", field)
    }

    part, err := writer.CreateFormFile(field, filename)
    if err != nil {
        return err
    }

    _, err = io.Copy(part, reader)
    return err
}

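// A usage sketch: the public Send path ends up in sendFiles. A photo from
// disk is streamed as multipart, while a file already on Telegram's servers
// is passed by its FileID as a plain parameter (hypothetical chat value):
//
//	photo := &tb.Photo{File: tb.FromDisk("chicken.jpg")}
//	msg, err := b.Send(chat, photo)
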
func (b *Bot) sendText(to Recipient, text string, opt *SendOptions) (*Message, error) {
    params := map[string]string{
        "chat_id": to.Recipient(),
        "text":    text,
    }
    b.embedSendOptions(params, opt)

    data, err := b.Raw("sendMessage", params)
    if err != nil {
        return nil, err
    }

    return extractMessage(data)
}

func (b *Bot) sendObject(f *File, what string, params map[string]string, files map[string]File) (*Message, error) {
    sendWhat := "send" + strings.Title(what)

    if what == "videoNote" {
        what = "video_note"
    }

    sendFiles := map[string]File{what: *f}
    for k, v := range files {
        sendFiles[k] = v
    }

    data, err := b.sendFiles(sendWhat, sendFiles, params)
    if err != nil {
        return nil, err
    }

    return extractMessage(data)
}

func (b *Bot) getMe() (*User, error) {
    data, err := b.Raw("getMe", nil)
    if err != nil {
        return nil, err
    }

    var resp struct {
        Result *User
    }
    if err := json.Unmarshal(data, &resp); err != nil {
        return nil, wrapError(err)
    }
    return resp.Result, nil
}

func (b *Bot) getUpdates(offset, limit int, timeout time.Duration, allowed []string) ([]Update, error) {
    params := map[string]string{
        "offset":  strconv.Itoa(offset),
        "timeout": strconv.Itoa(int(timeout / time.Second)),
    }

    if limit != 0 {
        params["limit"] = strconv.Itoa(limit)
    }
    if len(allowed) > 0 {
        data, _ := json.Marshal(allowed)
        params["allowed_updates"] = string(data)
    }

    data, err := b.Raw("getUpdates", params)
    if err != nil {
        return nil, err
    }

    var resp struct {
        Result []Update
    }
    if err := json.Unmarshal(data, &resp); err != nil {
        return nil, wrapError(err)
    }
    return resp.Result, nil
}

File diff suppressed because it is too large

@ -0,0 +1,136 @@

package telebot

import "encoding/json"

// CallbackEndpoint is an interface for any element capable
// of responding to a callback `\f<unique>`.
type CallbackEndpoint interface {
    CallbackUnique() string
}

// Callback object represents a query from a callback button in an
// inline keyboard.
type Callback struct {
    ID string `json:"id"`

    // For messages sent to channels, Sender may be empty.
    Sender *User `json:"from"`

    // Message will be set if the button that originated the query
    // was attached to a message sent by a bot.
    Message *Message `json:"message"`

    // MessageID will be set if the button was attached to a message
    // sent via the bot in inline mode.
    MessageID string `json:"inline_message_id"`

    // Data associated with the callback button. Be aware that
    // a bad client can send arbitrary data in this field.
    Data string `json:"data"`
}

// IsInline says whether the message is an inline message.
func (c *Callback) IsInline() bool {
    return c.MessageID != ""
}

// CallbackResponse builds a response to a Callback query.
//
// See also: https://core.telegram.org/bots/api#answerCallbackQuery
type CallbackResponse struct {
    // The ID of the callback to which this is a response.
    //
    // Note: Telebot sets this field automatically!
    CallbackID string `json:"callback_query_id"`

    // Text of the notification. If not specified, nothing will be
    // shown to the user.
    Text string `json:"text,omitempty"`

    // (Optional) If true, an alert will be shown by the client instead
    // of a notification at the top of the chat screen. Defaults to false.
    ShowAlert bool `json:"show_alert,omitempty"`

    // (Optional) URL that will be opened by the user's client.
    // If you have created a Game and accepted the conditions via
    // @BotFather, specify the URL that opens your game.
    //
    // Note: this will only work if the query comes from a game
    // callback button. Otherwise, you may use deep-linking:
    // https://telegram.me/your_bot?start=XXXX
    URL string `json:"url,omitempty"`
}

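// A usage sketch (hypothetical btnPrev button): answering a callback query
// from a handler; even an empty response dismisses the client's spinner:
//
//	b.Handle(&btnPrev, func(c *tb.Callback) {
//		// ...
//		b.Respond(c, &tb.CallbackResponse{Text: "Done!"})
//	})
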
// InlineButton represents a button displayed in the message.
type InlineButton struct {
    // Unique slug-like name for this kind of button;
    // try to be as specific as possible.
    //
    // It will be used as a callback endpoint.
    Unique string `json:"unique,omitempty"`

    Text            string `json:"text"`
    URL             string `json:"url,omitempty"`
    Data            string `json:"callback_data,omitempty"`
    InlineQuery     string `json:"switch_inline_query,omitempty"`
    InlineQueryChat string `json:"switch_inline_query_current_chat"`
    Login           *Login `json:"login_url,omitempty"`
}

// With returns a copy of the button with the given callback data.
func (t *InlineButton) With(data string) *InlineButton {
    return &InlineButton{
        Unique:          t.Unique,
        Text:            t.Text,
        URL:             t.URL,
        InlineQuery:     t.InlineQuery,
        InlineQueryChat: t.InlineQueryChat,
        Login:           t.Login,
        Data:            data,
    }
}

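// A usage sketch (hypothetical item value): deriving per-item buttons from a
// single template, so each copy carries item-specific callback data:
//
//	del := &tb.InlineButton{Unique: "delete", Text: "Delete"}
//	btn := del.With(item.ID)
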
// CallbackUnique returns InlineButton.Unique.
func (t *InlineButton) CallbackUnique() string {
    return "\f" + t.Unique
}

// CallbackUnique returns ReplyButton.Text.
func (t *ReplyButton) CallbackUnique() string {
    return t.Text
}

// CallbackUnique implements CallbackEndpoint.
func (t *Btn) CallbackUnique() string {
    if t.Unique != "" {
        return "\f" + t.Unique
    }
    return t.Text
}

// Login represents a parameter of the inline keyboard button
// used to automatically authorize a user. Serves as a great replacement
// for the Telegram Login Widget when the user is coming from Telegram.
type Login struct {
    URL         string `json:"url"`
    Text        string `json:"forward_text,omitempty"`
    Username    string `json:"bot_username,omitempty"`
    WriteAccess bool   `json:"request_write_access,omitempty"`
}

// MarshalJSON implements the json.Marshaler interface.
// It is needed to avoid a conflict between the InlineQueryChat and Login
// fields: if the button has a Login field, InlineQueryChat must be omitted.
func (t *InlineButton) MarshalJSON() ([]byte, error) {
    type InlineButtonJSON InlineButton

    if t.Login != nil {
        return json.Marshal(struct {
            InlineButtonJSON
            InlineQueryChat string `json:"switch_inline_query_current_chat,omitempty"`
        }{
            InlineButtonJSON: InlineButtonJSON(*t),
        })
    }
    return json.Marshal(InlineButtonJSON(*t))
}

Some files were not shown because too many files have changed in this diff