Commit d838fbf2b4 by Andrey Kovalev, 3 years ago
548 changed files with 164542 additions and 0 deletions
.drone.yml
@@ -0,0 +1,37 @@
kind: pipeline
type: docker
name: build

steps:
  - name: build
    image: plugins/docker
    settings:
      registry: cr.selcloud.ru
      username:
        from_secret: docker_username
      password:
        from_secret: docker_password
      repo: cr.selcloud.ru/russia9/${DRONE_REPO_NAME}/${DRONE_COMMIT_BRANCH}
      tags:
        - latest
        - ${DRONE_COMMIT_SHA}
      cache_from:
        - cr.selcloud.ru/russia9/${DRONE_REPO_NAME}/${DRONE_COMMIT_BRANCH}:latest

  - name: deploy
    image: appleboy/drone-ssh
    settings:
      host:
        from_secret: ssh_address
      username:
        from_secret: ssh_username
      key:
        from_secret: ssh_key
      port: 22
      script_stop: true
      script:
        - docker-compose -f /srv/chatwars-spy/docker-compose.yml pull
        - docker-compose -f /srv/chatwars-spy/docker-compose.yml up -d
    when:
      branch:
        - master
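The deploy step assumes a Compose file already sitting on the target host at /srv/chatwars-spy/docker-compose.yml; that file is not part of this commit. A minimal sketch of what it could look like, assuming the image pushed by the build step and the environment variables read by cmd/main/main.go below (the service names, the MongoDB setup, and the master/latest image path are all illustrative):

# Hypothetical /srv/chatwars-spy/docker-compose.yml -- not included in this commit
version: "3"
services:
  spy:
    image: cr.selcloud.ru/russia9/chatwars-spy/master:latest  # pushed by the build step
    restart: unless-stopped
    environment:
      MONGO_URI: mongodb://mongo:27017  # required; main.go has no default for it
      MONGO_DB: chatwars                # matches the in-code default
      KAFKA_ADDRESS: digest-api.chtwrs.com:9092
      CHATWARS_VERSION: cw3
      LOG_LEVEL: INFO
    depends_on:
      - mongo
  mongo:
    image: mongo:5  # MongoDB version choice is illustrative
    restart: unless-stopped
    volumes:
      - ./data:/data/db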
.gitignore
@@ -0,0 +1,63 @@
# Created by .ignore support plugin (hsz.mobi)
### Go template
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/

### Vim template
# Swap
[._]*.s[a-v][a-z]
!*.svg  # comment out if you don't need vector files
[._]*.sw[a-p]
[._]s[a-rt-v][a-z]
[._]ss[a-gi-z]
[._]sw[a-p]

# Session
Session.vim
Sessionx.vim

# Temporary
.netrwhist
*~
# Auto-generated tag files
tags
# Persistent undo
[._]*.un~

### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff
.idea

# CMake
cmake-build-*/

# File-based project format
*.iws

# IntelliJ
out/

# JIRA plugin
atlassian-ide-plugin.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
Dockerfile
@@ -0,0 +1,13 @@
FROM golang:1.18

# Set app workdir
WORKDIR /go/src/app

# Copy application sources
COPY . .

# Build app
RUN go build -o app gitea.russia9.dev/Russia9/chatwars-spy/cmd/main

# Run app
CMD ["./app"]
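Outside the pipeline, the same image can be built and run by hand. A sketch, with the chatwars-spy tag as a placeholder; the environment variables are the ones read by cmd/main/main.go below, where KAFKA_ADDRESS, CHATWARS_VERSION and MONGO_DB fall back to digest-api.chtwrs.com:9092, cw3 and chatwars when unset:

# Build from the repository root (tag name is illustrative)
docker build -t chatwars-spy .

# Run against a local MongoDB with verbose, human-readable logging
docker run --rm \
  -e MONGO_URI="mongodb://host.docker.internal:27017" \
  -e LOG_LEVEL=DEBUG \
  -e LOG_PRETTY=true \
  chatwars-spy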
LICENSE
@@ -0,0 +1,674 @@
                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.  We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors.  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights.  Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received.  You must make sure that they, too, receive
or can get the source code.  And you must show them these terms so they
know their rights.

  Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.

  For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software.  For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.

  Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so.  This is fundamentally incompatible with the aim of
protecting users' freedom to change the software.  The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable.  Therefore, we
have designed this version of the GPL to prohibit the practice for those
products.  If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.

  Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary.  To prevent this, the GPL assures that
patents cannot be used to render the program non-free.

  The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

  0. Definitions.

  "This License" refers to version 3 of the GNU General Public License.

  "Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

  "The Program" refers to any copyrightable work licensed under this
License.  Each licensee is addressed as "you".  "Licensees" and
"recipients" may be individuals or organizations.

  To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy.  The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

  A "covered work" means either the unmodified Program or a work based
on the Program.

  To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy.  Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

  To "convey" a work means any kind of propagation that enables other
parties to make or receive copies.  Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

  An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License.  If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

  1. Source Code.

  The "source code" for a work means the preferred form of the work
for making modifications to it.  "Object code" means any non-source
form of a work.

  A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

  The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form.  A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

  The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities.  However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work.  For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

  The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

  The Corresponding Source for a work in source code form is that
same work.

  2. Basic Permissions.

  All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met.  This License explicitly affirms your unlimited
permission to run the unmodified Program.  The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work.  This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

  You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force.  You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright.  Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

  Conveying under any other circumstances is permitted solely under
the conditions stated below.  Sublicensing is not allowed; section 10
makes it unnecessary.

  3. Protecting Users' Legal Rights From Anti-Circumvention Law.

  No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

  When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

  4. Conveying Verbatim Copies.

  You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

  You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

  You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7.  This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy.  This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged.  This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

  A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit.  Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

  You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source.  This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge.  You need not require recipients to copy the
    Corresponding Source along with the object code.  If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source.  Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

  A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

  A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling.  In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage.  For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product.  A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

  "Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source.  The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

  If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information.  But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

  The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed.  Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

  Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

  "Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law.  If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

  When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it.  (Additional permissions may be written to require their own
removal in certain cases when you modify the work.)  You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

  Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

  All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10.  If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term.  If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

  If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

  Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

  You may not propagate or modify a covered work except as expressly
provided under this License.  Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

  However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

  Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

  Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License.  If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

  You are not required to accept this License in order to receive or
run a copy of the Program.  Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance.  However,
nothing other than this License grants you permission to propagate or
modify any covered work.  These actions infringe copyright if you do
not accept this License.  Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

  Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License.  You are not responsible
for enforcing compliance by third parties with this License.

  An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations.  If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

  You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License.  For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

  A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based.  The
work thus licensed is called the contributor's "contributor version".

  A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version.  For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

  Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

  In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement).  To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

  If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients.  "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

  If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

  A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License.  You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

  Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

  If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all.  For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Use with the GNU Affero General Public License.

  Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work.  The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

  14. Revised Versions of this License.

  The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

  Each version is given a distinguishing version number.  If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation.  If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

  If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.

  The GNU General Public License does not permit incorporating your program
into proprietary programs.  If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.  But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
README.md
@@ -0,0 +1,2 @@
# ChatWars Spy
Finds ChatWars users from information about duels, offers, and exchange deals.
cmd/main/main.go
@@ -0,0 +1,170 @@
package main

import (
	"context"
	"encoding/json"
	"gitea.russia9.dev/Russia9/chatwars-spy/internal/utils"
	"gitea.russia9.dev/Russia9/chatwars-spy/pkg/domain"
	"gitea.russia9.dev/Russia9/chatwars-spy/pkg/message"
	"gitea.russia9.dev/Russia9/chatwars-spy/pkg/storage/mongodb"
	"github.com/confluentinc/confluent-kafka-go/kafka"
	"github.com/google/uuid"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
	"os"
	"strconv"
)

func main() {
	// Log settings
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
	pretty, err := strconv.ParseBool(os.Getenv("LOG_PRETTY"))
	if err != nil {
		pretty = false
	}
	if pretty {
		log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
	}
	switch os.Getenv("LOG_LEVEL") {
	case "DISABLED":
		zerolog.SetGlobalLevel(zerolog.Disabled)
	case "PANIC":
		zerolog.SetGlobalLevel(zerolog.PanicLevel)
	case "FATAL":
		zerolog.SetGlobalLevel(zerolog.FatalLevel)
	case "ERROR":
		zerolog.SetGlobalLevel(zerolog.ErrorLevel)
	case "WARN":
		zerolog.SetGlobalLevel(zerolog.WarnLevel)
	case "DEBUG":
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
	case "TRACE":
		zerolog.SetGlobalLevel(zerolog.TraceLevel)
	default:
		zerolog.SetGlobalLevel(zerolog.InfoLevel)
	}

	version := utils.GetEnv("CHATWARS_VERSION", "cw3")

	// Kafka consumer init
	consumer, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers": utils.GetEnv("KAFKA_ADDRESS", "digest-api.chtwrs.com:9092"),
		"group.id":          version + "-" + uuid.New().String(),
		"auto.offset.reset": "latest",
	})
	if err != nil {
		log.Fatal().Err(err).Send()
	}

	// DB init
	client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(os.Getenv("MONGO_URI")))
	if err != nil {
		log.Fatal().Err(err).Send()
	}
	defer client.Disconnect(context.Background())
	err = client.Ping(context.Background(), nil)
	if err != nil {
		log.Fatal().Err(err).Send()
	}
	db := client.Database(utils.GetEnv("MONGO_DB", "chatwars"))
	repo := mongodb.NewUserRepo(db)

	// Kafka subscribe
	err = consumer.SubscribeTopics([]string{version + "-offers", version + "-deals", version + "-duels"}, nil)
	if err != nil {
		log.Fatal().Err(err).Send()
	}

	log.Info().Msg("Started watching")

	// Get messages
	for {
		// Read message
		raw, err := consumer.ReadMessage(-1)
		if err != nil {
			log.Error().Str("source", "consumer").Err(err).Send()
			continue
		}

		log.Trace().Bytes("message", raw.Value).Msg("received message")

		switch *raw.TopicPartition.Topic {
		case version + "-offers": // Offers
			// Decode message
			var msg message.Offer
			err = json.Unmarshal(raw.Value, &msg)
			if err != nil {
				log.Warn().Err(err).Str("topic", *raw.TopicPartition.Topic).Send()
				continue
			}

			// Store user
			_ = repo.Store(context.Background(),
				&domain.User{
					ID:     msg.SellerID,
					Castle: msg.SellerCastle,
					Name:   msg.SellerName,
					Source: "offers",
				},
			)
		case version + "-duels": // Duels
			// Decode message
			var msg message.Duel
			err = json.Unmarshal(raw.Value, &msg)
			if err != nil {
				log.Warn().Err(err).Str("topic", *raw.TopicPartition.Topic).Send()
				continue
			}

			// Store users
			_ = repo.Store(context.Background(),
				&domain.User{
					ID:     msg.Winner.ID,
					Castle: msg.Winner.Castle,
					Guild:  msg.Winner.Tag,
					Name:   msg.Winner.Name,
					Level:  msg.Winner.Level,
					Source: "duels",
				},
			)
			_ = repo.Store(context.Background(),
				&domain.User{
					ID:     msg.Loser.ID,
					Castle: msg.Loser.Castle,
					Guild:  msg.Loser.Tag,
					Name:   msg.Loser.Name,
					Level:  msg.Loser.Level,
					Source: "duels",
				},
			)
		case version + "-deals": // Deals
			// Decode message
			var msg message.Deal
			err = json.Unmarshal(raw.Value, &msg)
			if err != nil {
				log.Warn().Err(err).Str("topic", *raw.TopicPartition.Topic).Send()
				continue
			}

			// Store users
			_ = repo.Store(context.Background(),
				&domain.User{
					ID:     msg.SellerID,
					Castle: msg.SellerCastle,
					Name:   msg.SellerName,
					Source: "deals",
				},
			)
			_ = repo.Store(context.Background(),
				&domain.User{
					ID:     msg.BuyerID,
					Castle: msg.BuyerCastle,
					Name:   msg.BuyerName,
					Source: "deals",
				},
			)
		}
	}
}
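main.go leans on a few types defined elsewhere in the commit's 548 files (pkg/domain, pkg/message, pkg/storage/mongodb, internal/utils) that this excerpt does not include. A sketch of their likely shape, inferred only from the call sites above; every field type, JSON tag, and signature here is an assumption, not the commit's actual code:

// Sketch inferred from usage in cmd/main/main.go; not the real sources.
package sketch

import (
	"context"
	"os"
)

// User mirrors domain.User: one record per player sighting.
type User struct {
	ID     string // ChatWars player identifier (could equally be an int)
	Castle string
	Guild  string // filled only from duel messages (player tag)
	Name   string
	Level  int    // filled only from duel messages
	Source string // topic that revealed the user: "offers", "deals" or "duels"
}

// UserRepo is what mongodb.NewUserRepo presumably returns; main.go
// discards Store's error, so only the signature matters here.
type UserRepo interface {
	Store(ctx context.Context, user *User) error
}

// Offer mirrors message.Offer, the payload of the cw3-offers topic;
// the JSON tag names are guesses.
type Offer struct {
	SellerID     string `json:"sellerId"`
	SellerCastle string `json:"sellerCastle"`
	SellerName   string `json:"sellerName"`
}

// GetEnv mirrors internal/utils.GetEnv: os.Getenv with a fallback.
func GetEnv(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return fallback
}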
go.mod
@@ -0,0 +1,26 @@
module gitea.russia9.dev/Russia9/chatwars-spy

go 1.18

require (
	github.com/confluentinc/confluent-kafka-go v1.8.2
	github.com/google/uuid v1.3.0
	github.com/rs/zerolog v1.26.1
	go.mongodb.org/mongo-driver v1.9.0
)

require (
	github.com/go-stack/stack v1.8.1 // indirect
	github.com/golang/snappy v0.0.4 // indirect
	github.com/klauspost/compress v1.15.1 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/stretchr/testify v1.7.0 // indirect
	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
	github.com/xdg-go/scram v1.1.1 // indirect
	github.com/xdg-go/stringprep v1.0.3 // indirect
	github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect
	golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
	golang.org/x/text v0.3.7 // indirect
	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
go.sum
@@ -0,0 +1,96 @@
github.com/confluentinc/confluent-kafka-go v1.8.2 h1:PBdbvYpyOdFLehj8j+9ba7FL4c4Moxn79gy9cYKxG5E=
github.com/confluentinc/confluent-kafka-go v1.8.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk=
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.mongodb.org/mongo-driver v1.9.0 h1:f3aLGJvQmBl8d9S40IL+jEyBC6hfLPbJjv9t5hEM9ck=
go.mongodb.org/mongo-driver v1.9.0/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
||||||
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= |
||||||
|
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= |
||||||
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= |
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= |
||||||
|
golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= |
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= |
||||||
|
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= |
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= |
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= |
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= |
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= |
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= |
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= |
||||||
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= |
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
||||||
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= |
||||||
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
@ -0,0 +1,10 @@
package utils

import "os"

func GetEnv(key, fallback string) string {
	if value, ok := os.LookupEnv(key); ok {
		return value
	}
	return fallback
}
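
GetEnv is a small configuration helper; a minimal usage sketch follows (the `pkg/utils` import path is inferred from the repo layout, and the variable name is illustrative):

```go
package main

import (
	"fmt"

	"gitea.russia9.dev/Russia9/chatwars-spy/pkg/utils"
)

func main() {
	// Falls back to the default when MONGO_URI is not set in the environment.
	uri := utils.GetEnv("MONGO_URI", "mongodb://localhost:27017")
	fmt.Println(uri)
}
```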
@ -0,0 +1,22 @@
package domain

import (
	"context"
	"time"
)

type User struct {
	ID     string `bson:"_id"`
	Castle string
	Guild  string
	Name   string
	Level  int
	Source string

	FirstSeen time.Time
	LastSeen  time.Time
}

type UserRepo interface {
	Store(ctx context.Context, object *User) error
}
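
The repository interface keeps storage behind a single Store call; a minimal sketch of how a caller might persist a sighting (this helper is hypothetical, and the field values are invented):

```go
package example // illustrative; not a file in this commit

import (
	"context"

	"gitea.russia9.dev/Russia9/chatwars-spy/pkg/domain"
)

// recordSighting persists one observed player through any UserRepo
// implementation (for example the MongoDB one defined later in this commit).
func recordSighting(ctx context.Context, repo domain.UserRepo) error {
	user := &domain.User{
		ID:     "12345",   // hypothetical player ID
		Name:   "Someone", // hypothetical name
		Level:  42,
		Source: "duel",
	}
	return repo.Store(ctx, user)
}
```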
@ -0,0 +1,13 @@
package message

type Deal struct {
	SellerID     string `json:"sellerId"`
	SellerCastle string `json:"sellerCastle"`
	SellerName   string `json:"sellerName"`
	BuyerID      string `json:"buyerId"`
	BuyerCastle  string `json:"buyerCastle"`
	BuyerName    string `json:"buyerName"`
	Item         string `json:"item"`
	Quantity     int    `json:"qty"`
	Price        int    `json:"price"`
}
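
The json tags suggest these structs decode payloads from the Chat Wars data feed; a minimal decoding sketch (the sample payload and the `pkg/domain/message` import path are assumptions, not taken from this commit):

```go
package main

import (
	"encoding/json"
	"fmt"

	"gitea.russia9.dev/Russia9/chatwars-spy/pkg/domain/message"
)

func main() {
	raw := []byte(`{"sellerId":"a1","sellerName":"Alice","buyerId":"b2","buyerName":"Bob","item":"Stick","qty":3,"price":7}`)

	var d message.Deal
	if err := json.Unmarshal(raw, &d); err != nil {
		panic(err)
	}
	fmt.Printf("%s sold %d x %s to %s for %d gold\n",
		d.SellerName, d.Quantity, d.Item, d.BuyerName, d.Price)
}
```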
@ -0,0 +1,17 @@
package message

type Duel struct {
	Winner      DuelUser `json:"winner"`
	Loser       DuelUser `json:"loser"`
	IsChallenge bool     `json:"isChallenge"`
	IsGuildDuel bool     `json:"isGuildDuel"`
}

type DuelUser struct {
	ID     string `json:"id"`
	Name   string `json:"name"`
	Tag    string `json:"tag,omitempty"`
	Castle string `json:"castle"`
	Level  int    `json:"level"`
	Health int    `json:"hp"`
}
@ -0,0 +1,20 @@
package message

type Lot struct {
	LotID        string `json:"lotId"`
	ItemName     string `json:"itemName"`
	SellerTag    string `json:"sellerTag"`
	SellerName   string `json:"sellerName"`
	Quality      string `json:"quality"`
	SellerCastle string `json:"sellerCastle"`
	Condition    string `json:"condition"`
	EndAt        string `json:"endAt"`
	StartAt      string `json:"startedAt"`
	BuyerCastle  string `json:"buyerCastle"`
	Status       string `json:"status"`
	FinishedAt   string `json:"finishedAt"`
	BuyerTag     string `json:"buyerTag"`
	BuyerName    string `json:"buyerName"`
	Price        int    `json:"price"`
	Stats        map[string]int
}
@ -0,0 +1,10 @@
package message

type Offer struct {
	SellerID     string `json:"sellerId"`
	SellerName   string `json:"sellerName"`
	SellerCastle string `json:"sellerCastle"`
	Item         string `json:"item"`
	Quantity     int    `json:"qty"`
	Price        int    `json:"price"`
}
@ -0,0 +1,23 @@
package message

type Store struct {
	Link               string         `json:"link"`
	Name               string         `json:"name"`
	OwnerName          string         `json:"ownerName"`
	OwnerCastle        string         `json:"ownerCastle"`
	Kind               string         `json:"kind"`
	Mana               int            `json:"mana"`
	Offers             []StoreOffer   `json:"offers"`
	Specialization     map[string]int `json:"specialization,omitempty"`
	QualityCraftLevel  int            `json:"qualityCraftLevel"`
	MaintenanceEnabled bool           `json:"maintenanceEnabled"`
	MaintenanceCost    int            `json:"maintenanceCost"`
	GuildDiscount      int            `json:"guildDiscount"`
	CastleDiscount     int            `json:"castleDiscount"`
}

type StoreOffer struct {
	Item  string `json:"item"`
	Price int    `json:"price"`
	Mana  int    `json:"mana"`
}
@ -0,0 +1,52 @@
package mongodb

import (
	"context"
	"errors"
	"gitea.russia9.dev/Russia9/chatwars-spy/pkg/domain"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"time"
)

func NewUserRepo(database *mongo.Database) domain.UserRepo {
	return &userRepo{Collection: database.Collection("users")}
}

type userRepo struct {
	Collection *mongo.Collection
}

func (r *userRepo) Store(ctx context.Context, object *domain.User) error {
	cursor := r.Collection.FindOne(ctx, bson.M{"_id": object.ID})
	if errors.Is(cursor.Err(), mongo.ErrNoDocuments) {
		object.FirstSeen = time.Now()
		object.LastSeen = time.Now()
		_, err := r.Collection.InsertOne(ctx, object)
		return err
	} else if cursor.Err() != nil {
		return cursor.Err()
	}

	// Get old user
	var old domain.User
	err := cursor.Decode(&old)
	if err != nil {
		return err
	}

	// Keep the previously stored guild if the new record has none
	if object.Guild == "" {
		object.Guild = old.Guild
	}

	// Preserve the originally recorded source
	object.Source = old.Source

	// Set LastSeen date
	object.LastSeen = time.Now()

	// Update object in DB
	_, err = r.Collection.ReplaceOne(ctx, bson.M{"_id": object.ID}, object)
	return err
}
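
Store is a read-modify-write: two concurrent writers can interleave between the FindOne and the InsertOne/ReplaceOne. If that ever matters, one alternative is a single atomic upsert; a minimal sketch (not the code in this commit, and it omits the guild/source merging rules above) using `$setOnInsert` so FirstSeen is only written once:

```go
// Hypothetical atomic variant of Store. The field keys assume the driver's
// default lowercasing of struct field names, since User only tags _id.
func (r *userRepo) storeUpsert(ctx context.Context, object *domain.User) error {
	now := time.Now()
	_, err := r.Collection.UpdateOne(ctx,
		bson.M{"_id": object.ID},
		bson.M{
			"$set":         bson.M{"name": object.Name, "level": object.Level, "lastseen": now},
			"$setOnInsert": bson.M{"firstseen": now},
		},
		options.Update().SetUpsert(true), // go.mongodb.org/mongo-driver/mongo/options
	)
	return err
}
```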
@ -0,0 +1,202 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -0,0 +1,2 @@
testconf.json
go_rdkafka_generr/go_rdkafka_generr
@ -0,0 +1,58 @@
package kafka

/**
 * Copyright 2016-2019 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"fmt"
)

/*
#include "select_rdkafka.h"

//Minimum required librdkafka version. This is checked both during
//build-time and runtime.
//Make sure to keep the MIN_RD_KAFKA_VERSION, MIN_VER_ERRSTR and #error
//defines and strings in sync.
//

#define MIN_RD_KAFKA_VERSION 0x01060000

#ifdef __APPLE__
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#else
#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#endif

#if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION
#ifdef __APPLE__
#error "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
#else
#error "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
#endif
#endif
*/
import "C"

func versionCheck() error {
	ver, verstr := LibraryVersion()
	if ver < C.MIN_RD_KAFKA_VERSION {
		return newErrorFromString(ErrNotImplemented,
			fmt.Sprintf("%s: librdkafka version %s (0x%x) detected",
				C.MIN_VER_ERRSTR, verstr, ver))
	}
	return nil
}
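
The same version information that versionCheck validates is available to applications through the exported LibraryVersion function; a small illustrative check (not part of this commit):

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	// LibraryVersion returns the linked librdkafka version as an
	// integer (hex-encoded, e.g. 0x010802ff) and as a string.
	ver, verstr := kafka.LibraryVersion()
	fmt.Printf("librdkafka %s (0x%x)\n", verstr, ver)
}
```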
@ -0,0 +1,145 @@
# Information for confluent-kafka-go developers

Whenever librdkafka error codes are updated make sure to run generate
before building:

```
$ make -f mk/Makefile generr
$ go build ./...
```

## Testing

Some of the tests included in this directory, the benchmark and integration tests in particular,
require an existing Kafka cluster and a testconf.json configuration file to
provide tests with bootstrap brokers, topic name, etc.

The format of testconf.json is a JSON object:
```
{
  "Brokers": "<bootstrap-brokers>",
  "Topic": "<test-topic-name>"
}
```

See testconf-example.json for an example and full set of available options.

To run unit-tests:
```
$ go test
```

To run benchmark tests:
```
$ go test -bench .
```

For the code coverage:
```
$ go test -coverprofile=coverage.out -bench=.
$ go tool cover -func=coverage.out
```

## Build tags

Different build types are supported through Go build tags (`-tags ..`);
these tags should be specified on the **application** build/get/install command.

* By default the bundled platform-specific static build of librdkafka will
  be used. This works out of the box on Mac OSX and glibc-based Linux distros,
  such as Ubuntu and CentOS.
* `-tags musl` - must be specified when building on/for musl-based Linux
  distros, such as Alpine. Will use the bundled static musl build of
  librdkafka.
* `-tags dynamic` - link librdkafka dynamically. A shared librdkafka library
  must be installed manually through other means (apt-get, yum, build from
  source, etc). See the example commands below.
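
For instance, building an application against each flavor might look like the following (illustrative commands, not taken from this document):

```
$ go build ./...                # default: bundled static librdkafka
$ go build -tags musl ./...     # on musl-based distros such as Alpine
$ go build -tags dynamic ./...  # link against a system-installed librdkafka
```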

## Generating HTML documentation

To generate one-page HTML documentation run the mk/doc-gen.py script from the
top-level directory. This script requires the beautifulsoup4 Python package.

```
$ source .../your/virtualenv/bin/activate
$ pip install beautifulsoup4
...
$ make -f mk/Makefile docs
```

## Release process

For each release candidate and final release, perform the following steps:

### Update bundle to latest librdkafka

See instructions in [kafka/librdkafka/README.md](kafka/librdkafka/README.md).

### Update librdkafka version requirement

Update the minimum required librdkafka version in `kafka/00version.go`
and `README.md`.

### Update error codes

Error codes can be automatically generated from the current librdkafka version.

Update generated error codes:

    $ make -f mk/Makefile generr
    # Verify by building

### Rebuild everything

    $ go clean -i ./...
    $ go build ./...

### Run full test suite

Set up a test cluster using whatever mechanism you typically use
(docker, trivup, ccloud, ..).

Make sure to update `kafka/testconf.json` as needed (broker list, $BROKERS).

Run test suite:

    $ go test ./...

### Verify examples

Manually verify that the examples/ applications work.

Also make sure the examples in README.md work.

Convert any examples using `github.com/confluentinc/confluent-kafka-go/kafka` to use
the `gopkg.in/confluentinc/confluent-kafka-go.v1/kafka` import path.

    $ find examples/ -type f -name *\.go -exec sed -i -e 's|github\.com/confluentinc/confluent-kafka-go/kafka|gopkg\.in/confluentinc/confluent-kafka-go\.v1/kafka|g' {} +

### Commit any changes

Make sure to push to github before creating the tag to have CI tests pass.

### Create and push tag

    $ git tag v1.3.0
    $ git push --dry-run origin v1.3.0
    # Remove --dry-run and re-execute if it looks ok.

### Create release notes page on github
File diff suppressed because it is too large
@ -0,0 +1,264 @@
/**
 * Copyright 2018 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"fmt"
	"time"
	"unsafe"
)

/*
#include "select_rdkafka.h"
#include <stdlib.h>
*/
import "C"

// AdminOptionOperationTimeout sets the broker's operation timeout, such as the
// timeout for CreateTopics to complete the creation of topics on the controller
// before returning a result to the application.
//
// CreateTopics, DeleteTopics, CreatePartitions:
// a value 0 will return immediately after triggering topic
// creation, while > 0 will wait this long for topic creation to propagate
// in cluster.
//
// Default: 0 (return immediately).
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions.
type AdminOptionOperationTimeout struct {
	isSet bool
	val   time.Duration
}

func (ao AdminOptionOperationTimeout) supportsCreateTopics() {
}
func (ao AdminOptionOperationTimeout) supportsDeleteTopics() {
}
func (ao AdminOptionOperationTimeout) supportsCreatePartitions() {
}

func (ao AdminOptionOperationTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
	if !ao.isSet {
		return nil
	}

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cErr := C.rd_kafka_AdminOptions_set_operation_timeout(
		cOptions, C.int(durationToMilliseconds(ao.val)),
		cErrstr, cErrstrSize)
	if cErr != 0 {
		C.rd_kafka_AdminOptions_destroy(cOptions)
		return newCErrorFromString(cErr,
			fmt.Sprintf("Failed to set operation timeout: %s", C.GoString(cErrstr)))
	}

	return nil
}

// SetAdminOperationTimeout sets the broker's operation timeout, such as the
// timeout for CreateTopics to complete the creation of topics on the controller
// before returning a result to the application.
//
// CreateTopics, DeleteTopics, CreatePartitions:
// a value 0 will return immediately after triggering topic
// creation, while > 0 will wait this long for topic creation to propagate
// in cluster.
//
// Default: 0 (return immediately).
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions.
func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout) {
	ao.isSet = true
	ao.val = t
	return ao
}

// AdminOptionRequestTimeout sets the overall request timeout, including broker
// lookup, request transmission, operation time on broker, and response.
//
// Default: `socket.timeout.ms`.
//
// Valid for all Admin API methods.
type AdminOptionRequestTimeout struct {
	isSet bool
	val   time.Duration
}

func (ao AdminOptionRequestTimeout) supportsCreateTopics() {
}
func (ao AdminOptionRequestTimeout) supportsDeleteTopics() {
}
func (ao AdminOptionRequestTimeout) supportsCreatePartitions() {
}
func (ao AdminOptionRequestTimeout) supportsAlterConfigs() {
}
func (ao AdminOptionRequestTimeout) supportsDescribeConfigs() {
}

func (ao AdminOptionRequestTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
	if !ao.isSet {
		return nil
	}

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cErr := C.rd_kafka_AdminOptions_set_request_timeout(
		cOptions, C.int(durationToMilliseconds(ao.val)),
		cErrstr, cErrstrSize)
	if cErr != 0 {
		C.rd_kafka_AdminOptions_destroy(cOptions)
		return newCErrorFromString(cErr,
			fmt.Sprintf("%s", C.GoString(cErrstr)))
	}

	return nil
}

// SetAdminRequestTimeout sets the overall request timeout, including broker
// lookup, request transmission, operation time on broker, and response.
//
// Default: `socket.timeout.ms`.
//
// Valid for all Admin API methods.
func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout) {
	ao.isSet = true
	ao.val = t
	return ao
}

// AdminOptionValidateOnly tells the broker to only validate the request,
// without performing the requested operation (create topics, etc).
//
// Default: false.
//
// Valid for CreateTopics, CreatePartitions, AlterConfigs
type AdminOptionValidateOnly struct {
	isSet bool
	val   bool
}

func (ao AdminOptionValidateOnly) supportsCreateTopics() {
}
func (ao AdminOptionValidateOnly) supportsCreatePartitions() {
}
func (ao AdminOptionValidateOnly) supportsAlterConfigs() {
}

func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
	if !ao.isSet {
		return nil
	}

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cErr := C.rd_kafka_AdminOptions_set_validate_only(
		cOptions, bool2cint(ao.val),
		cErrstr, cErrstrSize)
	if cErr != 0 {
		C.rd_kafka_AdminOptions_destroy(cOptions)
		return newCErrorFromString(cErr,
			fmt.Sprintf("%s", C.GoString(cErrstr)))
	}

	return nil
}

// SetAdminValidateOnly tells the broker to only validate the request,
// without performing the requested operation (create topics, etc).
//
// Default: false.
//
// Valid for CreateTopics, DeleteTopics, CreatePartitions, AlterConfigs
func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly) {
	ao.isSet = true
	ao.val = validateOnly
	return ao
}

// CreateTopicsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.
type CreateTopicsAdminOption interface {
	supportsCreateTopics()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DeleteTopicsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout.
type DeleteTopicsAdminOption interface {
	supportsDeleteTopics()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// CreatePartitionsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.
type CreatePartitionsAdminOption interface {
	supportsCreatePartitions()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// AlterConfigsAdminOption - see setters.
//
// See SetAdminRequestTimeout, SetAdminValidateOnly, SetAdminIncremental.
type AlterConfigsAdminOption interface {
	supportsAlterConfigs()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// DescribeConfigsAdminOption - see setters.
//
// See SetAdminRequestTimeout.
type DescribeConfigsAdminOption interface {
	supportsDescribeConfigs()
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

// AdminOption is a generic type not to be used directly.
//
// See CreateTopicsAdminOption et.al.
type AdminOption interface {
	apply(cOptions *C.rd_kafka_AdminOptions_t) error
}

func adminOptionsSetup(h *handle, opType C.rd_kafka_admin_op_t, options []AdminOption) (*C.rd_kafka_AdminOptions_t, error) {
	cOptions := C.rd_kafka_AdminOptions_new(h.rk, opType)
	for _, opt := range options {
		if opt == nil {
			continue
		}
		err := opt.apply(cOptions)
		if err != nil {
			return nil, err
		}
	}

	return cOptions, nil
}
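
These options are passed variadically to the AdminClient methods; a short sketch of creating a topic with an operation timeout (broker address and topic name are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		panic(err)
	}
	defer a.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// SetAdminOperationTimeout asks the broker to wait up to 60s for the
	// topic creation to propagate before answering.
	results, err := a.CreateTopics(ctx,
		[]kafka.TopicSpecification{{Topic: "my-topic", NumPartitions: 3, ReplicationFactor: 1}},
		kafka.SetAdminOperationTimeout(60*time.Second))
	if err != nil {
		panic(err)
	}
	fmt.Println(results)
}
```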
File diff suppressed because it is too large
@ -0,0 +1,13 @@
// +build !dynamic

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin.a -lm -lsasl2 -ldl -lpthread
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static darwin from librdkafka-static-bundle-v1.8.2.tgz"
@ -0,0 +1,9 @@
// +build dynamic

package kafka

// #cgo pkg-config: rdkafka
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "dynamically linked to librdkafka"
@ -0,0 +1,13 @@
// +build !dynamic
// +build !musl

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux.a -lm -ldl -lpthread -lrt
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.8.2.tgz"
@ -0,0 +1,13 @@
// +build !dynamic
// +build musl

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux.a -lm -ldl -lpthread -lrt -lpthread -lrt
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static musl_linux from librdkafka-static-bundle-v1.8.2.tgz"
@ -0,0 +1,13 @@
// +build !dynamic

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_windows.a -lws2_32 -lsecur32 -lcrypt32
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v1.8.2.tgz"
@ -0,0 +1,280 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"fmt"
	"reflect"
	"strings"
	"unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"
*/
import "C"

// ConfigValue supports the following types:
// bool, int, string, any type with the standard String() interface
type ConfigValue interface{}

// ConfigMap is a map containing standard librdkafka configuration properties as documented in:
// https://github.com/edenhill/librdkafka/tree/master/CONFIGURATION.md
//
// The special property "default.topic.config" (optional) is a ConfigMap
// containing default topic configuration properties.
//
// The use of "default.topic.config" is deprecated;
// topic configuration properties shall be specified in the standard ConfigMap.
// For backwards compatibility, "default.topic.config" (if supplied)
// takes precedence.
type ConfigMap map[string]ConfigValue

// SetKey sets configuration property key to value.
//
// For user convenience a key prefixed with {topic}. will be
// set on the "default.topic.config" sub-map; this use is deprecated.
func (m ConfigMap) SetKey(key string, value ConfigValue) error {
	if strings.HasPrefix(key, "{topic}.") {
		_, found := m["default.topic.config"]
		if !found {
			m["default.topic.config"] = ConfigMap{}
		}
		m["default.topic.config"].(ConfigMap)[strings.TrimPrefix(key, "{topic}.")] = value
	} else {
		m[key] = value
	}

	return nil
}

// Set implements flag.Set (command line argument parser) as a convenience
// for `-X key=value` config.
func (m ConfigMap) Set(kv string) error {
	i := strings.Index(kv, "=")
	if i == -1 {
		return newErrorFromString(ErrInvalidArg, "Expected key=value")
	}

	k := kv[:i]
	v := kv[i+1:]

	return m.SetKey(k, v)
}
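
Both setters funnel into the same map; a small illustrative sketch of building a configuration (the property values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	conf := kafka.ConfigMap{"bootstrap.servers": "localhost:9092"}

	// SetKey with an explicit key.
	conf.SetKey("group.id", "example-group")

	// Set parses "key=value", convenient for -X style CLI flags.
	if err := conf.Set("session.timeout.ms=6000"); err != nil {
		panic(err)
	}

	fmt.Println(conf)
}
```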
func value2string(v ConfigValue) (ret string, errstr string) {

	switch x := v.(type) {
	case bool:
		if x {
			ret = "true"
		} else {
			ret = "false"
		}
	case int:
		ret = fmt.Sprintf("%d", x)
	case string:
		ret = x
	case fmt.Stringer:
		ret = x.String()
	default:
		return "", fmt.Sprintf("Invalid value type %T", v)
	}

	return ret, ""
}

// rdkAnyconf abstracts rd_kafka_conf_t and rd_kafka_topic_conf_t
// into a common interface.
type rdkAnyconf interface {
	set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t
}

func anyconfSet(anyconf rdkAnyconf, key string, val ConfigValue) (err error) {
	value, errstr := value2string(val)
	if errstr != "" {
		return newErrorFromString(ErrInvalidArg, fmt.Sprintf("%s for key %s (expected string,bool,int,ConfigMap)", errstr, key))
	}
	cKey := C.CString(key)
	defer C.free(unsafe.Pointer(cKey))
	cVal := C.CString(value)
	defer C.free(unsafe.Pointer(cVal))
	cErrstr := (*C.char)(C.malloc(C.size_t(128)))
	defer C.free(unsafe.Pointer(cErrstr))

	if anyconf.set(cKey, cVal, cErrstr, 128) != C.RD_KAFKA_CONF_OK {
		return newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
	}

	return nil
}

// we need these typedefs to workaround a crash in golint
// when parsing the set() methods below
type rdkConf C.rd_kafka_conf_t
type rdkTopicConf C.rd_kafka_topic_conf_t

func (cConf *rdkConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t {
	return C.rd_kafka_conf_set((*C.rd_kafka_conf_t)(cConf), cKey, cVal, cErrstr, C.size_t(errstrSize))
}

func (ctopicConf *rdkTopicConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t {
	return C.rd_kafka_topic_conf_set((*C.rd_kafka_topic_conf_t)(ctopicConf), cKey, cVal, cErrstr, C.size_t(errstrSize))
}

func configConvertAnyconf(m ConfigMap, anyconf rdkAnyconf) (err error) {
	// set plugins first, any plugin-specific configuration depends on
	// the plugin to have already been set
	pluginPaths, ok := m["plugin.library.paths"]
	if ok {
		err = anyconfSet(anyconf, "plugin.library.paths", pluginPaths)
		if err != nil {
			return err
		}
	}
	for k, v := range m {
		if k == "plugin.library.paths" {
			continue
		}
		switch v.(type) {
		case ConfigMap:
			/* Special sub-ConfigMap, only used for default.topic.config */

			if k != "default.topic.config" {
				return newErrorFromString(ErrInvalidArg, fmt.Sprintf("Invalid type for key %s", k))
			}

			var cTopicConf = C.rd_kafka_topic_conf_new()

			err = configConvertAnyconf(v.(ConfigMap),
				(*rdkTopicConf)(cTopicConf))
			if err != nil {
				C.rd_kafka_topic_conf_destroy(cTopicConf)
				return err
			}

			C.rd_kafka_conf_set_default_topic_conf(
				(*C.rd_kafka_conf_t)(anyconf.(*rdkConf)),
				(*C.rd_kafka_topic_conf_t)((*rdkTopicConf)(cTopicConf)))

		default:
			err = anyconfSet(anyconf, k, v)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// convert ConfigMap to C rd_kafka_conf_t *
func (m ConfigMap) convert() (cConf *C.rd_kafka_conf_t, err error) {
	cConf = C.rd_kafka_conf_new()

	// Set the client.software.name and .version (use librdkafka version).
	_, librdkafkaVersion := LibraryVersion()
	anyconfSet((*rdkConf)(cConf), "client.software.name", "confluent-kafka-go")
	anyconfSet((*rdkConf)(cConf), "client.software.version", librdkafkaVersion)

	err = configConvertAnyconf(m, (*rdkConf)(cConf))
	if err != nil {
		C.rd_kafka_conf_destroy(cConf)
		return nil, err
	}
	return cConf, nil
}

// get finds key in the configmap and returns its value.
// If the key is not found defval is returned.
// If the key is found but the type is mismatched an error is returned.
func (m ConfigMap) get(key string, defval ConfigValue) (ConfigValue, error) {
	if strings.HasPrefix(key, "{topic}.") {
		defconfCv, found := m["default.topic.config"]
		if !found {
			return defval, nil
		}
		return defconfCv.(ConfigMap).get(strings.TrimPrefix(key, "{topic}."), defval)
	}

	v, ok := m[key]
	if !ok {
		return defval, nil
	}

	if defval != nil && reflect.TypeOf(defval) != reflect.TypeOf(v) {
		return nil, newErrorFromString(ErrInvalidArg, fmt.Sprintf("%s expects type %T, not %T", key, defval, v))
	}

	return v, nil
}

// extract performs a get() and if found deletes the key.
func (m ConfigMap) extract(key string, defval ConfigValue) (ConfigValue, error) {

	v, err := m.get(key, defval)
	if err != nil {
		return nil, err
	}

	delete(m, key)

	return v, nil
}

// extractLogConfig extracts generic go.logs.* configuration properties.
func (m ConfigMap) extractLogConfig() (logsChanEnable bool, logsChan chan LogEvent, err error) {
	v, err := m.extract("go.logs.channel.enable", false)
	if err != nil {
		return
	}

	logsChanEnable = v.(bool)

	v, err = m.extract("go.logs.channel", nil)
	if err != nil {
		return
	}

	if v != nil {
		logsChan = v.(chan LogEvent)
	}

	if logsChanEnable {
		// Tell librdkafka to forward logs to the log queue
		m.Set("log.queue=true")
	}

	return
}

func (m ConfigMap) clone() ConfigMap {
	m2 := make(ConfigMap)
	for k, v := range m {
		m2[k] = v
	}
	return m2
}

// Get finds the given key in the ConfigMap and returns its value.
// If the key is not found `defval` is returned.
// If the key is found but the type does not match that of `defval` (unless nil)
// an ErrInvalidArg error is returned.
func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error) {
	return m.get(key, defval)
}

@ -0,0 +1,923 @@
package kafka

/**
 * Copyright 2016-2020 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"fmt"
	"math"
	"time"
	"unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"

static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) {
	return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL;
}
*/
import "C"

// RebalanceCb provides a per-Subscribe*() rebalance event callback.
// The passed Event will be either AssignedPartitions or RevokedPartitions.
type RebalanceCb func(*Consumer, Event) error

// Consumer implements a High-level Apache Kafka Consumer instance
type Consumer struct {
	events             chan Event
	handle             handle
	eventsChanEnable   bool
	readerTermChan     chan bool
	rebalanceCb        RebalanceCb
	appReassigned      bool
	appRebalanceEnable bool // Config setting
}

// String returns a human-readable name for a Consumer instance
func (c *Consumer) String() string {
	return c.handle.String()
}

// gethandle implements the Handle interface
func (c *Consumer) gethandle() *handle {
	return &c.handle
}

// Subscribe to a single topic.
// This replaces the current subscription.
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error {
	return c.SubscribeTopics([]string{topic}, rebalanceCb)
}

// SubscribeTopics subscribes to the provided list of topics.
// This replaces the current subscription.
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error) {
	ctopics := C.rd_kafka_topic_partition_list_new(C.int(len(topics)))
	defer C.rd_kafka_topic_partition_list_destroy(ctopics)

	for _, topic := range topics {
		ctopic := C.CString(topic)
		defer C.free(unsafe.Pointer(ctopic))
		C.rd_kafka_topic_partition_list_add(ctopics, ctopic, C.RD_KAFKA_PARTITION_UA)
	}

	e := C.rd_kafka_subscribe(c.handle.rk, ctopics)
	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(e)
	}

	c.rebalanceCb = rebalanceCb

	return nil
}
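
// A minimal subscription sketch (illustrative only): the consumer c, the
// topic name "mytopic" and the printed output are assumptions, not part of
// this file.
//
//	err := c.SubscribeTopics([]string{"mytopic"}, func(c *Consumer, ev Event) error {
//		switch e := ev.(type) {
//		case AssignedPartitions:
//			fmt.Printf("assigned: %v\n", e.Partitions)
//		case RevokedPartitions:
//			fmt.Printf("revoked: %v\n", e.Partitions)
//		}
//		return nil
//	})
//	if err != nil {
//		// handle subscription failure
//	}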

// Unsubscribe from the current subscription, if any.
func (c *Consumer) Unsubscribe() (err error) {
	C.rd_kafka_unsubscribe(c.handle.rk)
	return nil
}

// Assign an atomic set of partitions to consume.
//
// The .Offset field of each TopicPartition must either be set to an absolute
// starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc),
// but should typically be set to `kafka.OffsetStored` to have the consumer
// use the committed offset as a start position, with a fallback to
// `auto.offset.reset` if there is no committed offset.
//
// This replaces the current assignment.
func (c *Consumer) Assign(partitions []TopicPartition) (err error) {
	c.appReassigned = true

	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)

	e := C.rd_kafka_assign(c.handle.rk, cparts)
	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(e)
	}

	return nil
}
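
// A short sketch of manual assignment (topic and partition are assumed);
// OffsetStored resumes from the committed offset, falling back to
// `auto.offset.reset`:
//
//	topic := "mytopic"
//	err := c.Assign([]TopicPartition{
//		{Topic: &topic, Partition: 0, Offset: OffsetStored},
//	})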

// Unassign the current set of partitions to consume.
func (c *Consumer) Unassign() (err error) {
	c.appReassigned = true

	e := C.rd_kafka_assign(c.handle.rk, nil)
	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(e)
	}

	return nil
}

// IncrementalAssign adds the specified partitions to the current set of
// partitions to consume.
//
// The .Offset field of each TopicPartition must either be set to an absolute
// starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc),
// but should typically be set to `kafka.OffsetStored` to have the consumer
// use the committed offset as a start position, with a fallback to
// `auto.offset.reset` if there is no committed offset.
//
// The new partitions must not be part of the current assignment.
func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error) {
	c.appReassigned = true

	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)

	cError := C.rd_kafka_incremental_assign(c.handle.rk, cparts)
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// IncrementalUnassign removes the specified partitions from the current set of
// partitions to consume.
//
// The .Offset field of the TopicPartition is ignored.
//
// The removed partitions must be part of the current assignment.
func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error) {
	c.appReassigned = true

	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)

	cError := C.rd_kafka_incremental_unassign(c.handle.rk, cparts)
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// GetRebalanceProtocol returns the current consumer group rebalance protocol,
// which is either "EAGER" or "COOPERATIVE".
// If the rebalance protocol is not known in the current state an empty string
// is returned.
// Should typically only be called during rebalancing.
func (c *Consumer) GetRebalanceProtocol() string {
	cStr := C.rd_kafka_rebalance_protocol(c.handle.rk)
	if cStr == nil {
		return ""
	}

	return C.GoString(cStr)
}

// AssignmentLost returns true if current partition assignment has been lost.
// This method is only applicable for use with a subscribing consumer when
// handling a rebalance event or callback.
// Partitions that have been lost may already be owned by other members in the
// group and therefore committing offsets, for example, may fail.
func (c *Consumer) AssignmentLost() bool {
	return cint2bool(C.rd_kafka_assignment_lost(c.handle.rk))
}
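
// A sketch of a rebalance callback that honors the rebalance protocol in use;
// all names are assumed for illustration:
//
//	rebalanceCb := func(c *Consumer, ev Event) error {
//		switch e := ev.(type) {
//		case AssignedPartitions:
//			if c.GetRebalanceProtocol() == "COOPERATIVE" {
//				return c.IncrementalAssign(e.Partitions)
//			}
//			return c.Assign(e.Partitions)
//		case RevokedPartitions:
//			if c.AssignmentLost() {
//				// Partitions may already be owned by other group
//				// members; committing offsets here would likely fail.
//			}
//			if c.GetRebalanceProtocol() == "COOPERATIVE" {
//				return c.IncrementalUnassign(e.Partitions)
//			}
//			return c.Unassign()
//		}
//		return nil
//	}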

// commit commits the specified offsets.
// If offsets is nil the currently assigned partitions' offsets are committed.
// This is a blocking call; the caller will need to wrap it in a goroutine to
// get async or fire-and-forget behaviour.
func (c *Consumer) commit(offsets []TopicPartition) (committedOffsets []TopicPartition, err error) {
	var rkqu *C.rd_kafka_queue_t

	rkqu = C.rd_kafka_queue_new(c.handle.rk)
	defer C.rd_kafka_queue_destroy(rkqu)

	var coffsets *C.rd_kafka_topic_partition_list_t
	if offsets != nil {
		coffsets = newCPartsFromTopicPartitions(offsets)
		defer C.rd_kafka_topic_partition_list_destroy(coffsets)
	}

	cErr := C.rd_kafka_commit_queue(c.handle.rk, coffsets, rkqu, nil, nil)
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cErr)
	}

	rkev := C.rd_kafka_queue_poll(rkqu, C.int(-1))
	if rkev == nil {
		// shouldn't happen
		return nil, newError(C.RD_KAFKA_RESP_ERR__DESTROY)
	}
	defer C.rd_kafka_event_destroy(rkev)

	if C.rd_kafka_event_type(rkev) != C.RD_KAFKA_EVENT_OFFSET_COMMIT {
		panic(fmt.Sprintf("Expected OFFSET_COMMIT, got %s",
			C.GoString(C.rd_kafka_event_name(rkev))))
	}

	cErr = C.rd_kafka_event_error(rkev)
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev))
	}

	cRetoffsets := C.rd_kafka_event_topic_partition_list(rkev)
	if cRetoffsets == nil {
		// no offsets, no error
		return nil, nil
	}
	committedOffsets = newTopicPartitionsFromCparts(cRetoffsets)

	return committedOffsets, nil
}

// Commit offsets for currently assigned partitions.
// This is a blocking call.
// Returns the committed offsets on success.
func (c *Consumer) Commit() ([]TopicPartition, error) {
	return c.commit(nil)
}

// CommitMessage commits offset based on the provided message.
// This is a blocking call.
// Returns the committed offsets on success.
func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error) {
	if m.TopicPartition.Error != nil {
		return nil, newErrorFromString(ErrInvalidArg, "Can't commit errored message")
	}
	offsets := []TopicPartition{m.TopicPartition}
	offsets[0].Offset++
	return c.commit(offsets)
}
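
// An at-least-once sketch: process first, then commit the message's offset
// (CommitMessage commits offset+1 internally). The process() helper is an
// assumption:
//
//	for {
//		msg, err := c.ReadMessage(-1)
//		if err != nil {
//			continue // inspect the error in real code
//		}
//		process(msg)
//		if _, err := c.CommitMessage(msg); err != nil {
//			// handle commit failure
//		}
//	}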

// CommitOffsets commits the provided list of offsets.
// This is a blocking call.
// Returns the committed offsets on success.
func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error) {
	return c.commit(offsets)
}

// StoreOffsets stores the provided list of offsets that will be committed
// to the offset store according to `auto.commit.interval.ms` or manual
// offset-less Commit().
//
// Returns the stored offsets on success. If at least one offset couldn't be stored,
// an error and a list of offsets is returned. Each offset can be checked for
// specific errors via its `.Error` member.
func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error) {
	coffsets := newCPartsFromTopicPartitions(offsets)
	defer C.rd_kafka_topic_partition_list_destroy(coffsets)

	cErr := C.rd_kafka_offsets_store(c.handle.rk, coffsets)

	// coffsets might be annotated with an error
	storedOffsets = newTopicPartitionsFromCparts(coffsets)

	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return storedOffsets, newError(cErr)
	}

	return storedOffsets, nil
}

// StoreMessage stores offset based on the provided message.
// This is a convenience method that uses StoreOffsets to do the actual work.
func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error) {
	if m.TopicPartition.Error != nil {
		return nil, newErrorFromString(ErrInvalidArg, "Can't store errored message")
	}
	if m.TopicPartition.Offset < 0 {
		return nil, newErrorFromString(ErrInvalidArg, "Can't store message with offset less than 0")
	}
	offsets := []TopicPartition{m.TopicPartition}
	offsets[0].Offset++
	return c.StoreOffsets(offsets)
}
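
// A sketch pairing StoreMessage with the auto-committer; it assumes the
// consumer was configured with `enable.auto.offset.store=false` so only
// explicitly stored offsets become eligible for auto-commit:
//
//	msg, err := c.ReadMessage(-1)
//	if err == nil {
//		process(msg) // assumed helper
//		if _, err := c.StoreMessage(msg); err != nil {
//			// offset could not be stored
//		}
//	}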

// Seek seeks the given topic partitions using the offset from the TopicPartition.
//
// If timeoutMs is not 0 the call will wait this long for the
// seek to be performed. If the timeout is reached the internal state
// will be unknown and this function returns ErrTimedOut.
// If timeoutMs is 0 it will initiate the seek but return
// immediately without any error reporting (e.g., async).
//
// Seek() may only be used for partitions already being consumed
// (through Assign() or implicitly through a self-rebalanced Subscribe()).
// To set the starting offset it is preferred to use Assign() and provide
// a starting offset for each partition.
//
// Returns an error on failure or nil otherwise.
func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error {
	rkt := c.handle.getRkt(*partition.Topic)
	cErr := C.rd_kafka_seek(rkt,
		C.int32_t(partition.Partition),
		C.int64_t(partition.Offset),
		C.int(timeoutMs))
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cErr)
	}
	return nil
}

// Poll the consumer for messages or events.
//
// Will block for at most timeoutMs milliseconds.
//
// The following callbacks may be triggered:
//	Subscribe()'s rebalanceCb
//
// Returns nil on timeout, else an Event
func (c *Consumer) Poll(timeoutMs int) (event Event) {
	ev, _ := c.handle.eventPoll(nil, timeoutMs, 1, nil)
	return ev
}

// Events returns the Events channel (if enabled)
func (c *Consumer) Events() chan Event {
	return c.events
}

// Logs returns the log channel if enabled, or nil otherwise.
func (c *Consumer) Logs() chan LogEvent {
	return c.handle.logs
}

// ReadMessage polls the consumer for a message.
//
// This is a convenience API that wraps Poll() and only returns
// messages or errors. All other event types are discarded.
//
// The call will block for at most `timeout` waiting for
// a new message or error. `timeout` may be set to -1 for
// indefinite wait.
//
// Timeout is returned as (nil, err) where err is `err.(kafka.Error).Code() == kafka.ErrTimedOut`.
//
// Messages are returned as (msg, nil),
// while general errors are returned as (nil, err),
// and partition-specific errors are returned as (msg, err) where
// msg.TopicPartition provides partition-specific information (such as topic, partition and offset).
//
// All other event types, such as PartitionEOF, AssignedPartitions, etc, are silently discarded.
func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error) {

	var absTimeout time.Time
	var timeoutMs int

	if timeout > 0 {
		absTimeout = time.Now().Add(timeout)
		timeoutMs = (int)(timeout.Seconds() * 1000.0)
	} else {
		timeoutMs = (int)(timeout)
	}

	for {
		ev := c.Poll(timeoutMs)

		switch e := ev.(type) {
		case *Message:
			if e.TopicPartition.Error != nil {
				return e, e.TopicPartition.Error
			}
			return e, nil
		case Error:
			return nil, e
		default:
			// Ignore other event types
		}

		if timeout > 0 {
			// Calculate remaining time
			timeoutMs = int(math.Max(0.0, absTimeout.Sub(time.Now()).Seconds()*1000.0))
		}

		if timeoutMs == 0 && ev == nil {
			return nil, newError(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
		}
	}
}
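
// A sketch of an event loop built on Poll(); the 100 ms timeout and the
// handling choices are illustrative only:
//
//	run := true
//	for run {
//		switch e := c.Poll(100).(type) {
//		case *Message:
//			fmt.Printf("message on %s: %s\n", e.TopicPartition, string(e.Value))
//		case Error:
//			if e.IsFatal() {
//				run = false
//			}
//		case nil:
//			// poll timed out, nothing to do
//		}
//	}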

// Close Consumer instance.
// The object is no longer usable after this call.
func (c *Consumer) Close() (err error) {

	// Wait for consumerReader() or pollLogEvents to terminate (by closing readerTermChan)
	close(c.readerTermChan)
	c.handle.waitGroup.Wait()
	if c.eventsChanEnable {
		close(c.events)
	}

	// librdkafka's rd_kafka_consumer_close() will block and trigger the
	// rebalance_cb() if one is set. If no callback is set (which is the case
	// with the Go client, since it registers EVENTs rather than callbacks),
	// librdkafka will shortcut the rebalance_cb and do a forced unassign.
	// But we can't have that, since the application might need the final
	// RevokedPartitions before shutting down. So we trigger an Unsubscribe()
	// first, wait for that to propagate (in the Poll loop below), and then
	// close the consumer.
	c.Unsubscribe()

	// Poll for rebalance events
	for {
		c.Poll(10 * 1000)
		if int(C.rd_kafka_queue_length(c.handle.rkq)) == 0 {
			break
		}
	}

	// Destroy our queue
	C.rd_kafka_queue_destroy(c.handle.rkq)
	c.handle.rkq = nil

	// Close the consumer
	C.rd_kafka_consumer_close(c.handle.rk)

	c.handle.cleanup()

	C.rd_kafka_destroy(c.handle.rk)

	return nil
}

// NewConsumer creates a new high-level Consumer instance.
//
// conf is a *ConfigMap with standard librdkafka configuration properties.
//
// Supported special configuration properties:
//   go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel.
//                                        If set to true the app must handle the AssignedPartitions and
//                                        RevokedPartitions events and call Assign() and Unassign()
//                                        respectively.
//   go.events.channel.enable (bool, false) - [deprecated] Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled.
//   go.events.channel.size (int, 1000) - Events() channel size
//   go.logs.channel.enable (bool, false) - Forward log to Logs() channel.
//   go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.
//
// WARNING: Due to the buffering nature of channels (and queues in general) the
// use of the events channel risks receiving outdated events and
// messages. Minimizing go.events.channel.size reduces the risk
// and number of outdated events and messages but does not eliminate
// the factor completely. With a channel size of 1 at most one
// event or message may be outdated.
func NewConsumer(conf *ConfigMap) (*Consumer, error) {

	err := versionCheck()
	if err != nil {
		return nil, err
	}

	// before we do anything with the configuration, create a copy such that
	// the original is not mutated.
	confCopy := conf.clone()

	groupid, _ := confCopy.get("group.id", nil)
	if groupid == nil {
		// without a group.id the underlying cgrp subsystem in librdkafka
		// won't get started, and without it there is no way to consume
		// assigned partitions.
		// So for now require the group.id; this might change in the future.
		return nil, newErrorFromString(ErrInvalidArg, "Required property group.id not set")
	}

	c := &Consumer{}

	v, err := confCopy.extract("go.application.rebalance.enable", false)
	if err != nil {
		return nil, err
	}
	c.appRebalanceEnable = v.(bool)

	v, err = confCopy.extract("go.events.channel.enable", false)
	if err != nil {
		return nil, err
	}
	c.eventsChanEnable = v.(bool)

	v, err = confCopy.extract("go.events.channel.size", 1000)
	if err != nil {
		return nil, err
	}
	eventsChanSize := v.(int)

	logsChanEnable, logsChan, err := confCopy.extractLogConfig()
	if err != nil {
		return nil, err
	}

	cConf, err := confCopy.convert()
	if err != nil {
		return nil, err
	}
	cErrstr := (*C.char)(C.malloc(C.size_t(256)))
	defer C.free(unsafe.Pointer(cErrstr))

	C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_REBALANCE|C.RD_KAFKA_EVENT_OFFSET_COMMIT|C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR|C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH)

	c.handle.rk = C.rd_kafka_new(C.RD_KAFKA_CONSUMER, cConf, cErrstr, 256)
	if c.handle.rk == nil {
		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
	}

	C.rd_kafka_poll_set_consumer(c.handle.rk)

	c.handle.c = c
	c.handle.setup()
	c.readerTermChan = make(chan bool)
	c.handle.rkq = C.rd_kafka_queue_get_consumer(c.handle.rk)
	if c.handle.rkq == nil {
		// no cgrp (no group.id configured), revert to main queue.
		c.handle.rkq = C.rd_kafka_queue_get_main(c.handle.rk)
	}

	if logsChanEnable {
		c.handle.setupLogQueue(logsChan, c.readerTermChan)
	}

	if c.eventsChanEnable {
		c.events = make(chan Event, eventsChanSize)
		/* Start rdkafka consumer queue reader -> events writer goroutine */
		c.handle.waitGroup.Add(1)
		go func() {
			consumerReader(c, c.readerTermChan)
			c.handle.waitGroup.Done()
		}()
	}

	return c, nil
}
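
// A minimal construction sketch; the broker address, group id and offset
// reset policy are placeholders:
//
//	c, err := NewConsumer(&ConfigMap{
//		"bootstrap.servers": "localhost:9092",
//		"group.id":          "mygroup",
//		"auto.offset.reset": "earliest",
//	})
//	if err != nil {
//		// handle construction error
//	}
//	defer c.Close()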

// consumerReader reads messages and events from the librdkafka consumer queue
// and posts them on the consumer channel.
// Runs until termChan closes
func consumerReader(c *Consumer, termChan chan bool) {
	for {
		select {
		case <-termChan:
			return
		default:
			_, term := c.handle.eventPoll(c.events, 100, 1000, termChan)
			if term {
				return
			}
		}
	}
}

// GetMetadata queries broker for cluster and topic metadata.
// If topic is non-nil only information about that topic is returned, else if
// allTopics is false only information about locally used topics is returned,
// else information about all topics is returned.
// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.
func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
	return getMetadata(c, topic, allTopics, timeoutMs)
}

// QueryWatermarkOffsets queries the broker for the low and high offsets for the given topic and partition.
func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) {
	return queryWatermarkOffsets(c, topic, partition, timeoutMs)
}

// GetWatermarkOffsets returns the cached low and high offsets for the given topic
// and partition. The high offset is populated on every fetch response or via calling QueryWatermarkOffsets.
// The low offset is populated every statistics.interval.ms if that value is set.
// OffsetInvalid will be returned if there is no cached offset for either value.
func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error) {
	return getWatermarkOffsets(c, topic, partition)
}

// OffsetsForTimes looks up offsets by timestamp for the given partitions.
//
// The returned offset for each partition is the earliest offset whose
// timestamp is greater than or equal to the given timestamp in the
// corresponding partition. If the provided timestamp exceeds that of the
// last message in the partition, a value of -1 will be returned.
//
// The timestamps to query are represented as `.Offset` in the `times`
// argument and the looked up offsets are represented as `.Offset` in the returned
// `offsets` list.
//
// The function will block for at most timeoutMs milliseconds.
//
// Duplicate Topic+Partitions are not supported.
// Per-partition errors may be returned in the `.Error` field.
func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
	return offsetsForTimes(c, times, timeoutMs)
}
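
// A lookup sketch: the query timestamp (milliseconds since epoch) is carried
// in the .Offset field of each queried partition. Topic and timestamp are
// assumed:
//
//	topic := "mytopic"
//	tsMs := time.Now().Add(-time.Hour).UnixNano() / int64(time.Millisecond)
//	offsets, err := c.OffsetsForTimes([]TopicPartition{
//		{Topic: &topic, Partition: 0, Offset: Offset(tsMs)},
//	}, 5000)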

// Subscription returns the current subscription as set by Subscribe()
func (c *Consumer) Subscription() (topics []string, err error) {
	var cTopics *C.rd_kafka_topic_partition_list_t

	cErr := C.rd_kafka_subscription(c.handle.rk, &cTopics)
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cErr)
	}
	defer C.rd_kafka_topic_partition_list_destroy(cTopics)

	topicCnt := int(cTopics.cnt)
	topics = make([]string, topicCnt)
	for i := 0; i < topicCnt; i++ {
		crktpar := C._c_rdkafka_topic_partition_list_entry(cTopics,
			C.int(i))
		topics[i] = C.GoString(crktpar.topic)
	}

	return topics, nil
}

// Assignment returns the current partition assignments
func (c *Consumer) Assignment() (partitions []TopicPartition, err error) {
	var cParts *C.rd_kafka_topic_partition_list_t

	cErr := C.rd_kafka_assignment(c.handle.rk, &cParts)
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cErr)
	}
	defer C.rd_kafka_topic_partition_list_destroy(cParts)

	partitions = newTopicPartitionsFromCparts(cParts)

	return partitions, nil
}

// Committed retrieves committed offsets for the given set of partitions
func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)
	cerr := C.rd_kafka_committed(c.handle.rk, cparts, C.int(timeoutMs))
	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cerr)
	}

	return newTopicPartitionsFromCparts(cparts), nil
}

// Position returns the current consume position for the given partitions.
// Typical use is to call Assignment() to get the partition list
// and then pass it to Position() to get the current consume position for
// each of the assigned partitions.
// The consume position is the next message to read from the partition,
// i.e., the offset of the last message seen by the application + 1.
func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error) {
	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)
	cerr := C.rd_kafka_position(c.handle.rk, cparts)
	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cerr)
	}

	return newTopicPartitionsFromCparts(cparts), nil
}

// Pause consumption for the provided list of partitions.
//
// Note that messages already enqueued on the consumer's Event channel
// (if `go.events.channel.enable` has been set) will NOT be purged by
// this call, set `go.events.channel.size` accordingly.
func (c *Consumer) Pause(partitions []TopicPartition) (err error) {
	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)
	cerr := C.rd_kafka_pause_partitions(c.handle.rk, cparts)
	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cerr)
	}
	return nil
}

// Resume consumption for the provided list of partitions
func (c *Consumer) Resume(partitions []TopicPartition) (err error) {
	cparts := newCPartsFromTopicPartitions(partitions)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)
	cerr := C.rd_kafka_resume_partitions(c.handle.rk, cparts)
	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cerr)
	}
	return nil
}

// SetOAuthBearerToken sets the data to be transmitted
// to a broker during SASL/OAUTHBEARER authentication. It will return nil
// on success, otherwise an error if:
// 1) the token data is invalid (meaning an expiration time in the past
// or either a token value or an extension key or value that does not meet
// the regular expression requirements as per
// https://tools.ietf.org/html/rfc7628#section-3.1);
// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
// authentication mechanism.
func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error {
	return c.handle.setOAuthBearerToken(oauthBearerToken)
}

// SetOAuthBearerTokenFailure sets the error message describing why token
// retrieval/setting failed; it also schedules a new token refresh event for 10
// seconds later so the attempt may be retried. It will return nil on
// success, otherwise an error if:
// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
// 2) SASL/OAUTHBEARER is supported but is not configured as the client's
// authentication mechanism.
func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error {
	return c.handle.setOAuthBearerTokenFailure(errstr)
}

// ConsumerGroupMetadata reflects the current consumer group member metadata.
type ConsumerGroupMetadata struct {
	serialized []byte
}

// serializeConsumerGroupMetadata converts a C metadata object to its
// binary representation so we don't have to hold on to the C object,
// which would require an explicit .Close().
func serializeConsumerGroupMetadata(cgmd *C.rd_kafka_consumer_group_metadata_t) ([]byte, error) {
	var cBuffer *C.void
	var cSize C.size_t
	cError := C.rd_kafka_consumer_group_metadata_write(cgmd,
		(*unsafe.Pointer)(unsafe.Pointer(&cBuffer)), &cSize)
	if cError != nil {
		return nil, newErrorFromCErrorDestroy(cError)
	}
	defer C.rd_kafka_mem_free(nil, unsafe.Pointer(cBuffer))

	return C.GoBytes(unsafe.Pointer(cBuffer), C.int(cSize)), nil
}

// deserializeConsumerGroupMetadata converts a serialized metadata object
// back to a C object.
func deserializeConsumerGroupMetadata(serialized []byte) (*C.rd_kafka_consumer_group_metadata_t, error) {
	var cgmd *C.rd_kafka_consumer_group_metadata_t

	cSerialized := C.CBytes(serialized)
	defer C.free(cSerialized)

	cError := C.rd_kafka_consumer_group_metadata_read(
		&cgmd, cSerialized, C.size_t(len(serialized)))
	if cError != nil {
		return nil, newErrorFromCErrorDestroy(cError)
	}

	return cgmd, nil
}

// GetConsumerGroupMetadata returns the consumer's current group metadata.
// This object should be passed to the transactional producer's
// SendOffsetsToTransaction() API.
func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error) {
	cgmd := C.rd_kafka_consumer_group_metadata(c.handle.rk)
	if cgmd == nil {
		return nil, NewError(ErrState, "Consumer group metadata not available", false)
	}
	defer C.rd_kafka_consumer_group_metadata_destroy(cgmd)

	serialized, err := serializeConsumerGroupMetadata(cgmd)
	if err != nil {
		return nil, err
	}

	return &ConsumerGroupMetadata{serialized}, nil
}

// NewTestConsumerGroupMetadata creates a new consumer group metadata instance
// mainly for testing use.
// Use GetConsumerGroupMetadata() to retrieve the real metadata.
func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error) {
	cGroupID := C.CString(groupID)
	defer C.free(unsafe.Pointer(cGroupID))

	cgmd := C.rd_kafka_consumer_group_metadata_new(cGroupID)
	if cgmd == nil {
		return nil, NewError(ErrInvalidArg, "Failed to create metadata object", false)
	}

	defer C.rd_kafka_consumer_group_metadata_destroy(cgmd)
	serialized, err := serializeConsumerGroupMetadata(cgmd)
	if err != nil {
		return nil, err
	}

	return &ConsumerGroupMetadata{serialized}, nil
}
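
// A sketch of feeding group metadata to the transactional producer; the
// producer p, context ctx and offsets slice are assumed to exist:
//
//	gm, err := c.GetConsumerGroupMetadata()
//	if err == nil {
//		err = p.SendOffsetsToTransaction(ctx, offsets, gm)
//	}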

// cEventToRebalanceEvent returns an Event (AssignedPartitions or RevokedPartitions)
// based on the specified rkev.
func cEventToRebalanceEvent(rkev *C.rd_kafka_event_t) Event {
	if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS {
		var ev AssignedPartitions
		ev.Partitions = newTopicPartitionsFromCparts(C.rd_kafka_event_topic_partition_list(rkev))
		return ev
	} else if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS {
		var ev RevokedPartitions
		ev.Partitions = newTopicPartitionsFromCparts(C.rd_kafka_event_topic_partition_list(rkev))
		return ev
	} else {
		panic(fmt.Sprintf("Unable to create rebalance event from C type %s",
			C.GoString(C.rd_kafka_err2name(C.rd_kafka_event_error(rkev)))))
	}
}

// handleRebalanceEvent handles an assign/revoke rebalance event.
//
// If the app provided a RebalanceCb to Subscribe*() or
// has go.application.rebalance.enable=true we create an event
// and forward it to the application through the RebalanceCb or the
// Events channel respectively.
// Since librdkafka requires the rebalance event to be "acked" by
// the application (by calling *assign()) to synchronize state we keep track
// of whether the application performed *Assign() or *Unassign(), but this only
// works for the non-channel case. For the channel case we assume the
// application calls *Assign() or *Unassign().
// Failure to do so will "hang" the consumer, e.g., it won't start consuming
// and it won't close cleanly, so this error case should be visible
// immediately to the application developer.
//
// In the polling case (not channel based consumer) the rebalance event
// is returned in retval, else nil is returned.
func (c *Consumer) handleRebalanceEvent(channel chan Event, rkev *C.rd_kafka_event_t) (retval Event) {

	var ev Event

	if c.rebalanceCb != nil || c.appRebalanceEnable {
		// Application has a rebalance callback or has enabled
		// rebalances on the events channel, create the appropriate Event.
		ev = cEventToRebalanceEvent(rkev)
	}

	if channel != nil && c.appRebalanceEnable && c.rebalanceCb == nil {
		// Channel-based consumer with rebalancing enabled,
		// return the rebalance event and rely on the application
		// to call *Assign() / *Unassign().
		return ev
	}

	// Call the application's rebalance callback, if any.
	if c.rebalanceCb != nil {
		// Mark .appReassigned as false to keep track of whether the
		// application called *Assign() / *Unassign().
		c.appReassigned = false

		c.rebalanceCb(c, ev)

		if c.appReassigned {
			// Rebalance event handled by application.
			return nil
		}
	}

	// Either there was no rebalance callback, or the application
	// did not call *Assign / *Unassign, so we need to do it.

	isCooperative := c.GetRebalanceProtocol() == "COOPERATIVE"
	var cError *C.rd_kafka_error_t
	var cErr C.rd_kafka_resp_err_t

	if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS {
		// Assign partitions
		if isCooperative {
			cError = C.rd_kafka_incremental_assign(
				c.handle.rk,
				C.rd_kafka_event_topic_partition_list(rkev))
		} else {
			cErr = C.rd_kafka_assign(
				c.handle.rk,
				C.rd_kafka_event_topic_partition_list(rkev))
		}
	} else {
		// Revoke partitions
		if isCooperative {
			cError = C.rd_kafka_incremental_unassign(
				c.handle.rk,
				C.rd_kafka_event_topic_partition_list(rkev))
		} else {
			cErr = C.rd_kafka_assign(c.handle.rk, nil)
		}
	}

	// If the *assign() call returned an error, forward it to the
	// consumer's Events() channel for visibility.
	if cError != nil {
		c.events <- newErrorFromCErrorDestroy(cError)
	} else if cErr != 0 {
		c.events <- newError(cErr)
	}

	return nil
}

@ -0,0 +1,31 @@
/**
 * Copyright 2019 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"context"
	"time"
)

// timeout returns the remaining time after which work done on behalf of this
// context should be canceled, or ok==false if no deadline/timeout is set.
func timeout(ctx context.Context) (timeout time.Duration, ok bool) {
	if deadline, ok := ctx.Deadline(); ok {
		return deadline.Sub(time.Now()), true
	}
	return 0, false
}
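
// A sketch of translating a context deadline into the millisecond timeouts
// used throughout this package; ctx is assumed:
//
//	timeoutMs := -1 // block indefinitely by default
//	if tmout, ok := timeout(ctx); ok {
//		timeoutMs = int(tmout.Milliseconds())
//	}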

@ -0,0 +1,154 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Automatically generate error codes from librdkafka
// See README for instructions
//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go

/*
#include <stdlib.h>
#include "select_rdkafka.h"
*/
import "C"

import (
	"fmt"
	"unsafe"
)

// Error provides a Kafka-specific error container
type Error struct {
	code             ErrorCode
	str              string
	fatal            bool
	retriable        bool
	txnRequiresAbort bool
}

func newError(code C.rd_kafka_resp_err_t) (err Error) {
	return Error{code: ErrorCode(code)}
}

// NewError creates a new Error.
func NewError(code ErrorCode, str string, fatal bool) (err Error) {
	return Error{code: code, str: str, fatal: fatal}
}

func newErrorFromString(code ErrorCode, str string) (err Error) {
	return Error{code: code, str: str}
}

func newErrorFromCString(code C.rd_kafka_resp_err_t, cstr *C.char) (err Error) {
	var str string
	if cstr != nil {
		str = C.GoString(cstr)
	} else {
		str = ""
	}
	return Error{code: ErrorCode(code), str: str}
}

func newCErrorFromString(code C.rd_kafka_resp_err_t, str string) (err Error) {
	return newErrorFromString(ErrorCode(code), str)
}

// newErrorFromCErrorDestroy creates a new Error instance and destroys
// the passed cError.
func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error {
	defer C.rd_kafka_error_destroy(cError)
	return Error{
		code:             ErrorCode(C.rd_kafka_error_code(cError)),
		str:              C.GoString(C.rd_kafka_error_string(cError)),
		fatal:            cint2bool(C.rd_kafka_error_is_fatal(cError)),
		retriable:        cint2bool(C.rd_kafka_error_is_retriable(cError)),
		txnRequiresAbort: cint2bool(C.rd_kafka_error_txn_requires_abort(cError)),
	}
}

// Error returns a human-readable representation of an Error.
// Same as Error.String()
func (e Error) Error() string {
	return e.String()
}

// String returns a human-readable representation of an Error
func (e Error) String() string {
	var errstr string
	if len(e.str) > 0 {
		errstr = e.str
	} else {
		errstr = e.code.String()
	}

	if e.IsFatal() {
		return fmt.Sprintf("Fatal error: %s", errstr)
	}

	return errstr
}

// Code returns the ErrorCode of an Error
func (e Error) Code() ErrorCode {
	return e.code
}

// IsFatal returns true if the error is a fatal error.
// A fatal error indicates the client instance is no longer operable and
// should be terminated. Typical causes include non-recoverable
// idempotent producer errors.
func (e Error) IsFatal() bool {
	return e.fatal
}

// IsRetriable returns true if the operation that caused this error
// may be retried.
// This flag is currently only set by the Transactional producer API.
func (e Error) IsRetriable() bool {
	return e.retriable
}

// TxnRequiresAbort returns true if the error is an abortable transaction error
// that requires the application to abort the current transaction with
// AbortTransaction() and start a new transaction with BeginTransaction()
// if it wishes to proceed with transactional operations.
// This flag is only set by the Transactional producer API.
func (e Error) TxnRequiresAbort() bool {
	return e.txnRequiresAbort
}

// getFatalError returns an Error object if the client instance has raised a fatal error, else nil.
func getFatalError(H Handle) error {
	cErrstr := (*C.char)(C.malloc(C.size_t(512)))
	defer C.free(unsafe.Pointer(cErrstr))

	cErr := C.rd_kafka_fatal_error(H.gethandle().rk, cErrstr, 512)
	if int(cErr) == 0 {
		return nil
	}

	err := newErrorFromCString(cErr, cErrstr)
	err.fatal = true

	return err
}

// testFatalError triggers a fatal error in the underlying client.
// This is to be used strictly for testing purposes.
func testFatalError(H Handle, code ErrorCode, str string) ErrorCode {
	return ErrorCode(C.rd_kafka_test_fatal_error(H.gethandle().rk, C.rd_kafka_resp_err_t(code), C.CString(str)))
}
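
// A sketch of inspecting a returned error; err is assumed to come from a
// consumer or producer call:
//
//	if err != nil {
//		if kerr, ok := err.(Error); ok {
//			switch {
//			case kerr.Code() == ErrTimedOut:
//				// benign timeout, safe to retry
//			case kerr.IsFatal():
//				// client instance is no longer usable
//			}
//		}
//	}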

@ -0,0 +1,112 @@
package kafka

/**
 * Copyright 2020 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Automatically generate error codes from librdkafka
// See README for instructions
//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go

/*
#include <stdlib.h>
#include "select_rdkafka.h"

static const char *errdesc_to_string (const struct rd_kafka_err_desc *ed, int idx) {
	return ed[idx].name;
}

static const char *errdesc_to_desc (const struct rd_kafka_err_desc *ed, int idx) {
	return ed[idx].desc;
}
*/
import "C"

import (
	"fmt"
	"os"
	"strings"
	"time"
)

// camelCase transforms a snake_case string to CamelCase
// (e.g., "broker_not_available" becomes "BrokerNotAvailable").
func camelCase(s string) string {
	ret := ""
	for _, v := range strings.Split(s, "_") {
		if len(v) == 0 {
			continue
		}
		ret += strings.ToUpper((string)(v[0])) + strings.ToLower(v[1:])
	}
	return ret
}

// WriteErrorCodes writes Go error code constants to file from the
// librdkafka error codes.
// This function is not intended for public use.
func WriteErrorCodes(f *os.File) {
	f.WriteString("package kafka\n")
	now := time.Now()
	f.WriteString(fmt.Sprintf("// Copyright 2016-%d Confluent Inc.\n", now.Year()))
	f.WriteString(fmt.Sprintf("// AUTOMATICALLY GENERATED ON %v USING librdkafka %s\n",
		now, C.GoString(C.rd_kafka_version_str())))

	var errdescs *C.struct_rd_kafka_err_desc
	var csize C.size_t
	C.rd_kafka_get_err_descs(&errdescs, &csize)

	f.WriteString(`
/*
#include "select_rdkafka.h"
*/
import "C"

// ErrorCode is the integer representation of local and broker error codes
type ErrorCode int

// String returns a human readable representation of an error code
func (c ErrorCode) String() string {
	return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c)))
}

const (
`)

	for i := 0; i < int(csize); i++ {
		orig := C.GoString(C.errdesc_to_string(errdescs, C.int(i)))
		if len(orig) == 0 {
			continue
		}
		desc := C.GoString(C.errdesc_to_desc(errdescs, C.int(i)))
		if len(desc) == 0 {
			continue
		}

		errname := "Err" + camelCase(orig)

		// Special handling to please golint
		// Eof -> EOF
		// Id -> ID
		errname = strings.Replace(errname, "Eof", "EOF", -1)
		errname = strings.Replace(errname, "Id", "ID", -1)

		f.WriteString(fmt.Sprintf("	// %s %s\n", errname, desc))
		f.WriteString(fmt.Sprintf("	%s ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_%s)\n",
			errname, orig))
	}

	f.WriteString(")\n")
}

@ -0,0 +1,316 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"fmt"
	"os"
	"unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"

void chdrs_to_tmphdrs (glue_msg_t *gMsg) {
	size_t i = 0;
	const char *name;
	const void *val;
	size_t size;
	rd_kafka_headers_t *chdrs;

	if (rd_kafka_message_headers(gMsg->msg, &chdrs)) {
		gMsg->tmphdrs = NULL;
		gMsg->tmphdrsCnt = 0;
		return;
	}

	gMsg->tmphdrsCnt = rd_kafka_header_cnt(chdrs);
	gMsg->tmphdrs = malloc(sizeof(*gMsg->tmphdrs) * gMsg->tmphdrsCnt);

	while (!rd_kafka_header_get_all(chdrs, i,
	                                &gMsg->tmphdrs[i].key,
	                                &gMsg->tmphdrs[i].val,
	                                (size_t *)&gMsg->tmphdrs[i].size))
		i++;
}

rd_kafka_event_t *_rk_queue_poll (rd_kafka_queue_t *rkq, int timeoutMs,
                                  rd_kafka_event_type_t *evtype,
                                  glue_msg_t *gMsg,
                                  rd_kafka_event_t *prev_rkev) {
	rd_kafka_event_t *rkev;

	if (prev_rkev)
		rd_kafka_event_destroy(prev_rkev);

	rkev = rd_kafka_queue_poll(rkq, timeoutMs);
	*evtype = rd_kafka_event_type(rkev);

	if (*evtype == RD_KAFKA_EVENT_FETCH) {
		gMsg->msg = (rd_kafka_message_t *)rd_kafka_event_message_next(rkev);
		gMsg->ts = rd_kafka_message_timestamp(gMsg->msg, &gMsg->tstype);

		if (gMsg->want_hdrs)
			chdrs_to_tmphdrs(gMsg);
	}

	return rkev;
}
*/
import "C"

func chdrsToTmphdrs(gMsg *C.glue_msg_t) {
	C.chdrs_to_tmphdrs(gMsg)
}

// Event generic interface
type Event interface {
	// String returns a human-readable representation of the event
	String() string
}

// Specific event types

// Stats statistics event
type Stats struct {
	statsJSON string
}

func (e Stats) String() string {
	return e.statsJSON
}

// AssignedPartitions consumer group rebalance event: assigned partition set
type AssignedPartitions struct {
	Partitions []TopicPartition
}

func (e AssignedPartitions) String() string {
	return fmt.Sprintf("AssignedPartitions: %v", e.Partitions)
}

// RevokedPartitions consumer group rebalance event: revoked partition set
type RevokedPartitions struct {
	Partitions []TopicPartition
}

func (e RevokedPartitions) String() string {
	return fmt.Sprintf("RevokedPartitions: %v", e.Partitions)
}

// PartitionEOF consumer reached end of partition.
// Needs to be explicitly enabled by setting the `enable.partition.eof`
// configuration property to true.
type PartitionEOF TopicPartition

func (p PartitionEOF) String() string {
	return fmt.Sprintf("EOF at %s", TopicPartition(p))
}

// OffsetsCommitted reports committed offsets
type OffsetsCommitted struct {
	Error   error
	Offsets []TopicPartition
}

func (o OffsetsCommitted) String() string {
	return fmt.Sprintf("OffsetsCommitted (%v, %v)", o.Error, o.Offsets)
}

// OAuthBearerTokenRefresh indicates token refresh is required
type OAuthBearerTokenRefresh struct {
	// Config is the value of the sasl.oauthbearer.config property
	Config string
}

func (o OAuthBearerTokenRefresh) String() string {
	return "OAuthBearerTokenRefresh"
}
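
// A sketch of handling this event inside a poll loop's type switch; the
// fetchToken helper is an assumption returning an OAuthBearerToken:
//
//	case OAuthBearerTokenRefresh:
//		token, err := fetchToken(e.Config)
//		if err != nil {
//			c.SetOAuthBearerTokenFailure(err.Error())
//		} else {
//			c.SetOAuthBearerToken(token)
//		}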

// eventPoll polls an event from the handler's C rd_kafka_queue_t,
// translates it into an Event type and then sends it on `channel` if non-nil, else returns the Event.
// termChan is an optional channel to monitor along with producing to channel
// to indicate that `channel` is being terminated.
// Returns an (event Event, terminate bool) tuple, where terminate indicates
// if termChan received a termination event.
func (h *handle) eventPoll(channel chan Event, timeoutMs int, maxEvents int, termChan chan bool) (Event, bool) {

	var prevRkev *C.rd_kafka_event_t
	term := false

	var retval Event

	if channel == nil {
		maxEvents = 1
	}
out:
	for evcnt := 0; evcnt < maxEvents; evcnt++ {
		var evtype C.rd_kafka_event_type_t
		var gMsg C.glue_msg_t
		gMsg.want_hdrs = C.int8_t(bool2cint(h.msgFields.Headers))
		rkev := C._rk_queue_poll(h.rkq, C.int(timeoutMs), &evtype, &gMsg, prevRkev)
		prevRkev = rkev
		timeoutMs = 0

		retval = nil

		switch evtype {
		case C.RD_KAFKA_EVENT_FETCH:
			// Consumer fetch event, new message.
			// Extracted into temporary gMsg for optimization
			retval = h.newMessageFromGlueMsg(&gMsg)

		case C.RD_KAFKA_EVENT_REBALANCE:
			// Consumer rebalance event
			retval = h.c.handleRebalanceEvent(channel, rkev)

		case C.RD_KAFKA_EVENT_ERROR:
			// Error event
			cErr := C.rd_kafka_event_error(rkev)
			if cErr == C.RD_KAFKA_RESP_ERR__PARTITION_EOF {
				crktpar := C.rd_kafka_event_topic_partition(rkev)
				if crktpar == nil {
					break
				}

				defer C.rd_kafka_topic_partition_destroy(crktpar)
				var peof PartitionEOF
				setupTopicPartitionFromCrktpar((*TopicPartition)(&peof), crktpar)

				retval = peof

			} else if int(C.rd_kafka_event_error_is_fatal(rkev)) != 0 {
				// A fatal error has been raised.
				// Extract the actual error from the client
				// instance and return a new Error with
				// fatal set to true.
				cFatalErrstrSize := C.size_t(512)
				cFatalErrstr := (*C.char)(C.malloc(cFatalErrstrSize))
				defer C.free(unsafe.Pointer(cFatalErrstr))
				cFatalErr := C.rd_kafka_fatal_error(h.rk, cFatalErrstr, cFatalErrstrSize)
				fatalErr := newErrorFromCString(cFatalErr, cFatalErrstr)
				fatalErr.fatal = true
				retval = fatalErr

			} else {
				retval = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev))
			}

		case C.RD_KAFKA_EVENT_STATS:
			retval = &Stats{C.GoString(C.rd_kafka_event_stats(rkev))}

		case C.RD_KAFKA_EVENT_DR:
			// Producer Delivery Report event.
			// Each such event contains delivery reports for all
			// messages in the produced batch.
			// Forward delivery reports to per-message's response channel
			// or to the global Producer.Events channel, or none.
			rkmessages := make([]*C.rd_kafka_message_t, int(C.rd_kafka_event_message_count(rkev)))

			cnt := int(C.rd_kafka_event_message_array(rkev, (**C.rd_kafka_message_t)(unsafe.Pointer(&rkmessages[0])), C.size_t(len(rkmessages))))

			for _, rkmessage := range rkmessages[:cnt] {
				msg := h.newMessageFromC(rkmessage)
				var ch *chan Event

				if rkmessage._private != nil {
					// Find cgoif by id
					cg, found := h.cgoGet((int)((uintptr)(rkmessage._private)))
					if found {
						cdr := cg.(cgoDr)

						if cdr.deliveryChan != nil {
							ch = &cdr.deliveryChan
						}
						msg.Opaque = cdr.opaque
					}
				}

				if ch == nil && h.fwdDr {
					ch = &channel
				}

				if ch != nil {
					select {
					case *ch <- msg:
					case <-termChan:
						retval = nil
						term = true
						break out
					}

				} else {
					retval = msg
					break out
				}
			}

		case C.RD_KAFKA_EVENT_OFFSET_COMMIT:
			// Offsets committed
			cErr := C.rd_kafka_event_error(rkev)
			coffsets := C.rd_kafka_event_topic_partition_list(rkev)
			var offsets []TopicPartition
			if coffsets != nil {
				offsets = newTopicPartitionsFromCparts(coffsets)
			}

			if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
				retval = OffsetsCommitted{newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)), offsets}
			} else {
				retval = OffsetsCommitted{nil, offsets}
			}

		case C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
			ev := OAuthBearerTokenRefresh{C.GoString(C.rd_kafka_event_config_string(rkev))}
			retval = ev

		case C.RD_KAFKA_EVENT_NONE:
			// poll timed out: no events available
			break out

		default:
			if rkev != nil {
				fmt.Fprintf(os.Stderr, "Ignored event %s\n",
					C.GoString(C.rd_kafka_event_name(rkev)))
			}
		}

		if retval != nil {
			if channel != nil {
				select {
				case channel <- retval:
				case <-termChan:
||||||
|
retval = nil |
||||||
|
term = true |
||||||
|
break out |
||||||
|
} |
||||||
|
} else { |
||||||
|
break out |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
if prevRkev != nil { |
||||||
|
C.rd_kafka_event_destroy(prevRkev) |
||||||
|
} |
||||||
|
|
||||||
|
return retval, term |
||||||
|
} |
@ -0,0 +1,337 @@
package kafka

// Copyright 2016-2021 Confluent Inc.

// AUTOMATICALLY GENERATED ON 2021-12-08 12:44:39.243338672 +0100 CET m=+0.000248284 USING librdkafka 1.8.2

/*
#include "select_rdkafka.h"
*/
import "C"

// ErrorCode is the integer representation of local and broker error codes
type ErrorCode int

// String returns a human readable representation of an error code
func (c ErrorCode) String() string {
	return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c)))
}
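
// NOTE (editorial sketch, not part of the generated file): ErrorCode values are
// usually reached by asserting a returned error to the package's Error type and
// comparing its Code(); treating ErrTimedOut as retriable below is an
// illustrative assumption, not a rule this file prescribes.
func isLocalTimeout(err error) bool {
	kafkaErr, ok := err.(Error)
	return ok && kafkaErr.Code() == ErrTimedOut
}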

const (
	// ErrBadMsg Local: Bad message format
	ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
	// ErrBadCompression Local: Invalid compressed data
	ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION)
	// ErrDestroy Local: Broker handle destroyed
	ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY)
	// ErrFail Local: Communication failure with broker
	ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL)
	// ErrTransport Local: Broker transport failure
	ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT)
	// ErrCritSysResource Local: Critical system resource failure
	ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE)
	// ErrResolve Local: Host resolution failure
	ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE)
	// ErrMsgTimedOut Local: Message timed out
	ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
	// ErrPartitionEOF Broker: No more messages
	ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF)
	// ErrUnknownPartition Local: Unknown partition
	ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
	// ErrFs Local: File or filesystem error
	ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS)
	// ErrUnknownTopic Local: Unknown topic
	ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
	// ErrAllBrokersDown Local: All broker connections are down
	ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
	// ErrInvalidArg Local: Invalid argument or configuration
	ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG)
	// ErrTimedOut Local: Timed out
	ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
	// ErrQueueFull Local: Queue full
	ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL)
	// ErrIsrInsuff Local: ISR count insufficient
	ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF)
	// ErrNodeUpdate Local: Broker node update
	ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE)
	// ErrSsl Local: SSL error
	ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL)
	// ErrWaitCoord Local: Waiting for coordinator
	ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD)
	// ErrUnknownGroup Local: Unknown group
	ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP)
	// ErrInProgress Local: Operation in progress
	ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS)
	// ErrPrevInProgress Local: Previous operation in progress
	ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS)
	// ErrExistingSubscription Local: Existing subscription
	ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION)
	// ErrAssignPartitions Local: Assign partitions
	ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
	// ErrRevokePartitions Local: Revoke partitions
	ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
	// ErrConflict Local: Conflicting use
	ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT)
	// ErrState Local: Erroneous state
	ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE)
	// ErrUnknownProtocol Local: Unknown protocol
	ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL)
	// ErrNotImplemented Local: Not implemented
	ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED)
	// ErrAuthentication Local: Authentication failure
	ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION)
	// ErrNoOffset Local: No offset stored
	ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET)
	// ErrOutdated Local: Outdated
	ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED)
	// ErrTimedOutQueue Local: Timed out in queue
	ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE)
	// ErrUnsupportedFeature Local: Required feature not supported by broker
	ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE)
	// ErrWaitCache Local: Awaiting cache update
	ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE)
	// ErrIntr Local: Operation interrupted
	ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR)
	// ErrKeySerialization Local: Key serialization error
	ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION)
	// ErrValueSerialization Local: Value serialization error
	ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION)
	// ErrKeyDeserialization Local: Key deserialization error
	ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION)
	// ErrValueDeserialization Local: Value deserialization error
	ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION)
	// ErrPartial Local: Partial response
	ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL)
	// ErrReadOnly Local: Read-only object
	ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY)
	// ErrNoent Local: No such entry
	ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT)
	// ErrUnderflow Local: Read underflow
	ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW)
	// ErrInvalidType Local: Invalid type
	ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE)
	// ErrRetry Local: Retry operation
	ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY)
	// ErrPurgeQueue Local: Purged in queue
	ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE)
	// ErrPurgeInflight Local: Purged in flight
	ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT)
	// ErrFatal Local: Fatal error
	ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL)
	// ErrInconsistent Local: Inconsistent state
	ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT)
	// ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding
	ErrGaplessGuarantee ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE)
	// ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded
	ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
	// ErrUnknownBroker Local: Unknown broker
	ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER)
	// ErrNotConfigured Local: Functionality not configured
	ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED)
	// ErrFenced Local: This instance has been fenced by a newer instance
	ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED)
	// ErrApplication Local: Application generated error
	ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION)
	// ErrAssignmentLost Local: Group partition assignment lost
	ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST)
	// ErrNoop Local: No operation performed
	ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP)
	// ErrAutoOffsetReset Local: No offset to automatically reset to
	ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET)
	// ErrUnknown Unknown broker error
	ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
	// ErrNoError Success
	ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR)
	// ErrOffsetOutOfRange Broker: Offset out of range
	ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE)
	// ErrInvalidMsg Broker: Invalid message
	ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG)
	// ErrUnknownTopicOrPart Broker: Unknown topic or partition
	ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
	// ErrInvalidMsgSize Broker: Invalid message size
	ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE)
	// ErrLeaderNotAvailable Broker: Leader not available
	ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
	// ErrNotLeaderForPartition Broker: Not leader for partition
	ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION)
	// ErrRequestTimedOut Broker: Request timed out
	ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT)
	// ErrBrokerNotAvailable Broker: Broker not available
	ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE)
	// ErrReplicaNotAvailable Broker: Replica not available
	ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE)
	// ErrMsgSizeTooLarge Broker: Message size too large
	ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
	// ErrStaleCtrlEpoch Broker: StaleControllerEpochCode
	ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH)
	// ErrOffsetMetadataTooLarge Broker: Offset metadata string too large
	ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE)
	// ErrNetworkException Broker: Broker disconnected before response received
	ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION)
	// ErrCoordinatorLoadInProgress Broker: Coordinator load in progress
	ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS)
	// ErrCoordinatorNotAvailable Broker: Coordinator not available
	ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE)
	// ErrNotCoordinator Broker: Not coordinator
	ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR)
	// ErrTopicException Broker: Invalid topic
	ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION)
	// ErrRecordListTooLarge Broker: Message batch larger than configured server segment size
	ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE)
	// ErrNotEnoughReplicas Broker: Not enough in-sync replicas
	ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS)
	// ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas
	ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND)
	// ErrInvalidRequiredAcks Broker: Invalid required acks value
	ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS)
	// ErrIllegalGeneration Broker: Specified group generation id is not valid
	ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
	// ErrInconsistentGroupProtocol Broker: Inconsistent group protocol
	ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL)
	// ErrInvalidGroupID Broker: Invalid group.id
	ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID)
	// ErrUnknownMemberID Broker: Unknown member
	ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
	// ErrInvalidSessionTimeout Broker: Invalid session timeout
	ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT)
	// ErrRebalanceInProgress Broker: Group rebalance in progress
	ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS)
	// ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid
	ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE)
	// ErrTopicAuthorizationFailed Broker: Topic authorization failed
	ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
	// ErrGroupAuthorizationFailed Broker: Group authorization failed
	ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED)
	// ErrClusterAuthorizationFailed Broker: Cluster authorization failed
	ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED)
	// ErrInvalidTimestamp Broker: Invalid timestamp
	ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP)
	// ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism
	ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM)
	// ErrIllegalSaslState Broker: Request not valid in current SASL state
	ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE)
	// ErrUnsupportedVersion Broker: API version not supported
	ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION)
	// ErrTopicAlreadyExists Broker: Topic already exists
	ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS)
	// ErrInvalidPartitions Broker: Invalid number of partitions
	ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS)
	// ErrInvalidReplicationFactor Broker: Invalid replication factor
	ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR)
	// ErrInvalidReplicaAssignment Broker: Invalid replica assignment
	ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT)
	// ErrInvalidConfig Broker: Configuration is invalid
	ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG)
	// ErrNotController Broker: Not controller for cluster
	ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER)
	// ErrInvalidRequest Broker: Invalid request
	ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST)
	// ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request
	ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT)
	// ErrPolicyViolation Broker: Policy violation
	ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION)
	// ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number
	ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER)
	// ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number
	ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER)
	// ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch
	ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH)
	// ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state
	ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE)
	// ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id
	ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING)
	// ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
	ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT)
	// ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
	ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS)
	// ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
	ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED)
	// ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed
	ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED)
	// ErrSecurityDisabled Broker: Security features are disabled
	ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED)
	// ErrOperationNotAttempted Broker: Operation not attempted
	ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED)
	// ErrKafkaStorageError Broker: Disk error when trying to access log file on disk
	ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR)
	// ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config
	ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND)
	// ErrSaslAuthenticationFailed Broker: SASL Authentication failed
	ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED)
	// ErrUnknownProducerID Broker: Unknown Producer Id
	ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID)
	// ErrReassignmentInProgress Broker: Partition reassignment is in progress
	ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS)
	// ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled
	ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED)
	// ErrDelegationTokenNotFound Broker: Delegation Token is not found on server
	ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND)
	// ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer
	ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH)
	// ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection
	ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED)
	// ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed
	ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED)
	// ErrDelegationTokenExpired Broker: Delegation Token is expired
	ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED)
	// ErrInvalidPrincipalType Broker: Supplied principalType is not supported
	ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE)
	// ErrNonEmptyGroup Broker: The group is not empty
	ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP)
	// ErrGroupIDNotFound Broker: The group id does not exist
	ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND)
	// ErrFetchSessionIDNotFound Broker: The fetch session ID was not found
	ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND)
	// ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid
	ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH)
	// ErrListenerNotFound Broker: No matching listener
	ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND)
	// ErrTopicDeletionDisabled Broker: Topic deletion is disabled
	ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED)
	// ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch
	ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH)
	// ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch
	ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH)
	// ErrUnsupportedCompressionType Broker: Unsupported compression type
	ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE)
	// ErrStaleBrokerEpoch Broker: Broker epoch has changed
	ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH)
	// ErrOffsetNotAvailable Broker: Leader high watermark is not caught up
	ErrOffsetNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE)
	// ErrMemberIDRequired Broker: Group member needs a valid member ID
	ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED)
	// ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available
	ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE)
	// ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size
	ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED)
	// ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id
	ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID)
	// ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available
	ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE)
	// ErrElectionNotNeeded Broker: Leader election not needed for topic partition
	ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED)
	// ErrNoReassignmentInProgress Broker: No partition reassignment is in progress
	ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS)
	// ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it
	ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC)
	// ErrInvalidRecord Broker: Broker failed to validate record
	ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD)
	// ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared
	ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
	// ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded
	ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED)
	// ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one
	ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED)
	// ErrResourceNotFound Broker: Request illegally referred to resource that does not exist
	ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND)
	// ErrDuplicateResource Broker: Request illegally referred to the same resource twice
	ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE)
	// ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability
	ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL)
	// ErrInconsistentVoterSet Broker: Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters
	ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET)
	// ErrInvalidUpdateVersion Broker: Invalid update version
	ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION)
	// ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error
	ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED)
	// ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding
	ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE)
)
@ -0,0 +1,48 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once


/**
 * Glue between Go, Cgo and librdkafka
 */


/**
 * Temporary C to Go header representation
 */
typedef struct tmphdr_s {
  const char *key;
  const void *val;   // producer: malloc()ed by Go code if size > 0
                     // consumer: owned by librdkafka
  ssize_t     size;
} tmphdr_t;



/**
 * @struct This is a glue struct used by the C code in this client to
 *         effectively map fields from a librdkafka rd_kafka_message_t
 *         to something usable in Go with as few CGo calls as possible.
 */
typedef struct glue_msg_s {
  rd_kafka_message_t *msg;
  rd_kafka_timestamp_type_t tstype;
  int64_t ts;
  tmphdr_t *tmphdrs;
  size_t    tmphdrsCnt;
  int8_t    want_hdrs;  /**< If true, copy headers */
} glue_msg_t;
@ -0,0 +1,379 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"fmt"
	"strings"
	"sync"
	"time"
	"unsafe"
)

/*
#include "select_rdkafka.h"
#include <stdlib.h>
*/
import "C"

// OAuthBearerToken represents the data to be transmitted
// to a broker during SASL/OAUTHBEARER authentication.
type OAuthBearerToken struct {
	// Token value, often (but not necessarily) a JWS compact serialization
	// as per https://tools.ietf.org/html/rfc7515#section-3.1; it must meet
	// the regular expression for a SASL/OAUTHBEARER value defined at
	// https://tools.ietf.org/html/rfc7628#section-3.1
	TokenValue string
	// Metadata about the token indicating when it expires (local time);
	// it must represent a time in the future
	Expiration time.Time
	// Metadata about the token indicating the Kafka principal name
	// to which it applies (for example, "admin")
	Principal string
	// SASL extensions, if any, to be communicated to the broker during
	// authentication (all keys and values of which must meet the regular
	// expressions defined at https://tools.ietf.org/html/rfc7628#section-3.1,
	// and it must not contain the reserved "auth" key)
	Extensions map[string]string
}

// Handle represents a generic client handle containing common parts for
// both Producer and Consumer.
type Handle interface {
	// SetOAuthBearerToken sets the data to be transmitted
	// to a broker during SASL/OAUTHBEARER authentication. It will return nil
	// on success, otherwise an error if:
	// 1) the token data is invalid (meaning an expiration time in the past
	// or either a token value or an extension key or value that does not meet
	// the regular expression requirements as per
	// https://tools.ietf.org/html/rfc7628#section-3.1);
	// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
	// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
	// authentication mechanism.
	SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

	// SetOAuthBearerTokenFailure sets the error message describing why token
	// retrieval/setting failed; it also schedules a new token refresh event for 10
	// seconds later so the attempt may be retried. It will return nil on
	// success, otherwise an error if:
	// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
	// 2) SASL/OAUTHBEARER is supported but is not configured as the client's
	// authentication mechanism.
	SetOAuthBearerTokenFailure(errstr string) error

	// gethandle() returns the internal handle struct pointer
	gethandle() *handle
}
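
// NOTE (editorial sketch, not part of the original file): one way an
// application might react to an OAuthBearerTokenRefresh event. The token
// retrieval helper below is a placeholder, and the principal name and token
// lifetime are assumptions.
func exampleFetchToken(saslOauthbearerConfig string) (string, error) {
	// A real implementation would contact the token endpoint described by
	// the sasl.oauthbearer.config value passed in.
	return "eyJhbGciOiJub25lIn0.eyJzdWIiOiJhZG1pbiJ9.", nil
}

func exampleHandleTokenRefresh(h Handle, e OAuthBearerTokenRefresh) {
	token, err := exampleFetchToken(e.Config)
	if err != nil {
		// Report the failure; librdkafka schedules a retry roughly 10s later.
		h.SetOAuthBearerTokenFailure(err.Error())
		return
	}
	h.SetOAuthBearerToken(OAuthBearerToken{
		TokenValue: token,
		Principal:  "admin",                         // assumed principal
		Expiration: time.Now().Add(5 * time.Minute), // assumed lifetime
	})
}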

// Common instance handle for both Producer and Consumer
type handle struct {
	rk  *C.rd_kafka_t
	rkq *C.rd_kafka_queue_t

	// Forward logs from librdkafka log queue to logs channel.
	logs          chan LogEvent
	logq          *C.rd_kafka_queue_t
	closeLogsChan bool

	// Topic <-> rkt caches
	rktCacheLock sync.Mutex
	// topic name -> rkt cache
	rktCache map[string]*C.rd_kafka_topic_t
	// rkt -> topic name cache
	rktNameCache map[*C.rd_kafka_topic_t]string

	// Cached instance name to avoid CGo call in String()
	name string

	//
	// cgo map
	// Maps C callbacks based on cgoid back to its Go object
	cgoLock   sync.Mutex
	cgoidNext uintptr
	cgomap    map[int]cgoif

	//
	// producer
	//
	p *Producer

	// Forward delivery reports on Producer.Events channel
	fwdDr bool

	// Enabled message fields for delivery reports and consumed messages.
	msgFields *messageFields

	//
	// consumer
	//
	c *Consumer

	// WaitGroup to wait for spawned go-routines to finish.
	waitGroup sync.WaitGroup
}

func (h *handle) String() string {
	return h.name
}

func (h *handle) setup() {
	h.rktCache = make(map[string]*C.rd_kafka_topic_t)
	h.rktNameCache = make(map[*C.rd_kafka_topic_t]string)
	h.cgomap = make(map[int]cgoif)
	h.name = C.GoString(C.rd_kafka_name(h.rk))
	if h.msgFields == nil {
		h.msgFields = newMessageFields()
	}
}

func (h *handle) cleanup() {
	if h.logs != nil {
		C.rd_kafka_queue_destroy(h.logq)
		if h.closeLogsChan {
			close(h.logs)
		}
	}

	for _, crkt := range h.rktCache {
		C.rd_kafka_topic_destroy(crkt)
	}

	if h.rkq != nil {
		C.rd_kafka_queue_destroy(h.rkq)
	}
}

func (h *handle) setupLogQueue(logsChan chan LogEvent, termChan chan bool) {
	if logsChan == nil {
		logsChan = make(chan LogEvent, 10000)
		h.closeLogsChan = true
	}

	h.logs = logsChan

	// Let librdkafka forward logs to our log queue instead of the main queue
	h.logq = C.rd_kafka_queue_new(h.rk)
	C.rd_kafka_set_log_queue(h.rk, h.logq)

	// Start a polling goroutine to consume the log queue
	h.waitGroup.Add(1)
	go func() {
		h.pollLogEvents(h.logs, 100, termChan)
		h.waitGroup.Done()
	}()
}

// getRkt0 finds or creates and returns a C topic_t object from the local cache.
func (h *handle) getRkt0(topic string, ctopic *C.char, doLock bool) (crkt *C.rd_kafka_topic_t) {
	if doLock {
		h.rktCacheLock.Lock()
		defer h.rktCacheLock.Unlock()
	}
	crkt, ok := h.rktCache[topic]
	if ok {
		return crkt
	}

	if ctopic == nil {
		ctopic = C.CString(topic)
		defer C.free(unsafe.Pointer(ctopic))
	}

	crkt = C.rd_kafka_topic_new(h.rk, ctopic, nil)
	if crkt == nil {
		panic(fmt.Sprintf("Unable to create new C topic \"%s\": %s",
			topic, C.GoString(C.rd_kafka_err2str(C.rd_kafka_last_error()))))
	}

	h.rktCache[topic] = crkt
	h.rktNameCache[crkt] = topic

	return crkt
}

// getRkt finds or creates and returns a C topic_t object from the local cache.
func (h *handle) getRkt(topic string) (crkt *C.rd_kafka_topic_t) {
	return h.getRkt0(topic, nil, true)
}

// getTopicNameFromRkt returns the topic name for a C topic_t object, preferably
// using the local cache to avoid a cgo call.
func (h *handle) getTopicNameFromRkt(crkt *C.rd_kafka_topic_t) (topic string) {
	h.rktCacheLock.Lock()
	defer h.rktCacheLock.Unlock()

	topic, ok := h.rktNameCache[crkt]
	if ok {
		return topic
	}

	// we need our own copy/refcount of the crkt
	ctopic := C.rd_kafka_topic_name(crkt)
	topic = C.GoString(ctopic)

	crkt = h.getRkt0(topic, ctopic, false /* don't lock */)

	return topic
}

// cgoif is a generic interface for holding Go state passed as an opaque
// value to the C code.
// Since pointers to complex Go types cannot be passed to C we instead create
// a cgoif object, generate a unique id that is added to the cgomap,
// and then pass that id to the C code. When the C code callback is called we
// use the id to look up the cgoif object in the cgomap.
type cgoif interface{}

// delivery report cgoif container
type cgoDr struct {
	deliveryChan chan Event
	opaque       interface{}
}

// cgoPut adds object cg to the handle's cgo map and returns a
// unique id for the added entry.
// Thread-safe.
// FIXME: the uniqueness of the id is questionable over time.
func (h *handle) cgoPut(cg cgoif) (cgoid int) {
	h.cgoLock.Lock()
	defer h.cgoLock.Unlock()

	h.cgoidNext++
	if h.cgoidNext == 0 {
		h.cgoidNext++
	}
	cgoid = (int)(h.cgoidNext)
	h.cgomap[cgoid] = cg
	return cgoid
}

// cgoGet looks up cgoid in the cgo map, deletes the reference from the map
// and returns the object, if found. Else returns nil, false.
// Thread-safe.
func (h *handle) cgoGet(cgoid int) (cg cgoif, found bool) {
	if cgoid == 0 {
		return nil, false
	}

	h.cgoLock.Lock()
	defer h.cgoLock.Unlock()
	cg, found = h.cgomap[cgoid]
	if found {
		delete(h.cgomap, cgoid)
	}

	return cg, found
}

// setOAuthBearerToken - see rd_kafka_oauthbearer_set_token()
func (h *handle) setOAuthBearerToken(oauthBearerToken OAuthBearerToken) error {
	cTokenValue := C.CString(oauthBearerToken.TokenValue)
	defer C.free(unsafe.Pointer(cTokenValue))

	cPrincipal := C.CString(oauthBearerToken.Principal)
	defer C.free(unsafe.Pointer(cPrincipal))

	cErrstrSize := C.size_t(512)
	cErrstr := (*C.char)(C.malloc(cErrstrSize))
	defer C.free(unsafe.Pointer(cErrstr))

	cExtensions := make([]*C.char, 2*len(oauthBearerToken.Extensions))
	extensionSize := 0
	for key, value := range oauthBearerToken.Extensions {
		cExtensions[extensionSize] = C.CString(key)
		defer C.free(unsafe.Pointer(cExtensions[extensionSize]))
		extensionSize++
		cExtensions[extensionSize] = C.CString(value)
		defer C.free(unsafe.Pointer(cExtensions[extensionSize]))
		extensionSize++
	}

	var cExtensionsToUse **C.char
	if extensionSize > 0 {
		cExtensionsToUse = (**C.char)(unsafe.Pointer(&cExtensions[0]))
	}

	cErr := C.rd_kafka_oauthbearer_set_token(h.rk, cTokenValue,
		C.int64_t(oauthBearerToken.Expiration.UnixNano()/(1000*1000)), cPrincipal,
		cExtensionsToUse, C.size_t(extensionSize), cErrstr, cErrstrSize)
	if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil
	}
	return newErrorFromCString(cErr, cErrstr)
}

// setOAuthBearerTokenFailure - see rd_kafka_oauthbearer_set_token_failure()
func (h *handle) setOAuthBearerTokenFailure(errstr string) error {
	cerrstr := C.CString(errstr)
	defer C.free(unsafe.Pointer(cerrstr))
	cErr := C.rd_kafka_oauthbearer_set_token_failure(h.rk, cerrstr)
	if cErr == C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil
	}
	return newError(cErr)
}

// messageFields controls which fields are made available for producer delivery reports & consumed messages.
// true values indicate that the field should be included
type messageFields struct {
	Key     bool
	Value   bool
	Headers bool
}

// disableAll disables all fields
func (mf *messageFields) disableAll() {
	mf.Key = false
	mf.Value = false
	mf.Headers = false
}

// newMessageFields returns a new messageFields with all fields enabled
func newMessageFields() *messageFields {
	return &messageFields{
		Key:     true,
		Value:   true,
		Headers: true,
	}
}

// newMessageFieldsFrom constructs a new messageFields from the given configuration value
func newMessageFieldsFrom(v ConfigValue) (*messageFields, error) {
	msgFields := newMessageFields()
	switch v {
	case "all":
		// nothing to do
	case "", "none":
		msgFields.disableAll()
	default:
		msgFields.disableAll()
		for _, value := range strings.Split(v.(string), ",") {
			switch value {
			case "key":
				msgFields.Key = true
			case "value":
				msgFields.Value = true
			case "headers":
				msgFields.Headers = true
			default:
				return nil, fmt.Errorf("unknown message field: %s", value)
			}
		}
	}
	return msgFields, nil
}
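
// NOTE (editorial sketch, not part of the original file): newMessageFieldsFrom
// parses a comma-separated field list, with "all", "" and "none" special-cased
// above; the `go.delivery.report.fields` property named in the comment below is
// where such a value typically comes from, stated here as an assumption.
func exampleMessageFields() {
	// e.g. the value of a `"go.delivery.report.fields": "key,value"` setting
	mf, err := newMessageFieldsFrom("key,value")
	if err != nil {
		panic(err)
	}
	fmt.Printf("key=%v value=%v headers=%v\n", mf.Key, mf.Value, mf.Headers)
	// Output: key=true value=true headers=false
}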
@ -0,0 +1,67 @@
package kafka

/**
 * Copyright 2018 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"fmt"
	"strconv"
)

/*
#include <string.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"
*/
import "C"

// Header represents a single Kafka message header.
//
// Message headers are made up of a list of Header elements, retaining their original insert
// order and allowing for duplicate Keys.
//
// Key is a human readable string identifying the header.
// Value is the key's binary value, Kafka does not put any restrictions on the format of
// the Value but it should be made relatively compact.
// The value may be a byte array, empty, or nil.
//
// NOTE: Message headers are not available on producer delivery report messages.
type Header struct {
	Key   string // Header name (utf-8 string)
	Value []byte // Header value (nil, empty, or binary)
}

// String returns the Header Key and data in a human-readable, possibly truncated form
// suitable for displaying to the user.
func (h Header) String() string {
	if h.Value == nil {
		return fmt.Sprintf("%s=nil", h.Key)
	}

	valueLen := len(h.Value)
	if valueLen == 0 {
		return fmt.Sprintf("%s=<empty>", h.Key)
	}

	truncSize := valueLen
	trunc := ""
	if valueLen > 50+15 {
		truncSize = 50
		trunc = fmt.Sprintf("(%d more bytes)", valueLen-truncSize)
	}

	return fmt.Sprintf("%s=%s%s", h.Key, strconv.Quote(string(h.Value[:truncSize])), trunc)
}
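
// NOTE (editorial sketch, not part of the original file): attaching headers to
// a message to be produced; the topic name is an assumption. Duplicate keys
// are legal and insert order is preserved.
func exampleHeaders() {
	topic := "demo-topic" // assumed topic name
	msg := &Message{
		TopicPartition: TopicPartition{Topic: &topic, Partition: PartitionAny},
		Value:          []byte("payload"),
		Headers: []Header{
			{Key: "trace-id", Value: []byte("abc123")},
			{Key: "trace-id", Value: []byte("def456")}, // duplicate key, kept in order
			{Key: "flag", Value: nil},                  // nil value prints as "flag=nil"
		},
	}
	for _, hdr := range msg.Headers {
		fmt.Println(hdr.String())
	}
}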
@ -0,0 +1,375 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package kafka provides high-level Apache Kafka producer and consumers
// using bindings on-top of the librdkafka C library.
//
//
// High-level Consumer
//
// * Decide if you want to read messages and events by calling `.Poll()` or
// the deprecated option of using the `.Events()` channel. (If you want to use
// `.Events()` channel then set `"go.events.channel.enable": true`).
//
// * Create a Consumer with `kafka.NewConsumer()` providing at
// least the `bootstrap.servers` and `group.id` configuration properties.
//
// * Call `.Subscribe()` (or `.SubscribeTopics()` to subscribe to multiple topics)
// to join the group with the specified subscription set.
// Subscriptions are atomic, calling `.Subscribe*()` again will leave
// the group and rejoin with the new set of topics.
//
// * Start reading events and messages from either the `.Events` channel
// or by calling `.Poll()`.
//
// * When the group has rebalanced each client member is assigned a
// (sub-)set of topic+partitions.
// By default the consumer will start fetching messages for its assigned
// partitions at this point, but your application may enable rebalance
// events to get an insight into what the assigned partitions were
// as well as set the initial offsets. To do this you need to pass
// `"go.application.rebalance.enable": true` to the `NewConsumer()` call
// mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event
// with the assigned partition set. You can optionally modify the initial
// offsets (they'll default to stored offsets and if there are no previously stored
// offsets it will fall back to `"auto.offset.reset"`
// which defaults to the `latest` message) and then call `.Assign(partitions)`
// to start consuming. If you don't need to modify the initial offsets you will
// not need to call `.Assign()`, the client will do so automatically for you if
// you don't, unless you are using the channel-based consumer in which case
// you MUST call `.Assign()` when receiving the `AssignedPartitions` and
// `RevokedPartitions` events.
//
// * As messages are fetched they will be made available on either the
// `.Events` channel or by calling `.Poll()`, look for event type `*kafka.Message`.
//
// * Handle messages, events and errors to your liking.
//
// * When you are done consuming call `.Close()` to commit final offsets
// and leave the consumer group. A minimal Poll-based loop following these
// steps is sketched below.
//
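// (Editorial sketch, not part of the original file: the broker address, group
// id and topic name below are illustrative assumptions.)
//
//	c, err := kafka.NewConsumer(&kafka.ConfigMap{
//		"bootstrap.servers": "localhost:9092",
//		"group.id":          "my-group",
//		"auto.offset.reset": "earliest",
//	})
//	if err != nil {
//		panic(err)
//	}
//	c.SubscribeTopics([]string{"my-topic"}, nil)
//	run := true
//	for run {
//		switch e := c.Poll(100).(type) {
//		case *kafka.Message:
//			fmt.Printf("Message on %s: %s\n", e.TopicPartition, string(e.Value))
//		case kafka.Error:
//			fmt.Printf("Error: %v\n", e)
//			run = false
//		}
//	}
//	c.Close()
//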
//
//
// Producer
//
// * Create a Producer with `kafka.NewProducer()` providing at least
// the `bootstrap.servers` configuration properties.
//
// * Messages may now be produced either by sending a `*kafka.Message`
// on the `.ProduceChannel` or by calling `.Produce()`.
//
// * Producing is an asynchronous operation so the client notifies the application
// of per-message produce success or failure through something called delivery reports.
// Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message`
// and you should check `msg.TopicPartition.Error` for `nil` to find out if the message
// was successfully delivered or not.
// It is also possible to direct delivery reports to alternate channels
// by providing a non-nil `chan Event` channel to `.Produce()`.
// If no delivery reports are wanted they can be completely disabled by
// setting configuration property `"go.delivery.reports": false`.
//
// * When you are done producing messages you will need to make sure all messages
// are indeed delivered to the broker (or failed), remember that this is
// an asynchronous client so some of your messages may be lingering in internal
// channels or transmission queues.
// To do this you can either keep track of the messages you've produced
// and wait for their corresponding delivery reports, or call the convenience
// function `.Flush()` that will block until all message deliveries are done
// or the provided timeout elapses.
//
// * Finally call `.Close()` to decommission the producer. A short sketch of
// this produce-and-flush flow follows below.
//
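// (Editorial sketch, not part of the original file: the broker address and
// topic name below are illustrative assumptions.)
//
//	p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
//	if err != nil {
//		panic(err)
//	}
//	topic := "my-topic"
//	p.Produce(&kafka.Message{
//		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
//		Value:          []byte("hello"),
//	}, nil)
//	// Wait for the delivery report of the message produced above.
//	e := <-p.Events()
//	if m, ok := e.(*kafka.Message); ok && m.TopicPartition.Error != nil {
//		fmt.Printf("Delivery failed: %v\n", m.TopicPartition.Error)
//	}
//	p.Flush(15 * 1000) // wait up to 15s for any remaining deliveries
//	p.Close()
//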
|
||||||
|
//
|
||||||
|
// Transactional producer API
//
// The transactional producer operates on top of the idempotent producer,
// and provides full exactly-once semantics (EOS) for Apache Kafka when used
// with the transaction-aware consumer (`isolation.level=read_committed`).
//
// A producer instance is configured for transactions by setting the
// `transactional.id` to an identifier unique for the application. This
// id will be used to fence stale transactions from previous instances of
// the application, typically following an outage or crash.
//
// After creating the transactional producer instance using `NewProducer()`
// the transactional state must be initialized by calling
// `InitTransactions()`. This is a blocking call that will
// acquire a runtime producer id from the transaction coordinator broker
// as well as abort any stale transactions and fence any still running producer
// instances with the same `transactional.id`.
//
// Once transactions are initialized the application may begin a new
// transaction by calling `BeginTransaction()`.
// A producer instance may only have a single ongoing transaction.
//
// Any messages produced after the transaction has been started will
// belong to the ongoing transaction and will be committed or aborted
// atomically.
// It is not permitted to produce messages outside a transaction
// boundary, e.g., before `BeginTransaction()` or after `CommitTransaction()`,
// `AbortTransaction()`, or if the current transaction has failed.
//
// If consumed messages are used as input to the transaction, the consumer
// instance must be configured with `enable.auto.commit` set to `false`.
// To commit the consumed offsets along with the transaction, pass the
// list of consumed partitions and the last offset processed + 1 to
// `SendOffsetsToTransaction()` prior to committing the transaction.
// This allows an aborted transaction to be restarted using the previously
// committed offsets.
//
// To commit the produced messages, and any consumed offsets, to the
// current transaction, call `CommitTransaction()`.
// This call will block until the transaction has been fully committed or
// failed (typically due to fencing by a newer producer instance).
//
// Alternatively, if processing fails, or an abortable transaction error is
// raised, the transaction needs to be aborted by calling
// `AbortTransaction()`, which marks any produced messages and
// offset commits as aborted.
//
// After the current transaction has been committed or aborted a new
// transaction may be started by calling `BeginTransaction()` again.
//
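// Putting these calls together, one iteration of a consume-transform-produce
// loop could look roughly as follows (a sketch only: `ctx` is a
// context.Context, `offsets` holds the last processed offset + 1 per
// partition, and the group metadata comes from the input consumer):
//
//     p.InitTransactions(ctx) // once, at application startup
//
//     p.BeginTransaction()
//     // ... p.Produce() the transformed messages ...
//     groupMetadata, _ := consumer.GetConsumerGroupMetadata()
//     p.SendOffsetsToTransaction(ctx, offsets, groupMetadata)
//     if err := p.CommitTransaction(ctx); err != nil {
//         // see the error categories below for how to react
//     }
//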
// Retriable errors:
// Some error cases allow the attempted operation to be retried; this is
// indicated by the error object having the retriable flag set, which can
// be detected by calling `err.(kafka.Error).IsRetriable()`.
// When this flag is set the application may retry the operation immediately
// or preferably after a short grace period (to avoid busy-looping).
// Retriable errors include timeouts, broker transport failures, etc.
//
// Abortable errors:
// An ongoing transaction may fail permanently due to various errors,
// such as the transaction coordinator becoming unavailable, write failures to the
// Apache Kafka log, under-replicated partitions, etc.
// At this point the producer application must abort the current transaction
// using `AbortTransaction()` and optionally start a new transaction
// by calling `BeginTransaction()`.
// Whether an error is abortable or not is detected by calling
// `err.(kafka.Error).TxnRequiresAbort()` on the returned error object.
//
// Fatal errors:
// While the underlying idempotent producer will typically only raise
// fatal errors for unrecoverable cluster errors where the idempotency
// guarantees can't be maintained, most of these are treated as abortable by
// the transactional producer since transactions may be aborted and retried
// in their entirety.
// The transactional producer on the other hand introduces a set of additional
// fatal errors which the application needs to handle by shutting down the
// producer and terminating. There is no way for a producer instance to recover
// from fatal errors.
// Whether an error is fatal or not is detected by calling
// `err.(kafka.Error).IsFatal()` on the returned error object or by checking
// the global `GetFatalError()`.
//
// Handling of other errors:
// For errors that have neither the retriable, abortable nor fatal flag set
// it is not always obvious how to handle them. While some of these errors
// may be indicative of bugs in the application code, such as when
// an invalid parameter is passed to a method, other errors might originate
// from the broker and be passed through as-is to the application.
// The general recommendation is to treat these errors, that have
// neither the retriable nor abortable flags set, as fatal.
//
// Error handling example:
//
//     retry:
//
//     err := producer.CommitTransaction(...)
//     if err == nil {
//         return nil
//     } else if err.(kafka.Error).TxnRequiresAbort() {
//         do_abort_transaction_and_reset_inputs()
//     } else if err.(kafka.Error).IsRetriable() {
//         goto retry
//     } else { // treat all other errors as fatal errors
//         panic(err)
//     }
//
//
// Events
//
// Apart from emitting messages and delivery reports the client also communicates
// with the application through a number of different event types.
// An application may choose to handle or ignore these events.
//
// Consumer events
//
// * `*kafka.Message` - a fetched message.
//
// * `AssignedPartitions` - The assigned partition set for this client following a rebalance.
// Requires `go.application.rebalance.enable`.
//
// * `RevokedPartitions` - The counterpart to `AssignedPartitions` following a rebalance.
// `AssignedPartitions` and `RevokedPartitions` are symmetrical.
// Requires `go.application.rebalance.enable`.
//
// * `PartitionEOF` - Consumer has reached the end of a partition.
// NOTE: The consumer will keep trying to fetch new messages for the partition.
//
// * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled).
//
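// With `go.application.rebalance.enable` set, the rebalance events must be
// acted upon, e.g. (a sketch; a real handler might also adjust committed
// offsets before assigning):
//
//     switch e := c.Poll(100).(type) {
//     case kafka.AssignedPartitions:
//         c.Assign(e.Partitions)
//     case kafka.RevokedPartitions:
//         c.Unassign()
//     }
//
//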
// Producer events
//
// * `*kafka.Message` - delivery report for produced message.
// Check `.TopicPartition.Error` for delivery result.
//
//
// Generic events for both Consumer and Producer
//
// * `KafkaError` - client (error codes are prefixed with _) or broker error.
// These errors are normally just informational since the
// client will try its best to automatically recover (eventually).
//
// * `OAuthBearerTokenRefresh` - retrieval of a new SASL/OAUTHBEARER token is required.
// This event only occurs with sasl.mechanism=OAUTHBEARER.
// Be sure to invoke SetOAuthBearerToken() on the Producer/Consumer/AdminClient
// instance when a successful token retrieval is completed, otherwise be sure to
// invoke SetOAuthBearerTokenFailure() to indicate that retrieval failed (or
// if setting the token failed, which could happen if an extension doesn't meet
// the required regular expression); invoking SetOAuthBearerTokenFailure() will
// schedule a new event for 10 seconds later so another retrieval can be attempted.
//
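// A sketch of handling that event inside the application's event switch
// (the token source is application-specific; `fetchToken()` is a
// hypothetical helper returning a token value and its expiry time):
//
//     case kafka.OAuthBearerTokenRefresh:
//         value, expiration, err := fetchToken()
//         if err != nil {
//             p.SetOAuthBearerTokenFailure(err.Error())
//         } else {
//             p.SetOAuthBearerToken(kafka.OAuthBearerToken{
//                 TokenValue: value,
//                 Expiration: expiration,
//             })
//         }
//
//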
// Hint: If your application registers a signal notification
// (signal.Notify) make sure the signals channel is buffered to avoid
// possible complications with blocking Poll() calls.
//
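// For example (a standard-library sketch, independent of this package):
//
//     sigs := make(chan os.Signal, 1) // buffered, per the hint above
//     signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
//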
// Note: The Confluent Kafka Go client is safe for concurrent use.
package kafka

import (
    "fmt"
    // Make sure librdkafka_vendor/ sub-directory is included in vendor pulls.
    _ "github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor"
    "unsafe"
)

/*
#include <stdlib.h>
#include <string.h>
#include "select_rdkafka.h"

static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) {
   return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL;
}
*/
import "C"

// PartitionAny represents any partition (for partitioning),
// or an unspecified value (for all other cases)
const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)

// TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset.
type TopicPartition struct {
    Topic     *string
    Partition int32
    Offset    Offset
    Metadata  *string
    Error     error
}

func (p TopicPartition) String() string {
    topic := "<null>"
    if p.Topic != nil {
        topic = *p.Topic
    }
    if p.Error != nil {
        return fmt.Sprintf("%s[%d]@%s(%s)",
            topic, p.Partition, p.Offset, p.Error)
    }
    return fmt.Sprintf("%s[%d]@%s",
        topic, p.Partition, p.Offset)
}

// TopicPartitions is a slice of TopicPartitions that also implements
// the sort interface
type TopicPartitions []TopicPartition

func (tps TopicPartitions) Len() int {
    return len(tps)
}

func (tps TopicPartitions) Less(i, j int) bool {
    if *tps[i].Topic < *tps[j].Topic {
        return true
    } else if *tps[i].Topic > *tps[j].Topic {
        return false
    }
    return tps[i].Partition < tps[j].Partition
}

func (tps TopicPartitions) Swap(i, j int) {
    tps[i], tps[j] = tps[j], tps[i]
}
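// For illustration, the three methods above make a partition list sortable
// by topic and then partition via the standard library (a sketch; assumes
// "sort" is imported at the call site):
//
//     parts := kafka.TopicPartitions{ /* ... */ }
//     sort.Sort(parts) // ordered by *Topic, then Partition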
// newCPartsFromTopicPartitions creates a new C rd_kafka_topic_partition_list_t
// from a TopicPartition array.
func newCPartsFromTopicPartitions(partitions []TopicPartition) (cparts *C.rd_kafka_topic_partition_list_t) {
    cparts = C.rd_kafka_topic_partition_list_new(C.int(len(partitions)))
    for _, part := range partitions {
        ctopic := C.CString(*part.Topic)
        defer C.free(unsafe.Pointer(ctopic))
        rktpar := C.rd_kafka_topic_partition_list_add(cparts, ctopic, C.int32_t(part.Partition))
        rktpar.offset = C.int64_t(part.Offset)

        if part.Metadata != nil {
            cmetadata := C.CString(*part.Metadata)
            rktpar.metadata = unsafe.Pointer(cmetadata)
            rktpar.metadata_size = C.size_t(len(*part.Metadata))
        }
    }

    return cparts
}

// setupTopicPartitionFromCrktpar populates a TopicPartition from a
// C rd_kafka_topic_partition_t.
func setupTopicPartitionFromCrktpar(partition *TopicPartition, crktpar *C.rd_kafka_topic_partition_t) {
    topic := C.GoString(crktpar.topic)
    partition.Topic = &topic
    partition.Partition = int32(crktpar.partition)
    partition.Offset = Offset(crktpar.offset)
    if crktpar.metadata_size > 0 {
        size := C.int(crktpar.metadata_size)
        cstr := (*C.char)(unsafe.Pointer(crktpar.metadata))
        metadata := C.GoStringN(cstr, size)
        partition.Metadata = &metadata
    }
    if crktpar.err != C.RD_KAFKA_RESP_ERR_NO_ERROR {
        partition.Error = newError(crktpar.err)
    }
}

// newTopicPartitionsFromCparts converts a C partition list back to a
// Go TopicPartition slice.
func newTopicPartitionsFromCparts(cparts *C.rd_kafka_topic_partition_list_t) (partitions []TopicPartition) {
    partcnt := int(cparts.cnt)

    partitions = make([]TopicPartition, partcnt)
    for i := 0; i < partcnt; i++ {
        crktpar := C._c_rdkafka_topic_partition_list_entry(cparts, C.int(i))
        setupTopicPartitionFromCrktpar(&partitions[i], crktpar)
    }

    return partitions
}

// LibraryVersion returns the underlying librdkafka library version as a
// (version_int, version_str) tuple.
func LibraryVersion() (int, string) {
    ver := (int)(C.rd_kafka_version())
    verstr := C.GoString(C.rd_kafka_version_str())
    return ver, verstr
}
@ -0,0 +1,3 @@
*.tar.gz
*.tgz
tmp*
@ -0,0 +1,366 @@
LICENSE
--------------------------------------------------------------
librdkafka - Apache Kafka C driver library

Copyright (c) 2012-2020, Magnus Edenhill
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


LICENSE.crc32c
--------------------------------------------------------------
# For src/crc32c.c copied (with modifications) from
# http://stackoverflow.com/a/17646775/1821055

/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
 * Copyright (C) 2013 Mark Adler
 * Version 1.1  1 Aug 2013  Mark Adler
 */

/*
  This software is provided 'as-is', without any express or implied
  warranty. In no event will the author be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.

  Mark Adler
  madler@alumni.caltech.edu
 */


LICENSE.fnv1a
--------------------------------------------------------------
parts of src/rdfnv1a.c: http://www.isthe.com/chongo/src/fnv/hash_32a.c

Please do not copyright this code. This code is in the public domain.

LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

By:
    chongo <Landon Curt Noll> /\oo/\
    http://www.isthe.com/chongo/

Share and Enjoy! :-)


LICENSE.hdrhistogram
--------------------------------------------------------------
This license covers src/rdhdrhistogram.c which is a C port of
Coda Hale's Golang HdrHistogram https://github.com/codahale/hdrhistogram
at revision 3a0bb77429bd3a61596f5e8a3172445844342120

-----------------------------------------------------------------------------

The MIT License (MIT)

Copyright (c) 2014 Coda Hale

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


LICENSE.lz4
--------------------------------------------------------------
src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3

LZ4 Library
Copyright (c) 2011-2016, Yann Collet
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


LICENSE.murmur2
--------------------------------------------------------------
parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git

MurMurHash2 Library
//-----------------------------------------------------------------------------
// MurmurHash2 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


LICENSE.pycrc
--------------------------------------------------------------
The following license applies to the files rdcrc32.c and rdcrc32.h which
have been generated by the pycrc tool.
============================================================================

Copyright (c) 2006-2012, Thomas Pircher <tehpeh@gmx.net>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


LICENSE.queue
--------------------------------------------------------------
For sys/queue.h:

 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)queue.h	8.5 (Berkeley) 8/20/94
 * $FreeBSD$

LICENSE.regexp
--------------------------------------------------------------
regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684

"
These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
"


LICENSE.snappy
--------------------------------------------------------------
######################################################################
# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h   #
# originally retrieved from http://github.com/andikleen/snappy-c     #
# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219              #
######################################################################

The snappy-c code is under the same license as the original snappy source

Copyright 2011 Intel Corporation All Rights Reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


LICENSE.tinycthread
--------------------------------------------------------------
From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9

License
-------

Copyright (c) 2012 Marcus Geelnard
              2013-2014 Evan Nemerson

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.

2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.

3. This notice may not be removed or altered from any source
   distribution.


LICENSE.wingetopt
--------------------------------------------------------------
For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt

/*
 * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Dieter Baron and Thomas Klausner.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
@ -0,0 +1,24 @@
# Bundling prebuilt librdkafka

confluent-kafka-go bundles prebuilt statically linked
versions of librdkafka for the following platforms:

 * MacOSX x64 (aka Darwin)
 * Linux glibc x64 (Ubuntu, CentOS, etc)
 * Linux musl x64 (Alpine)

## Import static librdkafka bundle

First create the static librdkafka bundle following the instructions in
librdkafka's packaging/nuget/README.md.

Then import the new version using the import.sh script in this directory.
The script will create a branch, import the bundle, create a commit and
push the branch to GitHub for PR review. This PR must be manually opened,
reviewed and then finally merged (make sure to merge it, DO NOT squash or
rebase).

    $ ./import.sh ~/path/to/librdkafka-static-bundle-v1.4.0.tgz

This will copy the static library and the rdkafka.h header file
to this directory, as well as generate a new ../build_..go file
for this platform + variant.
@ -0,0 +1,113 @@
#!/bin/bash
#
# Updates the bundled prebuilt librdkafka libraries to the specified version.
#

set -e


usage() {
    echo "Usage: $0 librdkafka-static-bundle-<VERSION>.tgz"
    echo ""
    echo "This tool must be run from the TOPDIR/kafka/librdkafka_vendor directory"
    exit 1
}


parse_dynlibs() {
    # Parse dynamic libraries from a pkg-config file,
    # both the ones specified with Libs: and those pulled in through Requires:
    local pc=$1
    local libs=
    local req=
    local n=
    for req in $(grep ^Requires: $pc | sed -e 's/^Requires://'); do
        n=$(pkg-config --libs $req)
        if [[ $n == -l* ]]; then
            libs="${libs} $n"
        fi
    done
    for n in $(grep ^Libs: $pc); do
        if [[ $n == -l* ]]; then
            libs="${libs} $n"
        fi
    done

    echo "$libs"
}
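# For instance (a hypothetical example, not taken from a real bundle), a
# librdkafka_glibc_linux.pc containing "Libs: -L${libdir} -lrdkafka-static -lm -ldl"
# would make parse_dynlibs emit "-lrdkafka-static -lm -ldl": word splitting
# turns each token into $n and only the -l* linker flags are kept.
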
setup_build() {
    # Copies the static library from the temp directory into its final
    # location, extracts the dynamic lib list from the pkg-config file,
    # and generates the build_..go file
    local btype=$1
    local apath=$2
    local pc=$3
    local srcinfo=$4
    local build_tag=
    local gpath="../build_${btype}.go"
    local dpath="librdkafka_${btype}.a"

    if [[ $btype == glibc_linux ]]; then
        build_tag="// +build !musl"
    elif [[ $btype == musl_linux ]]; then
        build_tag="// +build musl"
    fi

    local dynlibs=$(parse_dynlibs $pc)

    echo "Copying $apath to $dpath"
    cp "$apath" "$dpath"

    echo "Generating $gpath (extra build tag: $build_tag)"

    cat >$gpath <<EOF
// +build !dynamic
$build_tag

// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT.

package kafka

// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB
// #cgo LDFLAGS: \${SRCDIR}/librdkafka_vendor/${dpath} $dynlibs
import "C"

// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
const LibrdkafkaLinkInfo = "static ${btype} from ${srcinfo}"
EOF

    git add "$dpath" "$gpath"
}


bundle="$1"
[[ -f $bundle ]] || usage

bundlename=$(basename "$bundle")

bdir=$(mktemp -d tmpXXXXXX)

echo "Extracting bundle $bundle:"
tar -xzvf "$bundle" -C "$bdir/"

echo "Copying librdkafka files"
for f in rdkafka.h LICENSES.txt ; do
    cp $bdir/$f . || true
    git add "$f"
done


for btype in glibc_linux musl_linux darwin windows ; do
    lib=$bdir/librdkafka_${btype}.a
    pc=${lib/%.a/.pc}
    # Note: the subshell's exit 1 aborts the whole script because set -e
    # propagates the failing compound command.
    [[ -f $lib ]] || (echo "Expected file $lib missing" ; exit 1)
    [[ -f $pc ]] || (echo "Expected file $pc missing" ; exit 1)

    setup_build $btype $lib $pc $bundlename
done

rm -rf "$bdir"

echo "All done"
@ -0,0 +1,110 @@
#!/bin/bash
#
#
# Import a new version of librdkafka based on a librdkafka static bundle.
# This will create a separate branch, import librdkafka, make a commit,
# and then ask you to push the branch to GitHub, have it reviewed,
# and then later merged (NOT squashed or rebased).
# Having a merge per import allows future shallow clones to skip and ignore
# older imports, hopefully reducing the amount of git history data 'go get'
# needs to download.

set -e

usage() {
    echo "Usage: $0 [--devel] path/to/librdkafka-static-bundle-<VERSION>.tgz"
    echo ""
    echo "This tool must be run from the TOPDIR/kafka/librdkafka directory"
    echo ""
    echo "Options:"
    echo "  --devel - Development use: No branch checks and does not push to GitHub"
    exit 1
}

error_cleanup() {
    echo "Error occurred, cleaning up"
    git checkout $curr_branch
    git branch -D $import_branch
    exit 1
}

devel=0
if [[ $1 == --devel ]]; then
    devel=1
    shift
fi

bundle="$1"
[[ -f $bundle ]] || usage

# Parse the librdkafka version from the bundle
bundlename=$(basename $bundle)
version=${bundlename#librdkafka-static-bundle-}
version=${version%.tgz}

if [[ -z $version ]]; then
    echo "Error: Could not parse version from bundle $bundle"
    exit 1
fi

# Verify branch state
curr_branch=$(git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3-)
uncommitted=$(git status --untracked-files=no --porcelain)

if [[ $devel != 1 ]] && ( [[ $curr_branch != master ]] || [[ ! -z $uncommitted ]] ); then
    echo "Error: This script must be run on an up-to-date, clean, master branch"
    if [[ ! -z $uncommitted ]]; then
        echo "Uncommitted files:"
        echo "$uncommitted"
    fi
    exit 1
fi


# Create import branch, import bundle, commit.
import_branch="import_$version"

exists=$(git branch -rlq | grep "/$import_branch\$" || true)
if [[ ! -z $exists ]]; then
    echo "Error: This version branch already seems to exist: $exists: already imported?"
    [[ $devel != 1 ]] && exit 1
fi

echo "Checking for existing commits that match this version (should be none)"
git log --oneline | grep "^librdkafka static bundle $version\$" && exit 1


echo "Creating import branch $import_branch"
git checkout -b $import_branch

echo "Importing bundle $bundle"
./bundle-import.sh "$bundle" || error_cleanup

echo "Committing $version"
git commit -a -m "librdkafka static bundle $version" || error_cleanup

echo "Updating error codes and docs"
pushd ../../
make -f mk/Makefile docs || error_cleanup
git commit -a -m "Documentation and error code update for librdkafka $version" \
    || error_cleanup
popd

if [[ $devel != 1 ]]; then
    echo "Pushing branch"
    git push origin $import_branch || error_cleanup
fi

git checkout $curr_branch

if [[ $devel != 1 ]]; then
    git branch -D $import_branch
fi

echo ""
echo "############## IMPORT OF $version COMPLETE ##############"
if [[ $devel != 1 ]]; then
    echo "Branch $import_branch has been pushed."
    echo "Create a PR, have it reviewed and then merge it (do NOT squash or rebase)."
fi
@ -0,0 +1,21 @@
/**
 * Copyright 2020 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package librdkafka

// LibrdkafkaGoSubdir is a dummy variable needed to export something so the
// file is not empty.
var LibrdkafkaGoSubdir = true
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
@ -0,0 +1,89 @@
package kafka

import (
    "fmt"
    "time"
)

/*
#include "select_rdkafka.h"
*/
import "C"

// LogEvent represents a log entry from the librdkafka internal log queue.
type LogEvent struct {
    Name      string    // Name of client instance
    Tag       string    // Log tag that provides context to the log Message (e.g., "METADATA" or "GRPCOORD")
    Message   string    // Log message
    Level     int       // Log syslog level, lower is more critical.
    Timestamp time.Time // Log timestamp
}
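// A sketch of consuming these events, assuming the client was created with
// the `go.logs.channel.enable` configuration property set to true so that
// logs are forwarded to the handle's Logs() channel (property and accessor
// names as used by this client; adjust to your version):
//
//     go func() {
//         for logEvent := range p.Logs() {
//             fmt.Println(logEvent) // uses the String() method below
//         }
//     }()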
// newLogEvent creates a new LogEvent from the given rd_kafka_event_t.
//
// This function does not take ownership of the cEvent pointer. You need to
// free its resources using C.rd_kafka_event_destroy afterwards.
//
// The cEvent object needs to be of type C.RD_KAFKA_EVENT_LOG. Calling this
// function with an object of another type has undefined behaviour.
func (h *handle) newLogEvent(cEvent *C.rd_kafka_event_t) LogEvent {
    var tag, message *C.char
    var level C.int

    C.rd_kafka_event_log(cEvent, &(tag), &(message), &(level))

    return LogEvent{
        Name:      h.name,
        Tag:       C.GoString(tag),
        Message:   C.GoString(message),
        Level:     int(level),
        Timestamp: time.Now(),
    }
}

// pollLogEvents polls log events from librdkafka and pushes them to toChannel,
// until doneChan is closed.
//
// Each call to librdkafka times out after timeoutMs. If a call to librdkafka
// is ongoing when doneChan is closed, the function will wait until the call
// returns or times out, whichever happens first.
func (h *handle) pollLogEvents(toChannel chan LogEvent, timeoutMs int, doneChan chan bool) {
    for {
        select {
        case <-doneChan:
            return

        default:
            cEvent := C.rd_kafka_queue_poll(h.logq, C.int(timeoutMs))
            if cEvent == nil {
                continue
            }

            if C.rd_kafka_event_type(cEvent) != C.RD_KAFKA_EVENT_LOG {
                C.rd_kafka_event_destroy(cEvent)
                continue
            }

            logEvent := h.newLogEvent(cEvent)
            C.rd_kafka_event_destroy(cEvent)

            select {
            case <-doneChan:
                return

            case toChannel <- logEvent:
                continue
            }
        }
    }
}

// String formats a LogEvent as "[timestamp][name][tag][level]message".
func (logEvent LogEvent) String() string {
    return fmt.Sprintf(
        "[%v][%s][%s][%d]%s",
        logEvent.Timestamp.Format(time.RFC3339),
        logEvent.Name,
        logEvent.Tag,
        logEvent.Level,
        logEvent.Message)
}
@ -0,0 +1,223 @@
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
    "fmt"
    "time"
    "unsafe"
)

/*
#include <string.h>
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"

void setup_rkmessage (rd_kafka_message_t *rkmessage,
                      rd_kafka_topic_t *rkt, int32_t partition,
                      const void *payload, size_t len,
                      void *key, size_t keyLen, void *opaque) {
     rkmessage->rkt       = rkt;
     rkmessage->partition = partition;
     rkmessage->payload   = (void *)payload;
     rkmessage->len       = len;
     rkmessage->key       = (void *)key;
     rkmessage->key_len   = keyLen;
     rkmessage->_private  = opaque;
}
*/
import "C"

// TimestampType is the Message timestamp type or source.
type TimestampType int

const (
    // TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
    TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
    // TimestampCreateTime indicates timestamp set by producer (source time)
    TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
    // TimestampLogAppendTime indicates timestamp set by broker (store time)
    TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
)

func (t TimestampType) String() string {
    switch t {
    case TimestampCreateTime:
        return "CreateTime"
    case TimestampLogAppendTime:
        return "LogAppendTime"
    case TimestampNotAvailable:
        fallthrough
    default:
        return "NotAvailable"
    }
}

// Message represents a Kafka message
type Message struct {
    TopicPartition TopicPartition
    Value          []byte
    Key            []byte
    Timestamp      time.Time
    TimestampType  TimestampType
    Opaque         interface{}
    Headers        []Header
}
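// For produce purposes only TopicPartition, Value and optionally Key and
// Headers need to be filled in; a typical construction (a sketch with a
// placeholder topic name) looks like:
//
//     topic := "mytopic"
//     msg := &kafka.Message{
//         TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
//         Key:            []byte("key"),
//         Value:          []byte("value"),
//     }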
// String returns a human readable representation of a Message.
// Key and payload are not represented.
func (m *Message) String() string {
    var topic string
    if m.TopicPartition.Topic != nil {
        topic = *m.TopicPartition.Topic
    } else {
        topic = ""
    }
    return fmt.Sprintf("%s[%d]@%s", topic, m.TopicPartition.Partition, m.TopicPartition.Offset)
}

func (h *handle) getRktFromMessage(msg *Message) (crkt *C.rd_kafka_topic_t) {
    if msg.TopicPartition.Topic == nil {
        return nil
    }

    return h.getRkt(*msg.TopicPartition.Topic)
}

// setupHeadersFromGlueMsg converts the C tmp headers in gMsg to
// Go Headers in msg.
// gMsg.tmphdrs will be freed.
func setupHeadersFromGlueMsg(msg *Message, gMsg *C.glue_msg_t) {
    msg.Headers = make([]Header, gMsg.tmphdrsCnt)
    for n := range msg.Headers {
        tmphdr := (*[1 << 30]C.tmphdr_t)(unsafe.Pointer(gMsg.tmphdrs))[n]
        msg.Headers[n].Key = C.GoString(tmphdr.key)
        if tmphdr.val != nil {
            msg.Headers[n].Value = C.GoBytes(unsafe.Pointer(tmphdr.val), C.int(tmphdr.size))
        } else {
            msg.Headers[n].Value = nil
        }
    }
    C.free(unsafe.Pointer(gMsg.tmphdrs))
}

func (h *handle) newMessageFromGlueMsg(gMsg *C.glue_msg_t) (msg *Message) {
    msg = &Message{}

    if gMsg.ts != -1 {
        ts := int64(gMsg.ts)
        msg.TimestampType = TimestampType(gMsg.tstype)
        // librdkafka timestamps are milliseconds since the Unix epoch
        msg.Timestamp = time.Unix(ts/1000, (ts%1000)*1000000)
    }

    if gMsg.tmphdrsCnt > 0 {
        setupHeadersFromGlueMsg(msg, gMsg)
    }

    h.setupMessageFromC(msg, gMsg.msg)

    return msg
}

// setupMessageFromC sets up a message object from a C rd_kafka_message_t
func (h *handle) setupMessageFromC(msg *Message, cmsg *C.rd_kafka_message_t) {
    if cmsg.rkt != nil {
        topic := h.getTopicNameFromRkt(cmsg.rkt)
        msg.TopicPartition.Topic = &topic
    }
    msg.TopicPartition.Partition = int32(cmsg.partition)
    if cmsg.payload != nil && h.msgFields.Value {
        msg.Value = C.GoBytes(unsafe.Pointer(cmsg.payload), C.int(cmsg.len))
    }
    if cmsg.key != nil && h.msgFields.Key {
        msg.Key = C.GoBytes(unsafe.Pointer(cmsg.key), C.int(cmsg.key_len))
    }
    if h.msgFields.Headers {
        var gMsg C.glue_msg_t
        gMsg.msg = cmsg
        gMsg.want_hdrs = C.int8_t(1)
        chdrsToTmphdrs(&gMsg)
        if gMsg.tmphdrsCnt > 0 {
            setupHeadersFromGlueMsg(msg, &gMsg)
        }
    }
    msg.TopicPartition.Offset = Offset(cmsg.offset)
    if cmsg.err != 0 {
        msg.TopicPartition.Error = newError(cmsg.err)
    }
}

// newMessageFromC creates a new message object from a C rd_kafka_message_t
// NOTE: For use with Producer: does not set message timestamp fields.
func (h *handle) newMessageFromC(cmsg *C.rd_kafka_message_t) (msg *Message) {
    msg = &Message{}

    h.setupMessageFromC(msg, cmsg)

    return msg
}

// messageToC sets up cmsg as a clone of msg
func (h *handle) messageToC(msg *Message, cmsg *C.rd_kafka_message_t) {
    var valp unsafe.Pointer
    var keyp unsafe.Pointer

    // to circumvent Cgo constraints we need to allocate C heap memory
    // for both Value and Key (one allocation back to back)
    // and copy the bytes from Value and Key to the C memory.
    // We later tell librdkafka (in produce()) to free the
    // C memory pointer when it is done.
    var payload unsafe.Pointer

    valueLen := 0
    keyLen := 0
    if msg.Value != nil {
        valueLen = len(msg.Value)
    }
    if msg.Key != nil {
        keyLen = len(msg.Key)
    }

    allocLen := valueLen + keyLen
    if allocLen > 0 {
        payload = C.malloc(C.size_t(allocLen))
        if valueLen > 0 {
            copy((*[1 << 30]byte)(payload)[0:valueLen], msg.Value)
            valp = payload
        }
        if keyLen > 0 {
            copy((*[1 << 30]byte)(payload)[valueLen:allocLen], msg.Key)
            keyp = unsafe.Pointer(&((*[1 << 31]byte)(payload)[valueLen]))
        }
    }

    cmsg.rkt = h.getRktFromMessage(msg)
    cmsg.partition = C.int32_t(msg.TopicPartition.Partition)
    cmsg.payload = valp
    cmsg.len = C.size_t(valueLen)
    cmsg.key = keyp
    cmsg.key_len = C.size_t(keyLen)
    cmsg._private = nil
}

// messageToCDummy is used for testing messageToC performance.
func (h *handle) messageToCDummy(msg *Message) {
    var cmsg C.rd_kafka_message_t
    h.messageToC(msg, &cmsg)
}
@ -0,0 +1,180 @@
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
    "unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"

struct rd_kafka_metadata_broker *_getMetadata_broker_element(struct rd_kafka_metadata *m, int i) {
  return &m->brokers[i];
}

struct rd_kafka_metadata_topic *_getMetadata_topic_element(struct rd_kafka_metadata *m, int i) {
  return &m->topics[i];
}

struct rd_kafka_metadata_partition *_getMetadata_partition_element(struct rd_kafka_metadata *m, int topic_idx, int partition_idx) {
  return &m->topics[topic_idx].partitions[partition_idx];
}

int32_t _get_int32_element (int32_t *arr, int i) {
  return arr[i];
}

*/
import "C"

// BrokerMetadata contains per-broker metadata
type BrokerMetadata struct {
    ID   int32
    Host string
    Port int
}

// PartitionMetadata contains per-partition metadata
type PartitionMetadata struct {
    ID       int32
    Error    Error
    Leader   int32
    Replicas []int32
    Isrs     []int32
}

// TopicMetadata contains per-topic metadata
type TopicMetadata struct {
    Topic      string
    Partitions []PartitionMetadata
    Error      Error
}

// Metadata contains broker and topic metadata for all (matching) topics
type Metadata struct {
    Brokers []BrokerMetadata
    Topics  map[string]TopicMetadata

    OriginatingBroker BrokerMetadata
}
// getMetadata queries broker for cluster and topic metadata.
|
||||||
|
// If topic is non-nil only information about that topic is returned, else if
|
||||||
|
// allTopics is false only information about locally used topics is returned,
|
||||||
|
// else information about all topics is returned.
|
||||||
|
func getMetadata(H Handle, topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { |
||||||
|
h := H.gethandle() |
||||||
|
|
||||||
|
var rkt *C.rd_kafka_topic_t |
||||||
|
if topic != nil { |
||||||
|
rkt = h.getRkt(*topic) |
||||||
|
} |
||||||
|
|
||||||
|
var cMd *C.struct_rd_kafka_metadata |
||||||
|
cErr := C.rd_kafka_metadata(h.rk, bool2cint(allTopics), |
||||||
|
rkt, &cMd, C.int(timeoutMs)) |
||||||
|
if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return nil, newError(cErr) |
||||||
|
} |
||||||
|
|
||||||
|
m := Metadata{} |
||||||
|
defer C.rd_kafka_metadata_destroy(cMd) |
||||||
|
|
||||||
|
m.Brokers = make([]BrokerMetadata, cMd.broker_cnt) |
||||||
|
for i := 0; i < int(cMd.broker_cnt); i++ { |
||||||
|
b := C._getMetadata_broker_element(cMd, C.int(i)) |
||||||
|
m.Brokers[i] = BrokerMetadata{int32(b.id), C.GoString(b.host), |
||||||
|
int(b.port)} |
||||||
|
} |
||||||
|
|
||||||
|
m.Topics = make(map[string]TopicMetadata, int(cMd.topic_cnt)) |
||||||
|
for i := 0; i < int(cMd.topic_cnt); i++ { |
||||||
|
t := C._getMetadata_topic_element(cMd, C.int(i)) |
||||||
|
|
||||||
|
thisTopic := C.GoString(t.topic) |
||||||
|
m.Topics[thisTopic] = TopicMetadata{Topic: thisTopic, |
||||||
|
Error: newError(t.err), |
||||||
|
Partitions: make([]PartitionMetadata, int(t.partition_cnt))} |
||||||
|
|
||||||
|
for j := 0; j < int(t.partition_cnt); j++ { |
||||||
|
p := C._getMetadata_partition_element(cMd, C.int(i), C.int(j)) |
||||||
|
m.Topics[thisTopic].Partitions[j] = PartitionMetadata{ |
||||||
|
ID: int32(p.id), |
||||||
|
Error: newError(p.err), |
||||||
|
Leader: int32(p.leader)} |
||||||
|
m.Topics[thisTopic].Partitions[j].Replicas = make([]int32, int(p.replica_cnt)) |
||||||
|
for ir := 0; ir < int(p.replica_cnt); ir++ { |
||||||
|
m.Topics[thisTopic].Partitions[j].Replicas[ir] = int32(C._get_int32_element(p.replicas, C.int(ir))) |
||||||
|
} |
||||||
|
|
||||||
|
m.Topics[thisTopic].Partitions[j].Isrs = make([]int32, int(p.isr_cnt)) |
||||||
|
for ii := 0; ii < int(p.isr_cnt); ii++ { |
||||||
|
m.Topics[thisTopic].Partitions[j].Isrs[ii] = int32(C._get_int32_element(p.isrs, C.int(ii))) |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
m.OriginatingBroker = BrokerMetadata{int32(cMd.orig_broker_id), |
||||||
|
C.GoString(cMd.orig_broker_name), 0} |
||||||
|
|
||||||
|
return &m, nil |
||||||
|
} |
||||||
|
|
||||||
|
// queryWatermarkOffsets returns the broker's low and high offsets for the given topic
|
||||||
|
// and partition.
|
||||||
|
func queryWatermarkOffsets(H Handle, topic string, partition int32, timeoutMs int) (low, high int64, err error) { |
||||||
|
h := H.gethandle() |
||||||
|
|
||||||
|
ctopic := C.CString(topic) |
||||||
|
defer C.free(unsafe.Pointer(ctopic)) |
||||||
|
|
||||||
|
var cLow, cHigh C.int64_t |
||||||
|
|
||||||
|
e := C.rd_kafka_query_watermark_offsets(h.rk, ctopic, C.int32_t(partition), |
||||||
|
&cLow, &cHigh, C.int(timeoutMs)) |
||||||
|
if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { |
||||||
|
return 0, 0, newError(e) |
||||||
|
} |
||||||
|
|
||||||
|
low = int64(cLow) |
||||||
|
high = int64(cHigh) |
||||||
|
return low, high, nil |
||||||
|
} |

// getWatermarkOffsets returns the client's cached low and high offsets for the given topic
// and partition.
func getWatermarkOffsets(H Handle, topic string, partition int32) (low, high int64, err error) {
	h := H.gethandle()

	ctopic := C.CString(topic)
	defer C.free(unsafe.Pointer(ctopic))

	var cLow, cHigh C.int64_t

	e := C.rd_kafka_get_watermark_offsets(h.rk, ctopic, C.int32_t(partition),
		&cLow, &cHigh)
	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return 0, 0, newError(e)
	}

	low = int64(cLow)
	high = int64(cHigh)

	return low, high, nil
}
@ -0,0 +1,35 @@ |
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import "C"

// bool2cint converts a bool to a C.int (1 or 0)
func bool2cint(b bool) C.int {
	if b {
		return 1
	}
	return 0
}

// cint2bool converts a C.int to a bool
func cint2bool(v C.int) bool {
	if v == 0 {
		return false
	}
	return true
}
@ -0,0 +1,145 @@ |
/**
 * Copyright 2017 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"fmt"
	"strconv"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"

static int64_t _c_rdkafka_offset_tail(int64_t rel) {
	return RD_KAFKA_OFFSET_TAIL(rel);
}
*/
import "C"

// Offset type (int64) with support for canonical names
type Offset int64

// OffsetBeginning represents the earliest offset (logical)
const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)

// OffsetEnd represents the latest offset (logical)
const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)

// OffsetInvalid represents an invalid/unspecified offset
const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)

// OffsetStored represents a stored offset
const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)

func (o Offset) String() string {
	switch o {
	case OffsetBeginning:
		return "beginning"
	case OffsetEnd:
		return "end"
	case OffsetInvalid:
		return "unset"
	case OffsetStored:
		return "stored"
	default:
		return fmt.Sprintf("%d", int64(o))
	}
}

// Set offset value, see NewOffset()
func (o *Offset) Set(offset interface{}) error {
	n, err := NewOffset(offset)

	if err == nil {
		*o = n
	}

	return err
}

// NewOffset creates a new Offset using the provided logical string, or an
// absolute int64 offset value.
// Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored"
func NewOffset(offset interface{}) (Offset, error) {

	switch v := offset.(type) {
	case string:
		switch v {
		case "beginning":
			fallthrough
		case "earliest":
			return Offset(OffsetBeginning), nil

		case "end":
			fallthrough
		case "latest":
			return Offset(OffsetEnd), nil

		case "unset":
			fallthrough
		case "invalid":
			return Offset(OffsetInvalid), nil

		case "stored":
			return Offset(OffsetStored), nil

		default:
			off, err := strconv.Atoi(v)
			return Offset(off), err
		}

	case int:
		return Offset((int64)(v)), nil
	case int64:
		return Offset(v), nil
	default:
		return OffsetInvalid, newErrorFromString(ErrInvalidArg,
			fmt.Sprintf("Invalid offset type: %T", v))
	}
}

// OffsetTail returns the logical offset relativeOffset from current end of partition
func OffsetTail(relativeOffset Offset) Offset {
	return Offset(C._c_rdkafka_offset_tail(C.int64_t(relativeOffset)))
}

// offsetsForTimes looks up offsets by timestamp for the given partitions.
//
// The returned offset for each partition is the earliest offset whose
// timestamp is greater than or equal to the given timestamp in the
// corresponding partition. If the provided timestamp exceeds that of the
// last message in the partition, a value of -1 will be returned.
//
// The timestamps to query are represented as `.Offset` in the `times`
// argument and the looked up offsets are represented as `.Offset` in the returned
// `offsets` list.
//
// The function will block for at most timeoutMs milliseconds.
//
// Duplicate Topic+Partitions are not supported.
// Per-partition errors may be returned in the `.Error` field.
func offsetsForTimes(H Handle, times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
	cparts := newCPartsFromTopicPartitions(times)
	defer C.rd_kafka_topic_partition_list_destroy(cparts)
	cerr := C.rd_kafka_offsets_for_times(H.gethandle().rk, cparts, C.int(timeoutMs))
	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return nil, newError(cerr)
	}

	return newTopicPartitionsFromCparts(cparts), nil
}
@ -0,0 +1,918 @@ |
/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import (
	"context"
	"fmt"
	"math"
	"time"
	"unsafe"
)

/*
#include <stdlib.h>
#include "select_rdkafka.h"
#include "glue_rdkafka.h"


#ifdef RD_KAFKA_V_HEADERS
// Convert tmphdrs to chdrs (created by this function).
// If tmphdr.size == -1: value is considered Null
//    tmphdr.size == 0:  value is considered empty (ignored)
//    tmphdr.size > 0:   value is considered non-empty
//
// WARNING: The header keys and values will be freed by this function.
void tmphdrs_to_chdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt,
                       rd_kafka_headers_t **chdrs) {
   size_t i;

   *chdrs = rd_kafka_headers_new(tmphdrsCnt);

   for (i = 0 ; i < tmphdrsCnt ; i++) {
      rd_kafka_header_add(*chdrs,
                          tmphdrs[i].key, -1,
                          tmphdrs[i].size == -1 ? NULL :
                          (tmphdrs[i].size == 0 ? "" : tmphdrs[i].val),
                          tmphdrs[i].size == -1 ? 0 : tmphdrs[i].size);
      if (tmphdrs[i].size > 0)
         free((void *)tmphdrs[i].val);
      free((void *)tmphdrs[i].key);
   }
}

#else
void free_tmphdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt) {
   size_t i;
   for (i = 0 ; i < tmphdrsCnt ; i++) {
      if (tmphdrs[i].size > 0)
         free((void *)tmphdrs[i].val);
      free((void *)tmphdrs[i].key);
   }
}
#endif


rd_kafka_resp_err_t do_produce (rd_kafka_t *rk,
          rd_kafka_topic_t *rkt, int32_t partition,
          int msgflags,
          int valIsNull, void *val, size_t val_len,
          int keyIsNull, void *key, size_t key_len,
          int64_t timestamp,
          tmphdr_t *tmphdrs, size_t tmphdrsCnt,
          uintptr_t cgoid) {
  void *valp = valIsNull ? NULL : val;
  void *keyp = keyIsNull ? NULL : key;
#ifdef RD_KAFKA_V_TIMESTAMP
  rd_kafka_resp_err_t err;
#ifdef RD_KAFKA_V_HEADERS
  rd_kafka_headers_t *hdrs = NULL;
#endif
#endif


  if (tmphdrsCnt > 0) {
#ifdef RD_KAFKA_V_HEADERS
     tmphdrs_to_chdrs(tmphdrs, tmphdrsCnt, &hdrs);
#else
     free_tmphdrs(tmphdrs, tmphdrsCnt);
     return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
#endif
  }


#ifdef RD_KAFKA_V_TIMESTAMP
  err = rd_kafka_producev(rk,
        RD_KAFKA_V_RKT(rkt),
        RD_KAFKA_V_PARTITION(partition),
        RD_KAFKA_V_MSGFLAGS(msgflags),
        RD_KAFKA_V_VALUE(valp, val_len),
        RD_KAFKA_V_KEY(keyp, key_len),
        RD_KAFKA_V_TIMESTAMP(timestamp),
#ifdef RD_KAFKA_V_HEADERS
        RD_KAFKA_V_HEADERS(hdrs),
#endif
        RD_KAFKA_V_OPAQUE((void *)cgoid),
        RD_KAFKA_V_END);
#ifdef RD_KAFKA_V_HEADERS
  if (err && hdrs)
    rd_kafka_headers_destroy(hdrs);
#endif
  return err;
#else
  if (timestamp)
      return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
  if (rd_kafka_produce(rkt, partition, msgflags,
                       valp, val_len,
                       keyp, key_len,
                       (void *)cgoid) == -1)
      return rd_kafka_last_error();
  else
      return RD_KAFKA_RESP_ERR_NO_ERROR;
#endif
}
*/
import "C"

// Producer implements a High-level Apache Kafka Producer instance
type Producer struct {
	events         chan Event
	produceChannel chan *Message
	handle         handle

	// Terminates the poller() goroutine
	pollerTermChan chan bool
}

// String returns a human-readable name for a Producer instance
func (p *Producer) String() string {
	return p.handle.String()
}

// gethandle implements the Handle interface
func (p *Producer) gethandle() *handle {
	return &p.handle
}

func (p *Producer) produce(msg *Message, msgFlags int, deliveryChan chan Event) error {
	if msg == nil || msg.TopicPartition.Topic == nil || len(*msg.TopicPartition.Topic) == 0 {
		return newErrorFromString(ErrInvalidArg, "")
	}

	crkt := p.handle.getRkt(*msg.TopicPartition.Topic)

	// Three problems:
	//  1) There's a difference between an empty Value or Key (length 0, proper pointer) and
	//     a null Value or Key (length 0, null pointer).
	//  2) we need to be able to send a null Value or Key, but the unsafe.Pointer(&slice[0])
	//     dereference can't be performed on a nil slice.
	//  3) cgo's pointer checking requires the unsafe.Pointer(slice..) call to be made
	//     in the call to the C function.
	//
	// Solution:
	//  Keep track of whether the Value or Key were nil (1), but let the valp and keyp pointers
	//  point to a 1-byte slice (but the length to send is still 0) so that the dereference (2)
	//  works.
	//  Then perform the unsafe.Pointer() on the valp and keyp pointers (which now either point
	//  to the original msg.Value and msg.Key or to the 1-byte slices) in the call to C (3).
	//
	var valp []byte
	var keyp []byte
	oneByte := []byte{0}
	var valIsNull C.int
	var keyIsNull C.int
	var valLen int
	var keyLen int

	if msg.Value == nil {
		valIsNull = 1
		valLen = 0
		valp = oneByte
	} else {
		valLen = len(msg.Value)
		if valLen > 0 {
			valp = msg.Value
		} else {
			valp = oneByte
		}
	}

	if msg.Key == nil {
		keyIsNull = 1
		keyLen = 0
		keyp = oneByte
	} else {
		keyLen = len(msg.Key)
		if keyLen > 0 {
			keyp = msg.Key
		} else {
			keyp = oneByte
		}
	}

	var cgoid int

	// Per-message state that needs to be retained through the C code:
	//   delivery channel (if specified)
	//   message opaque   (if specified)
	// Since these can't be passed as opaque pointers to the C code,
	// due to cgo constraints, we add them to a per-producer map for lookup
	// when the C code triggers the callbacks or events.
	if deliveryChan != nil || msg.Opaque != nil {
		cgoid = p.handle.cgoPut(cgoDr{deliveryChan: deliveryChan, opaque: msg.Opaque})
	}

	var timestamp int64
	if !msg.Timestamp.IsZero() {
		timestamp = msg.Timestamp.UnixNano() / 1000000
	}

	// Convert headers to C-friendly tmphdrs
	var tmphdrs []C.tmphdr_t
	tmphdrsCnt := len(msg.Headers)

	if tmphdrsCnt > 0 {
		tmphdrs = make([]C.tmphdr_t, tmphdrsCnt)

		for n, hdr := range msg.Headers {
			// Make a copy of the key
			// to avoid runtime panic with
			// foreign Go pointers in cgo.
			tmphdrs[n].key = C.CString(hdr.Key)
			if hdr.Value != nil {
				tmphdrs[n].size = C.ssize_t(len(hdr.Value))
				if tmphdrs[n].size > 0 {
					// Make a copy of the value
					// to avoid runtime panic with
					// foreign Go pointers in cgo.
					tmphdrs[n].val = C.CBytes(hdr.Value)
				}
			} else {
				// null value
				tmphdrs[n].size = C.ssize_t(-1)
			}
		}
	} else {
		// no headers, need a dummy tmphdrs of size 1 to avoid index
		// out of bounds panic in do_produce() call below.
		// tmphdrsCnt will be 0.
		tmphdrs = []C.tmphdr_t{{nil, nil, 0}}
	}

	cErr := C.do_produce(p.handle.rk, crkt,
		C.int32_t(msg.TopicPartition.Partition),
		C.int(msgFlags)|C.RD_KAFKA_MSG_F_COPY,
		valIsNull, unsafe.Pointer(&valp[0]), C.size_t(valLen),
		keyIsNull, unsafe.Pointer(&keyp[0]), C.size_t(keyLen),
		C.int64_t(timestamp),
		(*C.tmphdr_t)(unsafe.Pointer(&tmphdrs[0])), C.size_t(tmphdrsCnt),
		(C.uintptr_t)(cgoid))
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		if cgoid != 0 {
			p.handle.cgoGet(cgoid)
		}
		return newError(cErr)
	}

	return nil
}

// Produce single message.
// This is an asynchronous call that enqueues the message on the internal
// transmit queue, thus returning immediately.
// The delivery report will be sent on the provided deliveryChan if specified,
// or on the Producer object's Events() channel if not.
// msg.Timestamp requires librdkafka >= 0.9.4 (else returns ErrNotImplemented),
// api.version.request=true, and broker >= 0.10.0.0.
// msg.Headers requires librdkafka >= 0.11.4 (else returns ErrNotImplemented),
// api.version.request=true, and broker >= 0.11.0.0.
// Returns an error if message could not be enqueued.
func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error {
	return p.produce(msg, 0, deliveryChan)
}

// Produce a batch of messages.
// These batches do not relate to the message batches sent to the broker;
// the latter are collected on the fly internally in librdkafka.
// WARNING: This is an experimental API.
// NOTE: timestamps and headers are not supported with this API.
func (p *Producer) produceBatch(topic string, msgs []*Message, msgFlags int) error {
	crkt := p.handle.getRkt(topic)

	cmsgs := make([]C.rd_kafka_message_t, len(msgs))
	for i, m := range msgs {
		p.handle.messageToC(m, &cmsgs[i])
	}
	r := C.rd_kafka_produce_batch(crkt, C.RD_KAFKA_PARTITION_UA, C.int(msgFlags)|C.RD_KAFKA_MSG_F_FREE,
		(*C.rd_kafka_message_t)(&cmsgs[0]), C.int(len(msgs)))
	if r == -1 {
		return newError(C.rd_kafka_last_error())
	}

	return nil
}

// Events returns the Events channel (read)
func (p *Producer) Events() chan Event {
	return p.events
}

// Logs returns the Log channel (if enabled), else nil
func (p *Producer) Logs() chan LogEvent {
	return p.handle.logs
}

// ProduceChannel returns the produce *Message channel (write)
func (p *Producer) ProduceChannel() chan *Message {
	return p.produceChannel
}

// Len returns the number of messages and requests waiting to be transmitted to the broker
// as well as delivery reports queued for the application.
// Includes messages on ProduceChannel.
func (p *Producer) Len() int {
	return len(p.produceChannel) + len(p.events) + int(C.rd_kafka_outq_len(p.handle.rk))
}

// Flush and wait for outstanding messages and requests to complete delivery.
// Includes messages on ProduceChannel.
// Runs until the value reaches zero or timeoutMs elapses, whichever comes first.
// Returns the number of outstanding events still un-flushed.
func (p *Producer) Flush(timeoutMs int) int {
	termChan := make(chan bool) // unused stand-in termChan

	d, _ := time.ParseDuration(fmt.Sprintf("%dms", timeoutMs))
	tEnd := time.Now().Add(d)
	for p.Len() > 0 {
		remain := tEnd.Sub(time.Now()).Seconds()
		if remain <= 0.0 {
			return p.Len()
		}

		p.handle.eventPoll(p.events,
			int(math.Min(100, remain*1000)), 1000, termChan)
	}

	return 0
}

// Close a Producer instance.
// The Producer object or its channels are no longer usable after this call.
func (p *Producer) Close() {
	// Wait for poller() (signaled by closing pollerTermChan)
	// and channel_producer() (signaled by closing ProduceChannel)
	close(p.pollerTermChan)
	close(p.produceChannel)
	p.handle.waitGroup.Wait()

	close(p.events)

	p.handle.cleanup()

	C.rd_kafka_destroy(p.handle.rk)
}

const (
	// PurgeInFlight purges messages in-flight to or from the broker.
	// Purging these messages will void any future acknowledgements from the
	// broker, making it impossible for the application to know if these
	// messages were successfully delivered or not.
	// Retrying these messages may lead to duplicates.
	PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT)

	// PurgeQueue purges messages in internal queues.
	PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE)

	// PurgeNonBlocking does not wait for background thread queue purging to finish.
	PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING)
)

// Purge messages currently handled by this producer instance.
//
// flags is a combination of PurgeQueue, PurgeInFlight and PurgeNonBlocking.
//
// The application will need to call Poll(), Flush() or read the Events() channel
// after this call to serve delivery reports for the purged messages.
//
// Messages purged from internal queues fail with the delivery report
// error code set to ErrPurgeQueue, while purged messages that
// are in-flight to or from the broker will fail with the error code set to
// ErrPurgeInflight.
//
// Warning: Purging messages that are in-flight to or from the broker
// will ignore any subsequent acknowledgement for these messages
// received from the broker, effectively making it impossible
// for the application to know if the messages were successfully
// produced or not. This may result in duplicate messages if the
// application retries these messages at a later time.
//
// Note: This call may block for a short time while background thread
// queues are purged.
//
// Returns nil on success, ErrInvalidArg if the purge flags are invalid or unknown.
func (p *Producer) Purge(flags int) error {
	cErr := C.rd_kafka_purge(p.handle.rk, C.int(flags))
	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
		return newError(cErr)
	}

	return nil
}

// NewProducer creates a new high-level Producer instance.
//
// conf is a *ConfigMap with standard librdkafka configuration properties.
//
// Supported special configuration properties (type, default):
//   go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance).
//                                     These batches do not relate to Kafka message batches in any way.
//                                     Note: timestamps and headers are not supported with this interface.
//   go.delivery.reports (bool, true) - Forward per-message delivery reports to the
//                                      Events() channel.
//   go.delivery.report.fields (string, "key,value") - Comma separated list of fields to enable for delivery reports.
//                                     Allowed values: all, none (or empty string), key, value, headers
//                                     Warning: There is a performance penalty to include headers in the delivery report.
//   go.events.channel.size (int, 1000000) - Events() channel buffer size (in number of events)
//   go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages)
//   go.logs.channel.enable (bool, false) - Forward log to Logs() channel.
//   go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.
//
func NewProducer(conf *ConfigMap) (*Producer, error) {

	err := versionCheck()
	if err != nil {
		return nil, err
	}

	p := &Producer{}

	// before we do anything with the configuration, create a copy such that
	// the original is not mutated.
	confCopy := conf.clone()

	v, err := confCopy.extract("delivery.report.only.error", false)
	if v == true {
		// FIXME: The filtering of successful DRs must be done in
		//        the Go client to avoid cgoDr memory leaks.
		return nil, newErrorFromString(ErrUnsupportedFeature,
			"delivery.report.only.error=true is not currently supported by the Go client")
	}

	v, err = confCopy.extract("go.batch.producer", false)
	if err != nil {
		return nil, err
	}
	batchProducer := v.(bool)

	v, err = confCopy.extract("go.delivery.reports", true)
	if err != nil {
		return nil, err
	}
	p.handle.fwdDr = v.(bool)

	v, err = confCopy.extract("go.delivery.report.fields", "key,value")
	if err != nil {
		return nil, err
	}

	p.handle.msgFields, err = newMessageFieldsFrom(v)
	if err != nil {
		return nil, err
	}

	v, err = confCopy.extract("go.events.channel.size", 1000000)
	if err != nil {
		return nil, err
	}
	eventsChanSize := v.(int)

	v, err = confCopy.extract("go.produce.channel.size", 1000000)
	if err != nil {
		return nil, err
	}
	produceChannelSize := v.(int)

	logsChanEnable, logsChan, err := confCopy.extractLogConfig()
	if err != nil {
		return nil, err
	}

	if int(C.rd_kafka_version()) < 0x01000000 {
		// produce.offset.report is no longer used in librdkafka >= v1.0.0
		v, _ = confCopy.extract("{topic}.produce.offset.report", nil)
		if v == nil {
			// Enable offset reporting by default, unless overridden.
			confCopy.SetKey("{topic}.produce.offset.report", true)
		}
	}

	// Convert ConfigMap to librdkafka conf_t
	cConf, err := confCopy.convert()
	if err != nil {
		return nil, err
	}

	cErrstr := (*C.char)(C.malloc(C.size_t(256)))
	defer C.free(unsafe.Pointer(cErrstr))

	C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_DR|C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR|C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH)

	// Create librdkafka producer instance
	p.handle.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256)
	if p.handle.rk == nil {
		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
	}

	p.handle.p = p
	p.handle.setup()
	p.handle.rkq = C.rd_kafka_queue_get_main(p.handle.rk)
	p.events = make(chan Event, eventsChanSize)
	p.produceChannel = make(chan *Message, produceChannelSize)
	p.pollerTermChan = make(chan bool)

	if logsChanEnable {
		p.handle.setupLogQueue(logsChan, p.pollerTermChan)
	}

	p.handle.waitGroup.Add(1)
	go func() {
		poller(p, p.pollerTermChan)
		p.handle.waitGroup.Done()
	}()

	// non-batch or batch producer, only one must be used
	var producer func(*Producer)
	if batchProducer {
		producer = channelBatchProducer
	} else {
		producer = channelProducer
	}

	p.handle.waitGroup.Add(1)
	go func() {
		producer(p)
		p.handle.waitGroup.Done()
	}()

	return p, nil
}

// channelProducer serves the ProduceChannel channel
func channelProducer(p *Producer) {
	for m := range p.produceChannel {
		err := p.produce(m, C.RD_KAFKA_MSG_F_BLOCK, nil)
		if err != nil {
			m.TopicPartition.Error = err
			p.events <- m
		}
	}
}

// channelBatchProducer serves the ProduceChannel channel and attempts to
// improve cgo performance by using the produceBatch() interface.
func channelBatchProducer(p *Producer) {
	var buffered = make(map[string][]*Message)
	bufferedCnt := 0
	const batchSize int = 1000000
	totMsgCnt := 0
	totBatchCnt := 0

	for m := range p.produceChannel {
		buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m)
		bufferedCnt++

	loop2:
		for {
			select {
			case m, ok := <-p.produceChannel:
				if !ok {
					break loop2
				}
				if m == nil {
					panic("nil message received on ProduceChannel")
				}
				if m.TopicPartition.Topic == nil {
					panic(fmt.Sprintf("message without Topic received on ProduceChannel: %v", m))
				}
				buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m)
				bufferedCnt++
				if bufferedCnt >= batchSize {
					break loop2
				}
			default:
				break loop2
			}
		}

		totBatchCnt++
		totMsgCnt += len(buffered)

		for topic, buffered2 := range buffered {
			err := p.produceBatch(topic, buffered2, C.RD_KAFKA_MSG_F_BLOCK)
			if err != nil {
				for _, m = range buffered2 {
					m.TopicPartition.Error = err
					p.events <- m
				}
			}
		}

		buffered = make(map[string][]*Message)
		bufferedCnt = 0
	}
}

// poller polls the rd_kafka_t handle for events until signalled for termination
func poller(p *Producer, termChan chan bool) {
	for {
		select {
		case <-termChan:
			return

		default:
			_, term := p.handle.eventPoll(p.events, 100, 1000, termChan)
			if term {
				return
			}
		}
	}
}

// GetMetadata queries broker for cluster and topic metadata.
// If topic is non-nil only information about that topic is returned, else if
// allTopics is false only information about locally used topics is returned,
// else information about all topics is returned.
// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.
func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
	return getMetadata(p, topic, allTopics, timeoutMs)
}

// QueryWatermarkOffsets returns the broker's low and high offsets for the given topic
// and partition.
func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) {
	return queryWatermarkOffsets(p, topic, partition, timeoutMs)
}

// OffsetsForTimes looks up offsets by timestamp for the given partitions.
//
// The returned offset for each partition is the earliest offset whose
// timestamp is greater than or equal to the given timestamp in the
// corresponding partition. If the provided timestamp exceeds that of the
// last message in the partition, a value of -1 will be returned.
//
// The timestamps to query are represented as `.Offset` in the `times`
// argument and the looked up offsets are represented as `.Offset` in the returned
// `offsets` list.
//
// The function will block for at most timeoutMs milliseconds.
//
// Duplicate Topic+Partitions are not supported.
// Per-partition errors may be returned in the `.Error` field.
func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
	return offsetsForTimes(p, times, timeoutMs)
}

// GetFatalError returns an Error object if the client instance has raised a fatal error, else nil.
func (p *Producer) GetFatalError() error {
	return getFatalError(p)
}

// TestFatalError triggers a fatal error in the underlying client.
// This is to be used strictly for testing purposes.
func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode {
	return testFatalError(p, code, str)
}

// SetOAuthBearerToken sets the data to be transmitted
// to a broker during SASL/OAUTHBEARER authentication. It will return nil
// on success, otherwise an error if:
// 1) the token data is invalid (meaning an expiration time in the past
// or either a token value or an extension key or value that does not meet
// the regular expression requirements as per
// https://tools.ietf.org/html/rfc7628#section-3.1);
// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
// authentication mechanism.
func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error {
	return p.handle.setOAuthBearerToken(oauthBearerToken)
}

// SetOAuthBearerTokenFailure sets the error message describing why token
// retrieval/setting failed; it also schedules a new token refresh event for 10
// seconds later so the attempt may be retried. It will return nil on
// success, otherwise an error if:
// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
// 2) SASL/OAUTHBEARER is supported but is not configured as the client's
// authentication mechanism.
func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error {
	return p.handle.setOAuthBearerTokenFailure(errstr)
}

// Transactional API

// InitTransactions initializes transactions for the producer instance.
//
// This function ensures any transactions initiated by previous instances
// of the producer with the same `transactional.id` are completed.
// If the previous instance failed with a transaction in progress the
// previous transaction will be aborted.
// This function needs to be called before any other transactional or
// produce functions are called when the `transactional.id` is configured.
//
// If the last transaction had begun completion (following transaction commit)
// but not yet finished, this function will await the previous transaction's
// completion.
//
// When any previous transactions have been fenced this function
// will acquire the internal producer id and epoch, used in all future
// transactional messages issued by this producer instance.
//
// Upon successful return from this function the application has to perform at
// least one of the following operations within `transaction.timeout.ms` to
// avoid timing out the transaction on the broker:
// * `Produce()` (et al.)
// * `SendOffsetsToTransaction()`
// * `CommitTransaction()`
// * `AbortTransaction()`
//
// Parameters:
// * `ctx` - The maximum time to block, or nil for indefinite.
//           On timeout the operation may continue in the background,
//           depending on state, and it is okay to call `InitTransactions()`
//           again.
//
// Returns nil on success or an error on failure.
// Check whether the returned error object permits retrying
// by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal
// error has been raised by calling `err.(kafka.Error).IsFatal()`.
func (p *Producer) InitTransactions(ctx context.Context) error {
	cError := C.rd_kafka_init_transactions(p.handle.rk,
		cTimeoutFromContext(ctx))
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// BeginTransaction starts a new transaction.
//
// `InitTransactions()` must have been called successfully (once)
// before this function is called.
//
// Any messages produced, offsets sent (`SendOffsetsToTransaction()`),
// etc, after the successful return of this function will be part of
// the transaction and committed or aborted automatically.
//
// Finish the transaction by calling `CommitTransaction()` or
// abort the transaction by calling `AbortTransaction()`.
//
// Returns nil on success or an error object on failure.
// Check whether a fatal error has been raised by
// calling `err.(kafka.Error).IsFatal()`.
//
// Note: With the transactional producer, `Produce()`, et al., are only
// allowed during an on-going transaction, as started with this function.
// Any produce call outside an on-going transaction, or for a failed
// transaction, will fail.
func (p *Producer) BeginTransaction() error {
	cError := C.rd_kafka_begin_transaction(p.handle.rk)
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// SendOffsetsToTransaction sends a list of topic partition offsets to the
// consumer group coordinator for `consumerMetadata`, and marks the offsets
// as part of the current transaction.
// These offsets will be considered committed only if the transaction is
// committed successfully.
//
// The offsets should be the next message your application will consume,
// i.e., the last processed message's offset + 1 for each partition.
// Either track the offsets manually during processing or use
// `consumer.Position()` (on the consumer) to get the current offsets for
// the partitions assigned to the consumer.
//
// Use this method at the end of a consume-transform-produce loop prior
// to committing the transaction with `CommitTransaction()`.
//
// Parameters:
// * `ctx` - The maximum amount of time to block, or nil for indefinite.
// * `offsets` - List of offsets to commit to the consumer group upon
//               successful commit of the transaction. Offsets should be
//               the next message to consume, e.g., last processed message + 1.
// * `consumerMetadata` - The current consumer group metadata as returned by
//                        `consumer.GetConsumerGroupMetadata()` on the consumer
//                        instance the provided offsets were consumed from.
//
// Note: The consumer must disable auto commits (set `enable.auto.commit` to false on the consumer).
//
// Note: Logical and invalid offsets (e.g., OffsetInvalid) in
// `offsets` will be ignored. If there are no valid offsets in
// `offsets` the function will return nil and no action will be taken.
//
// Returns nil on success or an error object on failure.
// Check whether the returned error object permits retrying
// by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable
// or fatal error has been raised by calling
// `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()`
// respectively.
func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error {
	var cOffsets *C.rd_kafka_topic_partition_list_t
	if offsets != nil {
		cOffsets = newCPartsFromTopicPartitions(offsets)
		defer C.rd_kafka_topic_partition_list_destroy(cOffsets)
	}

	cgmd, err := deserializeConsumerGroupMetadata(consumerMetadata.serialized)
	if err != nil {
		return err
	}
	defer C.rd_kafka_consumer_group_metadata_destroy(cgmd)

	cError := C.rd_kafka_send_offsets_to_transaction(
		p.handle.rk,
		cOffsets,
		cgmd,
		cTimeoutFromContext(ctx))
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// CommitTransaction commits the current transaction.
//
// Any outstanding messages will be flushed (delivered) before actually
// committing the transaction.
//
// If any of the outstanding messages fail permanently the current
// transaction will enter the abortable error state and this
// function will return an abortable error; in this case the application
// must call `AbortTransaction()` before attempting a new
// transaction with `BeginTransaction()`.
//
// Parameters:
// * `ctx` - The maximum amount of time to block, or nil for indefinite.
//
// Note: This function will block until all outstanding messages are
// delivered and the transaction commit request has been successfully
// handled by the transaction coordinator, or until the `ctx` expires,
// whichever comes first. On timeout the application may
// call the function again.
//
// Note: Will automatically call `Flush()` to ensure all queued
// messages are delivered before attempting to commit the transaction.
// The application MUST serve the `producer.Events()` channel for delivery
// reports in a separate go-routine during this time.
//
// Returns nil on success or an error object on failure.
// Check whether the returned error object permits retrying
// by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable
// or fatal error has been raised by calling
// `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()`
// respectively.
func (p *Producer) CommitTransaction(ctx context.Context) error {
	cError := C.rd_kafka_commit_transaction(p.handle.rk,
		cTimeoutFromContext(ctx))
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}

// AbortTransaction aborts the ongoing transaction.
//
// This function should also be used to recover from non-fatal abortable
// transaction errors.
//
// Any outstanding messages will be purged and fail with
// `ErrPurgeInflight` or `ErrPurgeQueue`.
//
// Parameters:
// * `ctx` - The maximum amount of time to block, or nil for indefinite.
//
// Note: This function will block until all outstanding messages are purged
// and the transaction abort request has been successfully
// handled by the transaction coordinator, or until the `ctx` expires,
// whichever comes first. On timeout the application may
// call the function again.
//
// Note: Will automatically call `Purge()` and `Flush()` to ensure all queued
// and in-flight messages are purged before attempting to abort the transaction.
// The application MUST serve the `producer.Events()` channel for delivery
// reports in a separate go-routine during this time.
//
// Returns nil on success or an error object on failure.
// Check whether the returned error object permits retrying
// by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal error
// has been raised by calling `err.(kafka.Error).IsFatal()`.
func (p *Producer) AbortTransaction(ctx context.Context) error {
	cError := C.rd_kafka_abort_transaction(p.handle.rk,
		cTimeoutFromContext(ctx))
	if cError != nil {
		return newErrorFromCErrorDestroy(cError)
	}

	return nil
}
@ -0,0 +1,29 @@ |
/**
 * Copyright 2020 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This file uses a preprocessor macro defined by the various build_*.go
// files to determine whether to import the bundled librdkafka header, or
// the system one.
// This is needed because cgo will automatically add -I. to the include
// path, so <librdkafka/rdkafka.h> would find a bundled header instead of
// the system one if it were called librdkafka/rdkafka.h instead of
// librdkafka_vendor/rdkafka.h

#ifdef USE_VENDORED_LIBRDKAFKA
#include "librdkafka_vendor/rdkafka.h"
#else
#include <librdkafka/rdkafka.h>
#endif
@ -0,0 +1,8 @@ |
{
    "Brokers": "mybroker or $BROKERS env",
    "Topic": "test",
    "GroupID": "testgroup",
    "PerfMsgCount": 1000000,
    "PerfMsgSize": 100,
    "Config": ["api.version.request=true"]
}
@ -0,0 +1,248 @@ |
package kafka

/**
 * Copyright 2016 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import (
	"context"
	"encoding/json"
	"fmt"
	"math/rand"
	"os"
	"testing"
	"time"
)

/*
#include "select_rdkafka.h"
*/
import "C"

var testconf struct {
	Brokers      string
	Topic        string
	GroupID      string
	PerfMsgCount int
	PerfMsgSize  int
	Config       []string
	conf         ConfigMap
}

// testconfRead reads the test suite config file testconf.json which must
// contain at least Brokers and Topic string properties.
// Returns true if the testconf was found and usable, false if no such file, or panics
// if the file format is wrong.
func testconfRead() bool {
	cf, err := os.Open("testconf.json")
	if err != nil {
		fmt.Fprintf(os.Stderr, "%% testconf.json not found - ignoring test\n")
		return false
	}

	// Default values
	testconf.PerfMsgCount = 2000000
	testconf.PerfMsgSize = 100
	testconf.GroupID = "testgroup"

	jp := json.NewDecoder(cf)
	err = jp.Decode(&testconf)
	if err != nil {
		panic(fmt.Sprintf("Failed to parse testconf: %s", err))
	}

	cf.Close()

	if testconf.Brokers[0] == '$' {
		// Read broker list from environment variable
		testconf.Brokers = os.Getenv(testconf.Brokers[1:])
	}

	if testconf.Brokers == "" || testconf.Topic == "" {
		panic("Missing Brokers or Topic in testconf.json")
	}

	return true
}

// updateFromTestconf updates an existing ConfigMap with key=value pairs from testconf.Config
func (cm *ConfigMap) updateFromTestconf() error {
	if testconf.Config == nil {
		return nil
	}

	// Translate "key=value" pairs in Config to ConfigMap
	for _, s := range testconf.Config {
		err := cm.Set(s)
		if err != nil {
			return err
		}
	}

	return nil
}

// Return the number of messages available in all partitions of a topic.
// WARNING: This uses watermark offsets so it will be incorrect for compacted topics.
func getMessageCountInTopic(topic string) (int, error) {

	// Create consumer
	config := &ConfigMap{"bootstrap.servers": testconf.Brokers,
		"group.id": testconf.GroupID}
	config.updateFromTestconf()

	c, err := NewConsumer(config)
	if err != nil {
		return 0, err
	}
	defer c.Close()

	// get metadata for the topic to find out number of partitions
	metadata, err := c.GetMetadata(&topic, false, 5*1000)
	if err != nil {
		return 0, err
	}

	t, ok := metadata.Topics[topic]
	if !ok {
		return 0, newError(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
	}

	cnt := 0
	for _, p := range t.Partitions {
		low, high, err := c.QueryWatermarkOffsets(topic, p.ID, 5*1000)
		if err != nil {
			continue
		}
		cnt += int(high - low)
	}

	return cnt, nil
}

// getBrokerList returns a list of brokers (ids) in the cluster
func getBrokerList(H Handle) (brokers []int32, err error) {
	md, err := getMetadata(H, nil, true, 15*1000)
	if err != nil {
		return nil, err
	}

	brokers = make([]int32, len(md.Brokers))
	for i, mdBroker := range md.Brokers {
		brokers[i] = mdBroker.ID
	}

	return brokers, nil
}

// waitTopicInMetadata waits for the given topic to show up in metadata
func waitTopicInMetadata(H Handle, topic string, timeoutMs int) error {
	d, _ := time.ParseDuration(fmt.Sprintf("%dms", timeoutMs))
	tEnd := time.Now().Add(d)

	for {
		remain := tEnd.Sub(time.Now()).Seconds()
		if remain < 0.0 {
			return newErrorFromString(ErrTimedOut,
				fmt.Sprintf("Timed out waiting for topic %s to appear in metadata", topic))
		}

		md, err := getMetadata(H, nil, true, int(remain*1000))
		if err != nil {
			return err
		}

		for _, t := range md.Topics {
			if t.Topic != topic {
				continue
			}
			if t.Error.Code() != ErrNoError || len(t.Partitions) < 1 {
				continue
			}
			// Proper topic found in metadata
			return nil
		}

		time.Sleep(500 * time.Millisecond)
	}

}

func createAdminClient(t *testing.T) (a *AdminClient) {
	numver, strver := LibraryVersion()
	if numver < 0x000b0500 {
		t.Skipf("Requires librdkafka >=0.11.5 (currently on %s, 0x%x)", strver, numver)
	}

	if !testconfRead() {
		t.Skipf("Missing testconf.json")
	}

	conf := ConfigMap{"bootstrap.servers": testconf.Brokers}
	conf.updateFromTestconf()

	// Create an admin client for use by the tests.
	a, err := NewAdminClient(&conf)
	if err != nil {
		t.Fatalf("NewAdminClient: %v", err)
	}

	return a
}

func createTestTopic(t *testing.T, suffix string, numPartitions int, replicationFactor int) string {
	rand.Seed(time.Now().Unix())

	topic := fmt.Sprintf("%s-%s-%d", testconf.Topic, suffix, rand.Intn(100000))

	a := createAdminClient(t)
	defer a.Close()

	newTopics := []TopicSpecification{
		{
			Topic:             topic,
			NumPartitions:     numPartitions,
			ReplicationFactor: replicationFactor,
		},
	}

	maxDuration, err := time.ParseDuration("30s")
	if err != nil {
		t.Fatalf("%s", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), maxDuration)
||||||
|
defer cancel() |
||||||
|
result, err := a.CreateTopics(ctx, newTopics, nil) |
||||||
|
if err != nil { |
||||||
|
t.Fatalf("CreateTopics() failed: %s", err) |
||||||
|
} |
||||||
|
|
||||||
|
for _, res := range result { |
||||||
|
if res.Error.Code() != ErrNoError { |
||||||
|
t.Errorf("Failed to create topic %s: %s\n", |
||||||
|
res.Topic, res.Error) |
||||||
|
continue |
||||||
|
} |
||||||
|
|
||||||
|
} |
||||||
|
|
||||||
|
return topic |
||||||
|
} |
@ -0,0 +1,55 @@
/**
 * Copyright 2019 Confluent Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka

import "C"

import (
    "context"
    "time"
)

const (
    cTimeoutInfinite = C.int(-1) // Blocks indefinitely until completion.
    cTimeoutNoWait   = C.int(0)  // Returns immediately without blocking.
)

// cTimeoutFromContext returns the remaining time after which work done on
// behalf of this context should be canceled, in milliseconds.
//
// If no deadline/timeout is set, or if the timeout does not fit in an int32,
// it returns cTimeoutInfinite; if there is no time left in this context, it
// returns cTimeoutNoWait.
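//
// For example (an illustrative sketch, not part of the original file):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
//	defer cancel()
//	tmout := cTimeoutFromContext(ctx) // roughly C.int(2000), minus elapsed time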
func cTimeoutFromContext(ctx context.Context) C.int {
    if ctx == nil {
        return cTimeoutInfinite
    }
    timeout, hasTimeout := timeout(ctx)
    if !hasTimeout {
        return cTimeoutInfinite
    }
    if timeout <= 0 {
        return cTimeoutNoWait
    }

    timeoutMs := int64(timeout / time.Millisecond)
    if int64(int32(timeoutMs)) < timeoutMs {
        return cTimeoutInfinite
    }

    return C.int(timeoutMs)
}
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Chris Hines

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -0,0 +1,38 @@
[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack)
[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack)
[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack)
[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master)

# stack

Package stack implements utilities to capture, manipulate, and format call
stacks. It provides a simpler API than package runtime.

The implementation takes care of the minutia and special cases of interpreting
the program counter (pc) values returned by runtime.Callers.

## Versioning

Package stack publishes releases via [semver](http://semver.org/) compatible Git
tags prefixed with a single 'v'. The master branch always contains the latest
release. The develop branch contains unreleased commits.

## Formatting

Package stack's types implement fmt.Formatter, which provides a simple and
flexible way to declaratively configure formatting when used with logging or
error tracking packages.

```go
func DoTheThing() {
    c := stack.Caller(0)
    log.Print(c)         // "source.go:10"
    log.Printf("%+v", c) // "pkg/path/source.go:10"
    log.Printf("%n", c)  // "DoTheThing"

    s := stack.Trace().TrimRuntime()
    log.Print(s)         // "[source.go:15 caller.go:42 main.go:14]"
}
```

See the docs for all of the supported formatting options.
@ -0,0 +1,400 @@
// +build go1.7

// Package stack implements utilities to capture, manipulate, and format call
// stacks. It provides a simpler API than package runtime.
//
// The implementation takes care of the minutia and special cases of
// interpreting the program counter (pc) values returned by runtime.Callers.
//
// Package stack's types implement fmt.Formatter, which provides a simple and
// flexible way to declaratively configure formatting when used with logging
// or error tracking packages.
package stack

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "runtime"
    "strconv"
    "strings"
)

// Call records a single function invocation from a goroutine stack.
type Call struct {
    frame runtime.Frame
}

// Caller returns a Call from the stack of the current goroutine. The argument
// skip is the number of stack frames to ascend, with 0 identifying the
// calling function.
func Caller(skip int) Call {
    // As of Go 1.9 we need room for up to three PC entries.
    //
    // 0. An entry for the stack frame prior to the target to check for
    //    special handling needed if that prior entry is runtime.sigpanic.
    // 1. A possible second entry to hold metadata about skipped inlined
    //    functions. If inline functions were not skipped the target frame
    //    PC will be here.
    // 2. A third entry for the target frame PC when the second entry
    //    is used for skipped inline functions.
    var pcs [3]uintptr
    n := runtime.Callers(skip+1, pcs[:])
    frames := runtime.CallersFrames(pcs[:n])
    frame, _ := frames.Next()
    frame, _ = frames.Next()

    return Call{
        frame: frame,
    }
}

// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", c).
func (c Call) String() string {
    return fmt.Sprint(c)
}

// MarshalText implements encoding.TextMarshaler. It formats the Call the same
// as fmt.Sprintf("%v", c).
func (c Call) MarshalText() ([]byte, error) {
    if c.frame == (runtime.Frame{}) {
        return nil, ErrNoFunc
    }

    buf := bytes.Buffer{}
    fmt.Fprint(&buf, c)
    return buf.Bytes(), nil
}

// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
// cause is a Call with the zero value.
var ErrNoFunc = errors.New("no call stack information")

// Format implements fmt.Formatter with support for the following verbs.
//
//    %s    source file
//    %d    line number
//    %n    function name
//    %k    last segment of the package path
//    %v    equivalent to %s:%d
//
// It accepts the '+' and '#' flags for most of the verbs as follows.
//
//    %+s   path of source file relative to the compile time GOPATH,
//          or the module path joined to the path of source file relative
//          to module root
//    %#s   full path of source file
//    %+n   import path qualified function name
//    %+k   full package path
//    %+v   equivalent to %+s:%d
//    %#v   equivalent to %#s:%d
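//
// For instance (illustrative outputs, assuming a hypothetical file main.go
// in package example.com/demo):
//
//	fmt.Sprintf("%v", c)  // "main.go:17"
//	fmt.Sprintf("%+n", c) // "example.com/demo.DoTheThing"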
func (c Call) Format(s fmt.State, verb rune) {
    if c.frame == (runtime.Frame{}) {
        fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
        return
    }

    switch verb {
    case 's', 'v':
        file := c.frame.File
        switch {
        case s.Flag('#'):
            // done
        case s.Flag('+'):
            file = pkgFilePath(&c.frame)
        default:
            const sep = "/"
            if i := strings.LastIndex(file, sep); i != -1 {
                file = file[i+len(sep):]
            }
        }
        io.WriteString(s, file)
        if verb == 'v' {
            buf := [7]byte{':'}
            s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10))
        }

    case 'd':
        buf := [6]byte{}
        s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10))

    case 'k':
        name := c.frame.Function
        const pathSep = "/"
        start, end := 0, len(name)
        if i := strings.LastIndex(name, pathSep); i != -1 {
            start = i + len(pathSep)
        }
        const pkgSep = "."
        if i := strings.Index(name[start:], pkgSep); i != -1 {
            end = start + i
        }
        if s.Flag('+') {
            start = 0
        }
        io.WriteString(s, name[start:end])

    case 'n':
        name := c.frame.Function
        if !s.Flag('+') {
            const pathSep = "/"
            if i := strings.LastIndex(name, pathSep); i != -1 {
                name = name[i+len(pathSep):]
            }
            const pkgSep = "."
            if i := strings.Index(name, pkgSep); i != -1 {
                name = name[i+len(pkgSep):]
            }
        }
        io.WriteString(s, name)
    }
}

// Frame returns the call frame information for the Call.
func (c Call) Frame() runtime.Frame {
    return c.frame
}

// PC returns the program counter for this call frame; multiple frames may
// have the same PC value.
//
// Deprecated: Use Call.Frame instead.
func (c Call) PC() uintptr {
    return c.frame.PC
}

// CallStack records a sequence of function invocations from a goroutine
// stack.
type CallStack []Call

// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", cs).
func (cs CallStack) String() string {
    return fmt.Sprint(cs)
}

var (
    openBracketBytes  = []byte("[")
    closeBracketBytes = []byte("]")
    spaceBytes        = []byte(" ")
)

// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
// same as fmt.Sprintf("%v", cs).
func (cs CallStack) MarshalText() ([]byte, error) {
    buf := bytes.Buffer{}
    buf.Write(openBracketBytes)
    for i, pc := range cs {
        if i > 0 {
            buf.Write(spaceBytes)
        }
        fmt.Fprint(&buf, pc)
    }
    buf.Write(closeBracketBytes)
    return buf.Bytes(), nil
}

// Format implements fmt.Formatter by printing the CallStack as square brackets
// ([, ]) surrounding a space separated list of Calls each formatted with the
// supplied verb and options.
func (cs CallStack) Format(s fmt.State, verb rune) {
    s.Write(openBracketBytes)
    for i, pc := range cs {
        if i > 0 {
            s.Write(spaceBytes)
        }
        pc.Format(s, verb)
    }
    s.Write(closeBracketBytes)
}

// Trace returns a CallStack for the current goroutine with element 0
// identifying the calling function.
func Trace() CallStack {
    var pcs [512]uintptr
    n := runtime.Callers(1, pcs[:])

    frames := runtime.CallersFrames(pcs[:n])
    cs := make(CallStack, 0, n)

    // Skip extra frame retrieved just to make sure the runtime.sigpanic
    // special case is handled.
    frame, more := frames.Next()

    for more {
        frame, more = frames.Next()
        cs = append(cs, Call{frame: frame})
    }

    return cs
}

// TrimBelow returns a slice of the CallStack with all entries below c
// removed.
func (cs CallStack) TrimBelow(c Call) CallStack {
    for len(cs) > 0 && cs[0] != c {
        cs = cs[1:]
    }
    return cs
}

// TrimAbove returns a slice of the CallStack with all entries above c
// removed.
func (cs CallStack) TrimAbove(c Call) CallStack {
    for len(cs) > 0 && cs[len(cs)-1] != c {
        cs = cs[:len(cs)-1]
    }
    return cs
}

// pkgIndex returns the index that results in file[index:] being the path of
// file relative to the compile time GOPATH, and file[:index] being the
// $GOPATH/src/ portion of file. funcName must be the name of a function in
// file as returned by runtime.Func.Name.
func pkgIndex(file, funcName string) int {
    // As of Go 1.6.2 there is no direct way to know the compile time GOPATH
    // at runtime, but we can infer the number of path segments in the GOPATH.
    // We note that runtime.Func.Name() returns the function name qualified by
    // the import path, which does not include the GOPATH. Thus we can trim
    // segments from the beginning of the file path until the number of path
    // separators remaining is one more than the number of path separators in
    // the function name. For example, given:
    //
    //    GOPATH     /home/user
    //    file       /home/user/src/pkg/sub/file.go
    //    fn.Name()  pkg/sub.Type.Method
    //
    // We want to produce:
    //
    //    file[:idx] == /home/user/src/
    //    file[idx:] == pkg/sub/file.go
    //
    // From this we can easily see that fn.Name() has one less path separator
    // than our desired result for file[idx:]. We count separators from the
    // end of the file path until we find two more than in the function name
    // and then move one character forward to preserve the initial path
    // segment without a leading separator.
    const sep = "/"
    i := len(file)
    for n := strings.Count(funcName, sep) + 2; n > 0; n-- {
        i = strings.LastIndex(file[:i], sep)
        if i == -1 {
            i = -len(sep)
            break
        }
    }
    // get back to 0 or trim the leading separator
    return i + len(sep)
}

// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH,
// or its module path joined to its path relative to the module root.
//
// As of Go 1.11 there is no direct way to know the compile time GOPATH or
// module paths at runtime, but we can piece together the desired information
// from available information. We note that runtime.Frame.Function contains the
// function name qualified by the package path, which includes the module path
// but not the GOPATH. We can extract the package path from that and append the
// last segments of the file path to arrive at the desired package qualified
// file path. For example, given:
//
//    GOPATH          /home/user
//    import path     pkg/sub
//    frame.File      /home/user/src/pkg/sub/file.go
//    frame.Function  pkg/sub.Type.Method
//    Desired return  pkg/sub/file.go
//
// It appears that we simply need to trim ".Type.Method" from frame.Function and
// append "/" + path.Base(file).
//
// But there are other wrinkles. Although it is idiomatic to do so, the internal
// name of a package is not required to match the last segment of its import
// path. In addition, the introduction of modules in Go 1.11 allows working
// without a GOPATH. So we also must make these work right:
//
//    GOPATH          /home/user
//    import path     pkg/go-sub
//    package name    sub
//    frame.File      /home/user/src/pkg/go-sub/file.go
//    frame.Function  pkg/sub.Type.Method
//    Desired return  pkg/go-sub/file.go
//
//    Module path     pkg/v2
//    import path     pkg/v2/go-sub
//    package name    sub
//    frame.File      /home/user/cloned-pkg/go-sub/file.go
//    frame.Function  pkg/v2/sub.Type.Method
//    Desired return  pkg/v2/go-sub/file.go
//
// We can handle all of these situations by using the package path extracted
// from frame.Function up to, but not including, the last segment as the prefix
// and the last two segments of frame.File as the suffix of the returned path.
// This preserves the existing behavior when working in a GOPATH without modules
// and a semantically equivalent behavior when used in a module-aware project.
func pkgFilePath(frame *runtime.Frame) string {
    pre := pkgPrefix(frame.Function)
    post := pathSuffix(frame.File)
    if pre == "" {
        return post
    }
    return pre + "/" + post
}

// pkgPrefix returns the import path of the function's package with the final
// segment removed.
func pkgPrefix(funcName string) string {
    const pathSep = "/"
    end := strings.LastIndex(funcName, pathSep)
    if end == -1 {
        return ""
    }
    return funcName[:end]
}

// pathSuffix returns the last two segments of path.
func pathSuffix(path string) string {
    const pathSep = "/"
    lastSep := strings.LastIndex(path, pathSep)
    if lastSep == -1 {
        return path
    }
    return path[strings.LastIndex(path[:lastSep], pathSep)+1:]
}

var runtimePath string

func init() {
    var pcs [3]uintptr
    runtime.Callers(0, pcs[:])
    frames := runtime.CallersFrames(pcs[:])
    frame, _ := frames.Next()
    file := frame.File

    idx := pkgIndex(frame.File, frame.Function)

    runtimePath = file[:idx]
    if runtime.GOOS == "windows" {
        runtimePath = strings.ToLower(runtimePath)
    }
}

func inGoroot(c Call) bool {
    file := c.frame.File
    if len(file) == 0 || file[0] == '?' {
        return true
    }
    if runtime.GOOS == "windows" {
        file = strings.ToLower(file)
    }
    return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go")
}

// TrimRuntime returns a slice of the CallStack with the topmost entries from
// the go runtime removed. It considers any calls originating from unknown
// files, files under GOROOT, or _testmain.go as part of the runtime.
func (cs CallStack) TrimRuntime() CallStack {
    for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
        cs = cs[:len(cs)-1]
    }
    return cs
}
@ -0,0 +1,16 @@
cmd/snappytool/snappytool
testdata/bench

# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K
@ -0,0 +1,18 @@
# This is the official list of Snappy-Go authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as
#     Name or Organization <email address>
# The email address is not required for organizations.

# Please keep the list sorted.

Amazon.com, Inc
Damian Gryski <dgryski@gmail.com>
Eric Buth <eric@topos.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Klaus Post <klauspost@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>
@ -0,0 +1,41 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the Snappy-Go repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
#     http://code.google.com/legal/individual-cla-v1.0.html
#     http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.

# Names should be added to this file like so:
#     Name <email address>

# Please keep the list sorted.

Alex Legg <alexlegg@google.com>
Damian Gryski <dgryski@gmail.com>
Eric Buth <eric@topos.com>
Jan Mercl <0xjnml@gmail.com>
Jonathan Swinney <jswinney@amazon.com>
Kai Backman <kaib@golang.org>
Klaus Post <klauspost@gmail.com>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>
@ -0,0 +1,27 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -0,0 +1,107 @@
The Snappy compression format in the Go programming language.

To download and install from source:
$ go get github.com/golang/snappy

Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.

Benchmarks.

The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:

"go test -test.bench=."

_UFlat0-8   2.19GB/s ± 0%  html
_UFlat1-8   1.41GB/s ± 0%  urls
_UFlat2-8   23.5GB/s ± 2%  jpg
_UFlat3-8   1.91GB/s ± 0%  jpg_200
_UFlat4-8   14.0GB/s ± 1%  pdf
_UFlat5-8   1.97GB/s ± 0%  html4
_UFlat6-8    814MB/s ± 0%  txt1
_UFlat7-8    785MB/s ± 0%  txt2
_UFlat8-8    857MB/s ± 0%  txt3
_UFlat9-8    719MB/s ± 1%  txt4
_UFlat10-8  2.84GB/s ± 0%  pb
_UFlat11-8  1.05GB/s ± 0%  gaviota

_ZFlat0-8   1.04GB/s ± 0%  html
_ZFlat1-8    534MB/s ± 0%  urls
_ZFlat2-8   15.7GB/s ± 1%  jpg
_ZFlat3-8    740MB/s ± 3%  jpg_200
_ZFlat4-8   9.20GB/s ± 1%  pdf
_ZFlat5-8    991MB/s ± 0%  html4
_ZFlat6-8    379MB/s ± 0%  txt1
_ZFlat7-8    352MB/s ± 0%  txt2
_ZFlat8-8    396MB/s ± 1%  txt3
_ZFlat9-8    327MB/s ± 1%  txt4
_ZFlat10-8  1.33GB/s ± 1%  pb
_ZFlat11-8   605MB/s ± 1%  gaviota

"go test -test.bench=. -tags=noasm"

_UFlat0-8    621MB/s ± 2%  html
_UFlat1-8    494MB/s ± 1%  urls
_UFlat2-8   23.2GB/s ± 1%  jpg
_UFlat3-8   1.12GB/s ± 1%  jpg_200
_UFlat4-8   4.35GB/s ± 1%  pdf
_UFlat5-8    609MB/s ± 0%  html4
_UFlat6-8    296MB/s ± 0%  txt1
_UFlat7-8    288MB/s ± 0%  txt2
_UFlat8-8    309MB/s ± 1%  txt3
_UFlat9-8    280MB/s ± 1%  txt4
_UFlat10-8   753MB/s ± 0%  pb
_UFlat11-8   400MB/s ± 0%  gaviota

_ZFlat0-8    409MB/s ± 1%  html
_ZFlat1-8    250MB/s ± 1%  urls
_ZFlat2-8   12.3GB/s ± 1%  jpg
_ZFlat3-8    132MB/s ± 0%  jpg_200
_ZFlat4-8   2.92GB/s ± 0%  pdf
_ZFlat5-8    405MB/s ± 1%  html4
_ZFlat6-8    179MB/s ± 1%  txt1
_ZFlat7-8    170MB/s ± 1%  txt2
_ZFlat8-8    189MB/s ± 1%  txt3
_ZFlat9-8    164MB/s ± 1%  txt4
_ZFlat10-8   479MB/s ± 1%  pb
_ZFlat11-8   270MB/s ± 1%  gaviota

For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
are the numbers from C++ Snappy's

make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log

BM_UFlat/0     2.4GB/s  html
BM_UFlat/1     1.4GB/s  urls
BM_UFlat/2    21.8GB/s  jpg
BM_UFlat/3     1.5GB/s  jpg_200
BM_UFlat/4    13.3GB/s  pdf
BM_UFlat/5     2.1GB/s  html4
BM_UFlat/6     1.0GB/s  txt1
BM_UFlat/7   959.4MB/s  txt2
BM_UFlat/8     1.0GB/s  txt3
BM_UFlat/9   864.5MB/s  txt4
BM_UFlat/10    2.9GB/s  pb
BM_UFlat/11    1.2GB/s  gaviota

BM_ZFlat/0   944.3MB/s  html (22.31 %)
BM_ZFlat/1   501.6MB/s  urls (47.78 %)
BM_ZFlat/2    14.3GB/s  jpg (99.95 %)
BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %)
BM_ZFlat/4     8.3GB/s  pdf (83.30 %)
BM_ZFlat/5   903.5MB/s  html4 (22.52 %)
BM_ZFlat/6   336.0MB/s  txt1 (57.88 %)
BM_ZFlat/7   312.3MB/s  txt2 (61.91 %)
BM_ZFlat/8   353.1MB/s  txt3 (54.99 %)
BM_ZFlat/9   289.9MB/s  txt4 (66.26 %)
BM_ZFlat/10    1.2GB/s  pb (19.68 %)
BM_ZFlat/11  527.4MB/s  gaviota (37.72 %)
@ -0,0 +1,264 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package snappy

import (
    "encoding/binary"
    "errors"
    "io"
)

var (
    // ErrCorrupt reports that the input is invalid.
    ErrCorrupt = errors.New("snappy: corrupt input")
    // ErrTooLarge reports that the uncompressed length is too large.
    ErrTooLarge = errors.New("snappy: decoded block is too large")
    // ErrUnsupported reports that the input isn't supported.
    ErrUnsupported = errors.New("snappy: unsupported input")

    errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)

// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
    v, _, err := decodedLen(src)
    return v, err
}

// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
    v, n := binary.Uvarint(src)
    if n <= 0 || v > 0xffffffff {
        return 0, 0, ErrCorrupt
    }

    const wordSize = 32 << (^uint(0) >> 32 & 1)
    if wordSize == 32 && v > 0x7fffffff {
        return 0, 0, ErrTooLarge
    }
    return int(v), n, nil
}

const (
    decodeErrCodeCorrupt                  = 1
    decodeErrCodeUnsupportedLiteralLength = 2
)

// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// Decode handles the Snappy block format, not the Snappy stream format.
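//
// For example (an illustrative sketch, not part of the original file;
// encoded is assumed to hold one Snappy block):
//
//	decoded, err := Decode(nil, encoded)
//	if err != nil {
//		// the input was not a valid Snappy block
//	}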
func Decode(dst, src []byte) ([]byte, error) {
    dLen, s, err := decodedLen(src)
    if err != nil {
        return nil, err
    }
    if dLen <= len(dst) {
        dst = dst[:dLen]
    } else {
        dst = make([]byte, dLen)
    }
    switch decode(dst, src[s:]) {
    case 0:
        return dst, nil
    case decodeErrCodeUnsupportedLiteralLength:
        return nil, errUnsupportedLiteralLength
    }
    return nil, ErrCorrupt
}

// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewReader(r io.Reader) *Reader {
    return &Reader{
        r:       r,
        decoded: make([]byte, maxBlockSize),
        buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
    }
}

// Reader is an io.Reader that can read Snappy-compressed bytes.
//
// Reader handles the Snappy stream format, not the Snappy block format.
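//
// A typical use might look like this (an illustrative sketch; the file name
// and the os/io plumbing are assumptions, not part of the original file):
//
//	f, _ := os.Open("data.sz")
//	r := NewReader(f)
//	io.Copy(os.Stdout, r) // streams the decompressed bytes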
type Reader struct {
    r       io.Reader
    err     error
    decoded []byte
    buf     []byte
    // decoded[i:j] contains decoded bytes that have not yet been passed on.
    i, j       int
    readHeader bool
}

// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
    r.r = reader
    r.err = nil
    r.i = 0
    r.j = 0
    r.readHeader = false
}

func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
    if _, r.err = io.ReadFull(r.r, p); r.err != nil {
        if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
            r.err = ErrCorrupt
        }
        return false
    }
    return true
}

func (r *Reader) fill() error {
    for r.i >= r.j {
        if !r.readFull(r.buf[:4], true) {
            return r.err
        }
        chunkType := r.buf[0]
        if !r.readHeader {
            if chunkType != chunkTypeStreamIdentifier {
                r.err = ErrCorrupt
                return r.err
            }
            r.readHeader = true
        }
        chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
        if chunkLen > len(r.buf) {
            r.err = ErrUnsupported
            return r.err
        }

        // The chunk types are specified at
        // https://github.com/google/snappy/blob/master/framing_format.txt
        switch chunkType {
        case chunkTypeCompressedData:
            // Section 4.2. Compressed data (chunk type 0x00).
            if chunkLen < checksumSize {
                r.err = ErrCorrupt
                return r.err
            }
            buf := r.buf[:chunkLen]
            if !r.readFull(buf, false) {
                return r.err
            }
            checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
            buf = buf[checksumSize:]

            n, err := DecodedLen(buf)
            if err != nil {
                r.err = err
                return r.err
            }
            if n > len(r.decoded) {
                r.err = ErrCorrupt
                return r.err
            }
            if _, err := Decode(r.decoded, buf); err != nil {
                r.err = err
                return r.err
            }
            if crc(r.decoded[:n]) != checksum {
                r.err = ErrCorrupt
                return r.err
            }
            r.i, r.j = 0, n
            continue

        case chunkTypeUncompressedData:
            // Section 4.3. Uncompressed data (chunk type 0x01).
            if chunkLen < checksumSize {
                r.err = ErrCorrupt
                return r.err
            }
            buf := r.buf[:checksumSize]
            if !r.readFull(buf, false) {
                return r.err
            }
            checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
            // Read directly into r.decoded instead of via r.buf.
            n := chunkLen - checksumSize
            if n > len(r.decoded) {
                r.err = ErrCorrupt
                return r.err
            }
            if !r.readFull(r.decoded[:n], false) {
                return r.err
            }
            if crc(r.decoded[:n]) != checksum {
                r.err = ErrCorrupt
                return r.err
            }
            r.i, r.j = 0, n
            continue

        case chunkTypeStreamIdentifier:
            // Section 4.1. Stream identifier (chunk type 0xff).
            if chunkLen != len(magicBody) {
                r.err = ErrCorrupt
                return r.err
            }
            if !r.readFull(r.buf[:len(magicBody)], false) {
                return r.err
            }
            for i := 0; i < len(magicBody); i++ {
                if r.buf[i] != magicBody[i] {
                    r.err = ErrCorrupt
                    return r.err
                }
            }
            continue
        }

        if chunkType <= 0x7f {
            // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
            r.err = ErrUnsupported
            return r.err
        }
        // Section 4.4 Padding (chunk type 0xfe).
        // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
        if !r.readFull(r.buf[:chunkLen], false) {
            return r.err
        }
    }

    return nil
}

// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
    if r.err != nil {
        return 0, r.err
    }

    if err := r.fill(); err != nil {
        return 0, err
    }

    n := copy(p, r.decoded[r.i:r.j])
    r.i += n
    return n, nil
}

// ReadByte satisfies the io.ByteReader interface.
func (r *Reader) ReadByte() (byte, error) {
    if r.err != nil {
        return 0, r.err
    }

    if err := r.fill(); err != nil {
        return 0, err
    }

    c := r.decoded[r.i]
    r.i++
    return c, nil
}
@ -0,0 +1,490 @@ |
|||||||
|
// Copyright 2016 The Go Authors. All rights reserved. |
||||||
|
// Use of this source code is governed by a BSD-style |
||||||
|
// license that can be found in the LICENSE file. |
||||||
|
|
||||||
|
// +build !appengine |
||||||
|
// +build gc |
||||||
|
// +build !noasm |
||||||
|
|
||||||
|
#include "textflag.h" |
||||||
|
|
||||||
|
// The asm code generally follows the pure Go code in decode_other.go, except |
||||||
|
// where marked with a "!!!". |
||||||
|
|
||||||
|
// func decode(dst, src []byte) int |
||||||
|
// |
||||||
|
// All local variables fit into registers. The non-zero stack size is only to |
||||||
|
// spill registers and push args when issuing a CALL. The register allocation: |
||||||
|
// - AX scratch |
||||||
|
// - BX scratch |
||||||
|
// - CX length or x |
||||||
|
// - DX offset |
||||||
|
// - SI &src[s] |
||||||
|
// - DI &dst[d] |
||||||
|
// + R8 dst_base |
||||||
|
// + R9 dst_len |
||||||
|
// + R10 dst_base + dst_len |
||||||
|
// + R11 src_base |
||||||
|
// + R12 src_len |
||||||
|
// + R13 src_base + src_len |
||||||
|
// - R14 used by doCopy |
||||||
|
// - R15 used by doCopy |
||||||
|
// |
||||||
|
// The registers R8-R13 (marked with a "+") are set at the start of the |
||||||
|
// function, and after a CALL returns, and are not otherwise modified. |
||||||
|
// |
||||||
|
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. |
||||||
|
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. |
||||||
|
TEXT ·decode(SB), NOSPLIT, $48-56 |
||||||
|
// Initialize SI, DI and R8-R13. |
||||||
|
MOVQ dst_base+0(FP), R8 |
||||||
|
MOVQ dst_len+8(FP), R9 |
||||||
|
MOVQ R8, DI |
||||||
|
MOVQ R8, R10 |
||||||
|
ADDQ R9, R10 |
||||||
|
MOVQ src_base+24(FP), R11 |
||||||
|
MOVQ src_len+32(FP), R12 |
||||||
|
MOVQ R11, SI |
||||||
|
MOVQ R11, R13 |
||||||
|
ADDQ R12, R13 |
||||||
|
|
||||||
|
loop: |
||||||
|
// for s < len(src) |
||||||
|
CMPQ SI, R13 |
||||||
|
JEQ end |
||||||
|
|
||||||
|
// CX = uint32(src[s]) |
||||||
|
// |
||||||
|
// switch src[s] & 0x03 |
||||||
|
MOVBLZX (SI), CX |
||||||
|
MOVL CX, BX |
||||||
|
ANDL $3, BX |
||||||
|
CMPL BX, $1 |
||||||
|
JAE tagCopy |
||||||
|
|
||||||
|
// ---------------------------------------- |
||||||
|
// The code below handles literal tags. |
||||||
|
|
||||||
|
// case tagLiteral: |
||||||
|
// x := uint32(src[s] >> 2) |
||||||
|
// switch |
||||||
|
SHRL $2, CX |
||||||
|
CMPL CX, $60 |
||||||
|
JAE tagLit60Plus |
||||||
|
|
||||||
|
// case x < 60: |
||||||
|
// s++ |
||||||
|
INCQ SI |
||||||
|
|
||||||
|
doLit: |
||||||
|
// This is the end of the inner "switch", when we have a literal tag. |
||||||
|
// |
||||||
|
// We assume that CX == x and x fits in a uint32, where x is the variable |
||||||
|
// used in the pure Go decode_other.go code. |
||||||
|
|
||||||
|
// length = int(x) + 1 |
||||||
|
// |
||||||
|
// Unlike the pure Go code, we don't need to check if length <= 0 because |
||||||
|
// CX can hold 64 bits, so the increment cannot overflow. |
||||||
|
INCQ CX |
||||||
|
|
||||||
|
// Prepare to check if copying length bytes will run past the end of dst or |
||||||
|
// src. |
||||||
|
// |
||||||
|
// AX = len(dst) - d |
||||||
|
// BX = len(src) - s |
||||||
|
MOVQ R10, AX |
||||||
|
SUBQ DI, AX |
||||||
|
MOVQ R13, BX |
||||||
|
SUBQ SI, BX |
||||||
|
|
||||||
|
// !!! Try a faster technique for short (16 or fewer bytes) copies. |
||||||
|
// |
||||||
|
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { |
||||||
|
// goto callMemmove // Fall back on calling runtime·memmove. |
||||||
|
// } |
||||||
|
// |
||||||
|
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s |
||||||
|
// against 21 instead of 16, because it cannot assume that all of its input |
||||||
|
// is contiguous in memory and so it needs to leave enough source bytes to |
||||||
|
// read the next tag without refilling buffers, but Go's Decode assumes |
||||||
|
// contiguousness (the src argument is a []byte). |
||||||
|
CMPQ CX, $16 |
||||||
|
JGT callMemmove |
||||||
|
CMPQ AX, $16 |
||||||
|
JLT callMemmove |
||||||
|
CMPQ BX, $16 |
||||||
|
JLT callMemmove |
||||||
|
|
||||||
|
// !!! Implement the copy from src to dst as a 16-byte load and store. |
||||||
|
// (Decode's documentation says that dst and src must not overlap.) |
||||||
|
// |
||||||
|
// This always copies 16 bytes, instead of only length bytes, but that's |
||||||
|
// OK. If the input is a valid Snappy encoding then subsequent iterations |
||||||
|
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a |
||||||
|
// non-nil error), so the overrun will be ignored. |
||||||
|
// |
||||||
|
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or |
||||||
|
// 16-byte loads and stores. This technique probably wouldn't be as |
||||||
|
// effective on architectures that are fussier about alignment. |
||||||
|
MOVOU 0(SI), X0 |
||||||
|
MOVOU X0, 0(DI) |
||||||
|
|
||||||
|
// d += length |
||||||
|
// s += length |
||||||
|
ADDQ CX, DI |
||||||
|
ADDQ CX, SI |
||||||
|
JMP loop |
||||||
|
|
||||||
|
callMemmove: |
||||||
|
// if length > len(dst)-d || length > len(src)-s { etc } |
||||||
|
CMPQ CX, AX |
||||||
|
JGT errCorrupt |
||||||
|
CMPQ CX, BX |
||||||
|
JGT errCorrupt |
||||||
|
|
||||||
|
// copy(dst[d:], src[s:s+length]) |
||||||
|
// |
||||||
|
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push |
||||||
|
// DI, SI and CX as arguments. Coincidentally, we also need to spill those |
||||||
|
// three registers to the stack, to save local variables across the CALL. |
||||||
|
MOVQ DI, 0(SP) |
||||||
|
MOVQ SI, 8(SP) |
||||||
|
MOVQ CX, 16(SP) |
||||||
|
MOVQ DI, 24(SP) |
||||||
|
MOVQ SI, 32(SP) |
||||||
|
MOVQ CX, 40(SP) |
||||||
|
CALL runtime·memmove(SB) |
||||||
|
|
||||||
|
// Restore local variables: unspill registers from the stack and |
||||||
|
// re-calculate R8-R13. |
||||||
|
MOVQ 24(SP), DI |
||||||
|
MOVQ 32(SP), SI |
||||||
|
MOVQ 40(SP), CX |
||||||
|
MOVQ dst_base+0(FP), R8 |
||||||
|
MOVQ dst_len+8(FP), R9 |
||||||
|
MOVQ R8, R10 |
||||||
|
ADDQ R9, R10 |
||||||
|
MOVQ src_base+24(FP), R11 |
||||||
|
MOVQ src_len+32(FP), R12 |
||||||
|
MOVQ R11, R13 |
||||||
|
ADDQ R12, R13 |
||||||
|
|
||||||
|
// d += length |
||||||
|
// s += length |
||||||
|
ADDQ CX, DI |
||||||
|
ADDQ CX, SI |
||||||
|
JMP loop |
||||||
|
|
||||||
|
tagLit60Plus: |
||||||
|
// !!! This fragment does the |
||||||
|
// |
||||||
|
// s += x - 58; if uint(s) > uint(len(src)) { etc }
|
||||||
|
// |
||||||
|
// checks. In the asm version, we code it once instead of once per switch case. |
||||||
|
ADDQ CX, SI |
||||||
|
SUBQ $58, SI |
||||||
|
MOVQ SI, BX |
||||||
|
SUBQ R11, BX |
||||||
|
CMPQ BX, R12 |
||||||
|
JA errCorrupt |
||||||
|
|
||||||
|
// case x == 60: |
||||||
|
CMPL CX, $61 |
||||||
|
JEQ tagLit61 |
||||||
|
JA tagLit62Plus |
||||||
|
|
||||||
|
// x = uint32(src[s-1]) |
||||||
|
MOVBLZX -1(SI), CX |
||||||
|
JMP doLit |
||||||
|
|
||||||
|
tagLit61: |
||||||
|
// case x == 61: |
||||||
|
// x = uint32(src[s-2]) | uint32(src[s-1])<<8 |
||||||
|
MOVWLZX -2(SI), CX |
||||||
|
JMP doLit |
||||||
|
|
||||||
|
tagLit62Plus: |
||||||
|
CMPL CX, $62 |
||||||
|
JA tagLit63 |
||||||
|
|
||||||
|
// case x == 62: |
||||||
|
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 |
||||||
|
MOVWLZX -3(SI), CX |
||||||
|
MOVBLZX -1(SI), BX |
||||||
|
SHLL $16, BX |
||||||
|
ORL BX, CX |
||||||
|
JMP doLit |
||||||
|
|
||||||
|
tagLit63: |
||||||
|
// case x == 63: |
||||||
|
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 |
||||||
|
MOVL -4(SI), CX |
||||||
|
JMP doLit |
||||||
|
|
||||||
|
// The code above handles literal tags. |
||||||
|
// ---------------------------------------- |
||||||
|
// The code below handles copy tags. |
||||||
|
|
||||||
|
tagCopy4: |
||||||
|
// case tagCopy4: |
||||||
|
// s += 5 |
||||||
|
ADDQ $5, SI |
||||||
|
|
||||||
|
// if uint(s) > uint(len(src)) { etc } |
||||||
|
MOVQ SI, BX |
||||||
|
SUBQ R11, BX |
||||||
|
CMPQ BX, R12 |
||||||
|
JA errCorrupt |
||||||
|
|
||||||
|
// length = 1 + int(src[s-5])>>2 |
||||||
|
SHRQ $2, CX |
||||||
|
INCQ CX |
||||||
|
|
||||||
|
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) |
||||||
|
MOVLQZX -4(SI), DX |
||||||
|
JMP doCopy |
||||||
|
|
||||||
|
tagCopy2: |
||||||
|
// case tagCopy2: |
||||||
|
// s += 3 |
||||||
|
ADDQ $3, SI |
||||||
|
|
||||||
|
// if uint(s) > uint(len(src)) { etc } |
||||||
|
MOVQ SI, BX |
||||||
|
SUBQ R11, BX |
||||||
|
CMPQ BX, R12 |
||||||
|
JA errCorrupt |
||||||
|
|
||||||
|
// length = 1 + int(src[s-3])>>2 |
||||||
|
SHRQ $2, CX |
||||||
|
INCQ CX |
||||||
|
|
||||||
|
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) |
||||||
|
MOVWQZX -2(SI), DX |
||||||
|
JMP doCopy |
||||||
|
|
||||||
|
tagCopy: |
||||||
|
// We have a copy tag. We assume that: |
||||||
|
// - BX == src[s] & 0x03 |
||||||
|
// - CX == src[s] |
||||||
|
CMPQ BX, $2 |
||||||
|
JEQ tagCopy2 |
||||||
|
JA tagCopy4 |
||||||
|
|
||||||
|
// case tagCopy1: |
||||||
|
// s += 2 |
||||||
|
ADDQ $2, SI |
||||||
|
|
||||||
|
// if uint(s) > uint(len(src)) { etc } |
||||||
|
MOVQ SI, BX |
||||||
|
SUBQ R11, BX |
||||||
|
CMPQ BX, R12 |
||||||
|
JA errCorrupt |
||||||
|
|
||||||
|
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) |
||||||
|
MOVQ CX, DX |
||||||
|
ANDQ $0xe0, DX |
||||||
|
SHLQ $3, DX |
||||||
|
MOVBQZX -1(SI), BX |
||||||
|
ORQ BX, DX |
||||||
|
|
||||||
|
// length = 4 + int(src[s-2])>>2&0x7 |
||||||
|
SHRQ $2, CX |
||||||
|
ANDQ $7, CX |
||||||
|
ADDQ $4, CX |
||||||
|
|
||||||
|
doCopy: |
||||||
|
// This is the end of the outer "switch", when we have a copy tag. |
||||||
|
// |
||||||
|
// We assume that: |
||||||
|
// - CX == length && CX > 0 |
||||||
|
// - DX == offset |
||||||
|
|
||||||
|
// if offset <= 0 { etc } |
||||||
|
CMPQ DX, $0 |
||||||
|
JLE errCorrupt |
||||||
|
|
||||||
|
// if d < offset { etc } |
||||||
|
MOVQ DI, BX |
||||||
|
SUBQ R8, BX |
||||||
|
CMPQ BX, DX |
||||||
|
JLT errCorrupt |
||||||
|
|
||||||
|
// if length > len(dst)-d { etc } |
||||||
|
MOVQ R10, BX |
||||||
|
SUBQ DI, BX |
||||||
|
CMPQ CX, BX |
||||||
|
JGT errCorrupt |
||||||
|
|
||||||
|
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
|
||||||
|
// |
||||||
|
// Set: |
||||||
|
// - R14 = len(dst)-d |
||||||
|
// - R15 = &dst[d-offset] |
||||||
|
MOVQ R10, R14 |
||||||
|
SUBQ DI, R14 |
||||||
|
MOVQ DI, R15 |
||||||
|
SUBQ DX, R15 |
||||||
|
|
||||||
|
// !!! Try a faster technique for short (16 or fewer bytes) forward copies. |
||||||
|
// |
||||||
|
// First, try using two 8-byte load/stores, similar to the doLit technique |
||||||
|
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is |
||||||
|
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores |
||||||
|
// and not one 16-byte load/store, and the first store has to be before the |
||||||
|
// second load, due to the overlap if offset is in the range [8, 16). |
||||||
|
// |
||||||
|
// if length > 16 || offset < 8 || len(dst)-d < 16 { |
||||||
|
// goto slowForwardCopy |
||||||
|
// } |
||||||
|
// copy 16 bytes |
||||||
|
// d += length |
||||||
|
CMPQ CX, $16 |
||||||
|
JGT slowForwardCopy |
||||||
|
CMPQ DX, $8 |
||||||
|
JLT slowForwardCopy |
||||||
|
CMPQ R14, $16 |
||||||
|
JLT slowForwardCopy |
||||||
|
MOVQ 0(R15), AX |
||||||
|
MOVQ AX, 0(DI) |
||||||
|
MOVQ 8(R15), BX |
||||||
|
MOVQ BX, 8(DI) |
||||||
|
ADDQ CX, DI |
||||||
|
JMP loop |
||||||
|

slowForwardCopy:
	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
	// can still try 8-byte load/stores, provided we can overrun up to 10 extra
	// bytes. As above, the overrun will be fixed up by subsequent iterations
	// of the outermost loop.
	//
	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
	// commentary says:
	//
	// ----
	//
	// The main part of this loop is a simple copy of eight bytes at a time
	// until we've copied (at least) the requested amount of bytes. However,
	// if d and d-offset are less than eight bytes apart (indicating a
	// repeating pattern of length < 8), we first need to expand the pattern in
	// order to get the correct results. For instance, if the buffer looks like
	// this, with the eight-byte <d-offset> and <d> patterns marked as
	// intervals:
	//
	//    abxxxxxxxxxxxx
	//    [------]           d-offset
	//      [------]         d
	//
	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
	// once, after which we can move <d> two bytes without moving <d-offset>:
	//
	//    ababxxxxxxxxxx
	//    [------]           d-offset
	//        [------]       d
	//
	// and repeat the exercise until the two no longer overlap.
	//
	// This allows us to do very well in the special case of one single byte
	// repeated many times, without taking a big hit for more general cases.
	//
	// The worst case of extra writing past the end of the match occurs when
	// offset == 1 and length == 1; the last copy will read from byte positions
	// [0..7] and write to [4..11], whereas it was only supposed to write to
	// position 1. Thus, ten excess bytes.
	//
	// ----
	//
	// That "10 byte overrun" worst case is confirmed by Go's
	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
	// and finishSlowForwardCopy algorithm.
	//
	// if length > len(dst)-d-10 {
	//   goto verySlowForwardCopy
	// }
	SUBQ $10, R14
	CMPQ CX, R14
	JGT  verySlowForwardCopy

makeOffsetAtLeast8:
	// !!! As above, expand the pattern so that offset >= 8 and we can use
	// 8-byte load/stores.
	//
	// for offset < 8 {
	//   copy 8 bytes from dst[d-offset:] to dst[d:]
	//   length -= offset
	//   d      += offset
	//   offset += offset
	//   // The two previous lines together mean that d-offset, and therefore
	//   // R15, is unchanged.
	// }
	CMPQ DX, $8
	JGE  fixUpSlowForwardCopy
	MOVQ (R15), BX
	MOVQ BX, (DI)
	SUBQ DX, CX
	ADDQ DX, DI
	ADDQ DX, DX
	JMP  makeOffsetAtLeast8

fixUpSlowForwardCopy:
	// !!! Add length (which might be negative now) to d (implied by DI being
	// &dst[d]) so that d ends up at the right place when we jump back to the
	// top of the loop. Before we do that, though, we save DI to AX so that, if
	// length is positive, copying the remaining length bytes will write to the
	// right place.
	MOVQ DI, AX
	ADDQ CX, DI

finishSlowForwardCopy:
	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
	// length means that we overrun, but as above, that will be fixed up by
	// subsequent iterations of the outermost loop.
	CMPQ CX, $0
	JLE  loop
	MOVQ (R15), BX
	MOVQ BX, (AX)
	ADDQ $8, R15
	ADDQ $8, AX
	SUBQ $8, CX
	JMP  finishSlowForwardCopy
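
	// Taken together, the three blocks above are, in Go terms (a sketch,
	// assuming the caller has already verified that length <= len(dst)-d-10,
	// so the 8-byte stores may safely overrun):
	//
	//	for offset < 8 {
	//		copy(dst[d:d+8], dst[d-offset:d-offset+8])
	//		length -= offset
	//		d += offset
	//		offset += offset
	//	}
	//	a := d      // fixUpSlowForwardCopy: save the write position...
	//	d += length // ...and move d to its final value (length may be <= 0 now).
	//	for ; length > 0; length -= 8 {
	//		copy(dst[a:a+8], dst[a-offset:a-offset+8])
	//		a += 8
	//	}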

verySlowForwardCopy:
	// verySlowForwardCopy is a simple implementation of forward copy. In C
	// parlance, this is a do/while loop instead of a while loop, since we know
	// that length > 0. In Go syntax:
	//
	// for {
	//   dst[d] = dst[d - offset]
	//   d++
	//   length--
	//   if length == 0 {
	//     break
	//   }
	// }
	MOVB (R15), BX
	MOVB BX, (DI)
	INCQ R15
	INCQ DI
	DECQ CX
	JNZ  verySlowForwardCopy
	JMP  loop

	// The code above handles copy tags.
	// ----------------------------------------

end:
	// This is the end of the "for s < len(src)".
	//
	// if d != len(dst) { etc }
	CMPQ DI, R10
	JNE  errCorrupt

	// return 0
	MOVQ $0, ret+48(FP)
	RET

errCorrupt:
	// return decodeErrCodeCorrupt
	MOVQ $1, ret+48(FP)
	RET
@@ -0,0 +1,494 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"

// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".

// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
//	- R2	scratch
//	- R3	scratch
//	- R4	length or x
//	- R5	offset
//	- R6	&src[s]
//	- R7	&dst[d]
//	+ R8	dst_base
//	+ R9	dst_len
//	+ R10	dst_base + dst_len
//	+ R11	src_base
//	+ R12	src_len
//	+ R13	src_base + src_len
//	- R14	used by doCopy
//	- R15	used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7.
// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6.
TEXT ·decode(SB), NOSPLIT, $56-56
	// Initialize R6, R7 and R8-R13.
	MOVD dst_base+0(FP), R8
	MOVD dst_len+8(FP), R9
	MOVD R8, R7
	MOVD R8, R10
	ADD  R9, R10, R10
	MOVD src_base+24(FP), R11
	MOVD src_len+32(FP), R12
	MOVD R11, R6
	MOVD R11, R13
	ADD  R12, R13, R13

loop:
	// for s < len(src)
	CMP R13, R6
	BEQ end

	// R4 = uint32(src[s])
	//
	// switch src[s] & 0x03
	MOVBU (R6), R4
	MOVW  R4, R3
	ANDW  $3, R3
	MOVW  $1, R1
	CMPW  R1, R3
	BGE   tagCopy

	// ----------------------------------------
	// The code below handles literal tags.

	// case tagLiteral:
	// x := uint32(src[s] >> 2)
	// switch
	MOVW $60, R1
	LSRW $2, R4, R4
	CMPW R4, R1
	BLS  tagLit60Plus

	// case x < 60:
	// s++
	ADD $1, R6, R6

doLit:
	// This is the end of the inner "switch", when we have a literal tag.
	//
	// We assume that R4 == x and x fits in a uint32, where x is the variable
	// used in the pure Go decode_other.go code.

	// length = int(x) + 1
	//
	// Unlike the pure Go code, we don't need to check if length <= 0 because
	// R4 can hold 64 bits, so the increment cannot overflow.
	ADD $1, R4, R4

	// Prepare to check if copying length bytes will run past the end of dst or
	// src.
	//
	// R2 = len(dst) - d
	// R3 = len(src) - s
	MOVD R10, R2
	SUB  R7, R2, R2
	MOVD R13, R3
	SUB  R6, R3, R3

	// !!! Try a faster technique for short (16 or fewer bytes) copies.
	//
	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
	//   goto callMemmove // Fall back on calling runtime·memmove.
	// }
	//
	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
	// against 21 instead of 16, because it cannot assume that all of its input
	// is contiguous in memory and so it needs to leave enough source bytes to
	// read the next tag without refilling buffers, but Go's Decode assumes
	// contiguousness (the src argument is a []byte).
	CMP $16, R4
	BGT callMemmove
	CMP $16, R2
	BLT callMemmove
	CMP $16, R3
	BLT callMemmove

	// !!! Implement the copy from src to dst as a 16-byte load and store.
	// (Decode's documentation says that dst and src must not overlap.)
	//
	// This always copies 16 bytes, instead of only length bytes, but that's
	// OK. If the input is a valid Snappy encoding then subsequent iterations
	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
	// non-nil error), so the overrun will be ignored.
	//
	// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
	// 16-byte loads and stores. This technique probably wouldn't be as
	// effective on architectures that are fussier about alignment.
	LDP 0(R6), (R14, R15)
	STP (R14, R15), 0(R7)

	// d += length
	// s += length
	ADD R4, R7, R7
	ADD R4, R6, R6
	B   loop

callMemmove:
	// if length > len(dst)-d || length > len(src)-s { etc }
	CMP R2, R4
	BGT errCorrupt
	CMP R3, R4
	BGT errCorrupt

	// copy(dst[d:], src[s:s+length])
	//
	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
	// R7, R6 and R4 as arguments. Coincidentally, we also need to spill those
	// three registers to the stack, to save local variables across the CALL.
	MOVD R7, 8(RSP)
	MOVD R6, 16(RSP)
	MOVD R4, 24(RSP)
	MOVD R7, 32(RSP)
	MOVD R6, 40(RSP)
	MOVD R4, 48(RSP)
	CALL runtime·memmove(SB)

	// Restore local variables: unspill registers from the stack and
	// re-calculate R8-R13.
	MOVD 32(RSP), R7
	MOVD 40(RSP), R6
	MOVD 48(RSP), R4
	MOVD dst_base+0(FP), R8
	MOVD dst_len+8(FP), R9
	MOVD R8, R10
	ADD  R9, R10, R10
	MOVD src_base+24(FP), R11
	MOVD src_len+32(FP), R12
	MOVD R11, R13
	ADD  R12, R13, R13

	// d += length
	// s += length
	ADD R4, R7, R7
	ADD R4, R6, R6
	B   loop

tagLit60Plus:
	// !!! This fragment does the
	//
	// s += x - 58; if uint(s) > uint(len(src)) { etc }
	//
	// checks. In the asm version, we code it once instead of once per switch case.
	ADD  R4, R6, R6
	SUB  $58, R6, R6
	MOVD R6, R3
	SUB  R11, R3, R3
	CMP  R12, R3
	BGT  errCorrupt

	// case x == 60:
	MOVW $61, R1
	CMPW R1, R4
	BEQ  tagLit61
	BGT  tagLit62Plus

	// x = uint32(src[s-1])
	MOVBU -1(R6), R4
	B     doLit

tagLit61:
	// case x == 61:
	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
	MOVHU -2(R6), R4
	B     doLit

tagLit62Plus:
	CMPW $62, R4
	BHI  tagLit63

	// case x == 62:
	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
	MOVHU -3(R6), R4
	MOVBU -1(R6), R3
	ORR   R3<<16, R4
	B     doLit

tagLit63:
	// case x == 63:
	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
	MOVWU -4(R6), R4
	B     doLit

	// The code above handles literal tags.
	// ----------------------------------------
	// The code below handles copy tags.

tagCopy4:
	// case tagCopy4:
	// s += 5
	ADD $5, R6, R6

	// if uint(s) > uint(len(src)) { etc }
	MOVD R6, R3
	SUB  R11, R3, R3
	CMP  R12, R3
	BGT  errCorrupt

	// length = 1 + int(src[s-5])>>2
	MOVD $1, R1
	ADD  R4>>2, R1, R4

	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
	MOVWU -4(R6), R5
	B     doCopy

tagCopy2:
	// case tagCopy2:
	// s += 3
	ADD $3, R6, R6

	// if uint(s) > uint(len(src)) { etc }
	MOVD R6, R3
	SUB  R11, R3, R3
	CMP  R12, R3
	BGT  errCorrupt

	// length = 1 + int(src[s-3])>>2
	MOVD $1, R1
	ADD  R4>>2, R1, R4

	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
	MOVHU -2(R6), R5
	B     doCopy

tagCopy:
	// We have a copy tag. We assume that:
	//	- R3 == src[s] & 0x03
	//	- R4 == src[s]
	CMP $2, R3
	BEQ tagCopy2
	BGT tagCopy4

	// case tagCopy1:
	// s += 2
	ADD $2, R6, R6

	// if uint(s) > uint(len(src)) { etc }
	MOVD R6, R3
	SUB  R11, R3, R3
	CMP  R12, R3
	BGT  errCorrupt

	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
	MOVD  R4, R5
	AND   $0xe0, R5
	MOVBU -1(R6), R3
	ORR   R5<<3, R3, R5

	// length = 4 + int(src[s-2])>>2&0x7
	MOVD $7, R1
	AND  R4>>2, R1, R4
	ADD  $4, R4, R4
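
	// Worked example (illustrative bytes, not from the spec text): a tagCopy1
	// pair src[s-2] = 0xa9 = 0b101_010_01, src[s-1] = 0x34 decodes to
	// length = 4 + 0b010 = 6 and offset = 0b101<<8 | 0x34 = 0x534.
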
doCopy:
	// This is the end of the outer "switch", when we have a copy tag.
	//
	// We assume that:
	//	- R4 == length && R4 > 0
	//	- R5 == offset

	// if offset <= 0 { etc }
	MOVD $0, R1
	CMP  R1, R5
	BLE  errCorrupt

	// if d < offset { etc }
	MOVD R7, R3
	SUB  R8, R3, R3
	CMP  R5, R3
	BLT  errCorrupt

	// if length > len(dst)-d { etc }
	MOVD R10, R3
	SUB  R7, R3, R3
	CMP  R3, R4
	BGT  errCorrupt

	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
	//
	// Set:
	//	- R14 = len(dst)-d
	//	- R15 = &dst[d-offset]
	MOVD R10, R14
	SUB  R7, R14, R14
	MOVD R7, R15
	SUB  R5, R15, R15

	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
	//
	// First, try using two 8-byte load/stores, similar to the doLit technique
	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
	// and not one 16-byte load/store, and the first store has to be before the
	// second load, due to the overlap if offset is in the range [8, 16).
	//
	// if length > 16 || offset < 8 || len(dst)-d < 16 {
	//   goto slowForwardCopy
	// }
	// copy 16 bytes
	// d += length
	CMP  $16, R4
	BGT  slowForwardCopy
	CMP  $8, R5
	BLT  slowForwardCopy
	CMP  $16, R14
	BLT  slowForwardCopy
	MOVD 0(R15), R2
	MOVD R2, 0(R7)
	MOVD 8(R15), R3
	MOVD R3, 8(R7)
	ADD  R4, R7, R7
	B    loop

slowForwardCopy:
	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
	// can still try 8-byte load/stores, provided we can overrun up to 10 extra
	// bytes. As above, the overrun will be fixed up by subsequent iterations
	// of the outermost loop.
	//
	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
	// commentary says:
	//
	// ----
	//
	// The main part of this loop is a simple copy of eight bytes at a time
	// until we've copied (at least) the requested amount of bytes. However,
	// if d and d-offset are less than eight bytes apart (indicating a
	// repeating pattern of length < 8), we first need to expand the pattern in
	// order to get the correct results. For instance, if the buffer looks like
	// this, with the eight-byte <d-offset> and <d> patterns marked as
	// intervals:
	//
	//    abxxxxxxxxxxxx
	//    [------]           d-offset
	//      [------]         d
	//
	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
	// once, after which we can move <d> two bytes without moving <d-offset>:
	//
	//    ababxxxxxxxxxx
	//    [------]           d-offset
	//        [------]       d
	//
	// and repeat the exercise until the two no longer overlap.
	//
	// This allows us to do very well in the special case of one single byte
	// repeated many times, without taking a big hit for more general cases.
	//
	// The worst case of extra writing past the end of the match occurs when
	// offset == 1 and length == 1; the last copy will read from byte positions
	// [0..7] and write to [4..11], whereas it was only supposed to write to
	// position 1. Thus, ten excess bytes.
	//
	// ----
	//
	// That "10 byte overrun" worst case is confirmed by Go's
	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
	// and finishSlowForwardCopy algorithm.
	//
	// if length > len(dst)-d-10 {
	//   goto verySlowForwardCopy
	// }
	SUB $10, R14, R14
	CMP R14, R4
	BGT verySlowForwardCopy

makeOffsetAtLeast8:
	// !!! As above, expand the pattern so that offset >= 8 and we can use
	// 8-byte load/stores.
	//
	// for offset < 8 {
	//   copy 8 bytes from dst[d-offset:] to dst[d:]
	//   length -= offset
	//   d      += offset
	//   offset += offset
	//   // The two previous lines together mean that d-offset, and therefore
	//   // R15, is unchanged.
	// }
	CMP  $8, R5
	BGE  fixUpSlowForwardCopy
	MOVD (R15), R3
	MOVD R3, (R7)
	SUB  R5, R4, R4
	ADD  R5, R7, R7
	ADD  R5, R5, R5
	B    makeOffsetAtLeast8

fixUpSlowForwardCopy:
	// !!! Add length (which might be negative now) to d (implied by R7 being
	// &dst[d]) so that d ends up at the right place when we jump back to the
	// top of the loop. Before we do that, though, we save R7 to R2 so that, if
	// length is positive, copying the remaining length bytes will write to the
	// right place.
	MOVD R7, R2
	ADD  R4, R7, R7

finishSlowForwardCopy:
	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
	// length means that we overrun, but as above, that will be fixed up by
	// subsequent iterations of the outermost loop.
	MOVD $0, R1
	CMP  R1, R4
	BLE  loop
	MOVD (R15), R3
	MOVD R3, (R2)
	ADD  $8, R15, R15
	ADD  $8, R2, R2
	SUB  $8, R4, R4
	B    finishSlowForwardCopy

verySlowForwardCopy:
	// verySlowForwardCopy is a simple implementation of forward copy. In C
	// parlance, this is a do/while loop instead of a while loop, since we know
	// that length > 0. In Go syntax:
	//
	// for {
	//   dst[d] = dst[d - offset]
	//   d++
	//   length--
	//   if length == 0 {
	//     break
	//   }
	// }
	MOVB (R15), R3
	MOVB R3, (R7)
	ADD  $1, R15, R15
	ADD  $1, R7, R7
	SUB  $1, R4, R4
	CBNZ R4, verySlowForwardCopy
	B    loop

	// The code above handles copy tags.
	// ----------------------------------------

end:
	// This is the end of the "for s < len(src)".
	//
	// if d != len(dst) { etc }
	CMP R10, R7
	BNE errCorrupt

	// return 0
	MOVD $0, ret+48(FP)
	RET

errCorrupt:
	// return decodeErrCodeCorrupt
	MOVD $1, R2
	MOVD R2, ret+48(FP)
	RET
@@ -0,0 +1,15 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm
// +build amd64 arm64

package snappy

// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int
@@ -0,0 +1,115 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64,!arm64 appengine !gc noasm

package snappy

// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
	var d, s, offset, length int
	for s < len(src) {
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			switch {
			case x < 60:
				s++
			case x == 60:
				s += 2
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-1])
			case x == 61:
				s += 3
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				s += 4
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				s += 5
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			length = int(x) + 1
			if length <= 0 {
				return decodeErrCodeUnsupportedLiteralLength
			}
			if length > len(dst)-d || length > len(src)-s {
				return decodeErrCodeCorrupt
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue
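
			// Worked example (illustrative): src[s] = 0xf0 has tag bits 0b00
			// (tagLiteral) and x = 0xf0>>2 = 60, so the length is carried in
			// the next byte; if src[s+1] = 0x09 then length = 9 + 1 = 10
			// literal bytes follow.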

		case tagCopy1:
			s += 2
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 4 + int(src[s-2])>>2&0x7
			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))

		case tagCopy2:
			s += 3
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)

		case tagCopy4:
			s += 5
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-5])>>2
			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
		}

		if offset <= 0 || d < offset || length > len(dst)-d {
			return decodeErrCodeCorrupt
		}
		// Copy from an earlier sub-slice of dst to a later sub-slice.
		// If no overlap, use the built-in copy:
		if offset >= length {
			copy(dst[d:d+length], dst[d-offset:])
			d += length
			continue
		}

		// Unlike the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		//	d += forwardCopy(dst[d:d+length], dst[d-offset:])
		//
		// We align the slices into a and b and show the compiler they are the
		// same size. This allows the loop to run without bounds checks.
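		//
		// For example (illustrative): with dst = []byte{'a', 0, 0, 0}, d == 1,
		// offset == 1 and length == 3, this forward loop yields "aaaa" (the
		// one-byte pattern repeats), whereas the built-in copy, which behaves
		// as if the source were read up front, would yield "aa\x00\x00".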
		a := dst[d : d+length]
		b := dst[d-offset:]
		b = b[:len(a)]
		for i := range a {
			a[i] = b[i]
		}
		d += length
	}
	if d != len(dst) {
		return decodeErrCodeCorrupt
	}
	return 0
}
@@ -0,0 +1,289 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package snappy

import (
	"encoding/binary"
	"errors"
	"io"
)

// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// Encode handles the Snappy block format, not the Snappy stream format.
func Encode(dst, src []byte) []byte {
	if n := MaxEncodedLen(len(src)); n < 0 {
		panic(ErrTooLarge)
	} else if len(dst) < n {
		dst = make([]byte, n)
	}

	// The block starts with the varint-encoded length of the decompressed bytes.
	d := binary.PutUvarint(dst, uint64(len(src)))

	for len(src) > 0 {
		p := src
		src = nil
		if len(p) > maxBlockSize {
			p, src = p[:maxBlockSize], p[maxBlockSize:]
		}
		if len(p) < minNonLiteralBlockSize {
			d += emitLiteral(dst[d:], p)
		} else {
			d += encodeBlock(dst[d:], p)
		}
	}
	return dst[:d]
}
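
// A minimal usage sketch (illustrative, not part of this file; it assumes
// the package is imported by its canonical "github.com/golang/snappy" path):
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/golang/snappy"
//	)
//
//	func main() {
//		src := []byte("hello, hello, hello, snappy")
//		enc := snappy.Encode(nil, src)
//		dec, err := snappy.Decode(nil, enc)
//		fmt.Println(len(src), "->", len(enc), "bytes; round trip ok:",
//			err == nil && string(dec) == string(src))
//	}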

// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 16 - 1

// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
const minNonLiteralBlockSize = 1 + 1 + inputMargin

// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
	n := uint64(srcLen)
	if n > 0xffffffff {
		return -1
	}
	// Compressed data can be defined as:
	//    compressed := item* literal*
	//    item       := literal* copy
	//
	// The trailing literal sequence has a space blowup of at most 62/60
	// since a literal of length 60 needs one tag byte + one extra byte
	// for length information.
	//
	// Item blowup is trickier to measure. Suppose the "copy" op copies
	// 4 bytes of data. Because of a special check in the encoding code,
	// we produce a 4-byte copy only if the offset is < 65536. Therefore
	// the copy op takes 3 bytes to encode, and this type of item leads
	// to at most the 62/60 blowup for representing literals.
	//
	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
	// enough, it will take 5 bytes to encode the copy op. Therefore the
	// worst case here is a one-byte literal followed by a five-byte copy.
	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
	//
	// This last factor dominates the blowup, so the final estimate is:
	n = 32 + n + n/6
	if n > 0xffffffff {
		return -1
	}
	return int(n)
}
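
// Worked example (illustrative): for srcLen = 1<<20 (1 MiB), MaxEncodedLen
// returns 32 + 1048576 + 1048576/6 = 1223370, i.e. a worst-case expansion of
// roughly 16.7% plus a small constant.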

var errClosed = errors.New("snappy: Writer is closed")

// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer {
	return &Writer{
		w:    w,
		obuf: make([]byte, obufLen),
	}
}

// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
	return &Writer{
		w:    w,
		ibuf: make([]byte, 0, maxBlockSize),
		obuf: make([]byte, obufLen),
	}
}
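
// A minimal usage sketch (illustrative, not part of this file):
//
//	var buf bytes.Buffer
//	w := snappy.NewBufferedWriter(&buf)
//	if _, err := w.Write([]byte("stream data")); err != nil {
//		log.Fatal(err)
//	}
//	if err := w.Close(); err != nil { // flushes any buffered chunk
//		log.Fatal(err)
//	}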

// Writer is an io.Writer that can write Snappy-compressed bytes.
//
// Writer handles the Snappy stream format, not the Snappy block format.
type Writer struct {
	w   io.Writer
	err error

	// ibuf is a buffer for the incoming (uncompressed) bytes.
	//
	// Its use is optional. For backwards compatibility, Writers created by the
	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
	// therefore do not need to be Flush'ed or Close'd.
	ibuf []byte

	// obuf is a buffer for the outgoing (compressed) bytes.
	obuf []byte

	// wroteStreamHeader is whether we have written the stream header.
	wroteStreamHeader bool
}

// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
	w.w = writer
	w.err = nil
	if w.ibuf != nil {
		w.ibuf = w.ibuf[:0]
	}
	w.wroteStreamHeader = false
}

// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
	if w.ibuf == nil {
		// Do not buffer incoming bytes. This does not perform or compress well
		// if the caller of Writer.Write writes many small slices. This
		// behavior is therefore deprecated, but still supported for backwards
		// compatibility with code that doesn't explicitly Flush or Close.
		return w.write(p)
	}

	// The remainder of this method is based on bufio.Writer.Write from the
	// standard library.

	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
		var n int
		if len(w.ibuf) == 0 {
			// Large write, empty buffer.
			// Write directly from p to avoid copy.
			n, _ = w.write(p)
		} else {
			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
			w.ibuf = w.ibuf[:len(w.ibuf)+n]
			w.Flush()
		}
		nRet += n
		p = p[n:]
	}
	if w.err != nil {
		return nRet, w.err
	}
	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
	w.ibuf = w.ibuf[:len(w.ibuf)+n]
	nRet += n
	return nRet, nil
}

func (w *Writer) write(p []byte) (nRet int, errRet error) {
	if w.err != nil {
		return 0, w.err
	}
	for len(p) > 0 {
		obufStart := len(magicChunk)
		if !w.wroteStreamHeader {
			w.wroteStreamHeader = true
			copy(w.obuf, magicChunk)
			obufStart = 0
		}

		var uncompressed []byte
		if len(p) > maxBlockSize {
			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
		} else {
			uncompressed, p = p, nil
		}
		checksum := crc(uncompressed)

		// Compress the buffer, discarding the result if the improvement
		// isn't at least 12.5%.
		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
		chunkType := uint8(chunkTypeCompressedData)
		chunkLen := 4 + len(compressed)
		obufEnd := obufHeaderLen + len(compressed)
		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
			chunkType = chunkTypeUncompressedData
			chunkLen = 4 + len(uncompressed)
			obufEnd = obufHeaderLen
		}

		// Fill in the per-chunk header that comes before the body.
		w.obuf[len(magicChunk)+0] = chunkType
		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)

		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
			w.err = err
			return nRet, err
		}
		if chunkType == chunkTypeUncompressedData {
			if _, err := w.w.Write(uncompressed); err != nil {
				w.err = err
				return nRet, err
			}
		}
		nRet += len(uncompressed)
	}
	return nRet, nil
}

// Flush flushes the Writer to its underlying io.Writer.
func (w *Writer) Flush() error {
	if w.err != nil {
		return w.err
	}
	if len(w.ibuf) == 0 {
		return nil
	}
	w.write(w.ibuf)
	w.ibuf = w.ibuf[:0]
	return w.err
}

// Close calls Flush and then closes the Writer.
func (w *Writer) Close() error {
	w.Flush()
	ret := w.err
	if w.err == nil {
		w.err = errClosed
	}
	return ret
}
@@ -0,0 +1,730 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"

// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
// https://github.com/golang/snappy/issues/29
//
// As a workaround, the package was built with a known good assembler, and
// those instructions were disassembled by "objdump -d" to yield the
//	4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
// style comments, in AT&T asm syntax. Note that rsp here is a physical
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
// fine on Go 1.6.

// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".

// ----------------------------------------------------------------------------

// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
//	- AX	len(lit)
//	- BX	n
//	- DX	return value
//	- DI	&dst[i]
//	- R10	&lit[0]
//
// The 24 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
	MOVQ dst_base+0(FP), DI
	MOVQ lit_base+24(FP), R10
	MOVQ lit_len+32(FP), AX
	MOVQ AX, DX
	MOVL AX, BX
	SUBL $1, BX

	CMPL BX, $60
	JLT  oneByte
	CMPL BX, $256
	JLT  twoBytes

threeBytes:
	MOVB $0xf4, 0(DI)
	MOVW BX, 1(DI)
	ADDQ $3, DI
	ADDQ $3, DX
	JMP  memmove

twoBytes:
	MOVB $0xf0, 0(DI)
	MOVB BX, 1(DI)
	ADDQ $2, DI
	ADDQ $2, DX
	JMP  memmove

oneByte:
	SHLB $2, BX
	MOVB BX, 0(DI)
	ADDQ $1, DI
	ADDQ $1, DX

memmove:
	MOVQ DX, ret+48(FP)

	// copy(dst[i:], lit)
	//
	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
	// DI, R10 and AX as arguments.
	MOVQ DI, 0(SP)
	MOVQ R10, 8(SP)
	MOVQ AX, 16(SP)
	CALL runtime·memmove(SB)
	RET
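
	// For reference, a sketch of the equivalent pure Go logic (the fallback
	// lives in encode_other.go); 0xf0 is 60<<2|tagLiteral and 0xf4 is
	// 61<<2|tagLiteral:
	//
	//	func emitLiteral(dst, lit []byte) int {
	//		i, n := 0, uint(len(lit)-1)
	//		switch {
	//		case n < 60:
	//			dst[0] = uint8(n)<<2 | tagLiteral
	//			i = 1
	//		case n < 1<<8:
	//			dst[0] = 60<<2 | tagLiteral
	//			dst[1] = uint8(n)
	//			i = 2
	//		default:
	//			dst[0] = 61<<2 | tagLiteral
	//			dst[1] = uint8(n)
	//			dst[2] = uint8(n >> 8)
	//			i = 3
	//		}
	//		return i + copy(dst[i:], lit)
	//	}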

// ----------------------------------------------------------------------------

// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
//	- AX	length
//	- SI	&dst[0]
//	- DI	&dst[i]
//	- R11	offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
	MOVQ dst_base+0(FP), DI
	MOVQ DI, SI
	MOVQ offset+24(FP), R11
	MOVQ length+32(FP), AX

loop0:
	// for length >= 68 { etc }
	CMPL AX, $68
	JLT  step1

	// Emit a length 64 copy, encoded as 3 bytes.
	MOVB $0xfe, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $64, AX
	JMP  loop0

step1:
	// if length > 64 { etc }
	CMPL AX, $64
	JLE  step2

	// Emit a length 60 copy, encoded as 3 bytes.
	MOVB $0xee, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $60, AX

step2:
	// if length >= 12 || offset >= 2048 { goto step3 }
	CMPL AX, $12
	JGE  step3
	CMPL R11, $2048
	JGE  step3

	// Emit the remaining copy, encoded as 2 bytes.
	MOVB R11, 1(DI)
	SHRL $8, R11
	SHLB $5, R11
	SUBB $4, AX
	SHLB $2, AX
	ORB  AX, R11
	ORB  $1, R11
	MOVB R11, 0(DI)
	ADDQ $2, DI

	// Return the number of bytes written.
	SUBQ SI, DI
	MOVQ DI, ret+40(FP)
	RET

step3:
	// Emit the remaining copy, encoded as 3 bytes.
	SUBL $1, AX
	SHLB $2, AX
	ORB  $2, AX
	MOVB AX, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI

	// Return the number of bytes written.
	SUBQ SI, DI
	MOVQ DI, ret+40(FP)
	RET
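
	// For reference, a sketch of the equivalent pure Go logic; 0xfe above is
	// 63<<2|tagCopy2 (length 64) and 0xee is 59<<2|tagCopy2 (length 60):
	//
	//	func emitCopy(dst []byte, offset, length int) int {
	//		i := 0
	//		for length >= 68 {
	//			dst[i+0] = 63<<2 | tagCopy2
	//			dst[i+1] = uint8(offset)
	//			dst[i+2] = uint8(offset >> 8)
	//			i, length = i+3, length-64
	//		}
	//		if length > 64 {
	//			dst[i+0] = 59<<2 | tagCopy2
	//			dst[i+1] = uint8(offset)
	//			dst[i+2] = uint8(offset >> 8)
	//			i, length = i+3, length-60
	//		}
	//		if length >= 12 || offset >= 2048 {
	//			dst[i+0] = uint8(length-1)<<2 | tagCopy2
	//			dst[i+1] = uint8(offset)
	//			dst[i+2] = uint8(offset >> 8)
	//			return i + 3
	//		}
	//		dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	//		dst[i+1] = uint8(offset)
	//		return i + 2
	//	}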

// ----------------------------------------------------------------------------

// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
//	- DX	&src[0]
//	- SI	&src[j]
//	- R13	&src[len(src) - 8]
//	- R14	&src[len(src)]
//	- R15	&src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
	MOVQ src_base+0(FP), DX
	MOVQ src_len+8(FP), R14
	MOVQ i+24(FP), R15
	MOVQ j+32(FP), SI
	ADDQ DX, R14
	ADDQ DX, R15
	ADDQ DX, SI
	MOVQ R14, R13
	SUBQ $8, R13

cmp8:
	// As long as we are 8 or more bytes before the end of src, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMPQ SI, R13
	JA   cmp1
	MOVQ (R15), AX
	MOVQ (SI), BX
	CMPQ AX, BX
	JNE  bsf
	ADDQ $8, R15
	ADDQ $8, SI
	JMP  cmp8

bsf:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs. The BSF instruction finds the
	// least significant 1 bit, the amd64 architecture is little-endian, and
	// the shift by 3 converts a bit index to a byte index.
	XORQ AX, BX
	BSFQ BX, BX
	SHRQ $3, BX
	ADDQ BX, SI

	// Convert from &src[ret] to ret.
	SUBQ DX, SI
	MOVQ SI, ret+40(FP)
	RET

cmp1:
	// In src's tail, compare 1 byte at a time.
	CMPQ SI, R14
	JAE  extendMatchEnd
	MOVB (R15), AX
	MOVB (SI), BX
	CMPB AX, BX
	JNE  extendMatchEnd
	ADDQ $1, R15
	ADDQ $1, SI
	JMP  cmp1

extendMatchEnd:
	// Convert from &src[ret] to ret.
	SUBQ DX, SI
	MOVQ SI, ret+40(FP)
	RET
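
	// For reference, a sketch of the equivalent pure Go logic, which walks i
	// and j forward while the bytes match and returns the new j:
	//
	//	func extendMatch(src []byte, i, j int) int {
	//		for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
	//		}
	//		return j
	//	}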

// ----------------------------------------------------------------------------

// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
//	- AX	.	.
//	- BX	.	.
//	- CX	56	shift (note that amd64 shifts by non-immediates must use CX).
//	- DX	64	&src[0], tableSize
//	- SI	72	&src[s]
//	- DI	80	&dst[d]
//	- R9	88	sLimit
//	- R10	.	&src[nextEmit]
//	- R11	96	prevHash, currHash, nextHash, offset
//	- R12	104	&src[base], skip
//	- R13	.	&src[nextS], &src[len(src) - 8]
//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
//	- R15	112	candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
TEXT ·encodeBlock(SB), 0, $32888-56
	MOVQ dst_base+0(FP), DI
	MOVQ src_base+24(FP), SI
	MOVQ src_len+32(FP), R14

	// shift, tableSize := uint32(32-8), 1<<8
	MOVQ $24, CX
	MOVQ $256, DX

calcShift:
	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
	//	shift--
	// }
	CMPQ DX, $16384
	JGE  varTable
	CMPQ DX, R14
	JGE  varTable
	SUBQ $1, CX
	SHLQ $1, DX
	JMP  calcShift

varTable:
	// var table [maxTableSize]uint16
	//
	// In the asm code, unlike the Go code, we can zero-initialize only the
	// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
	// writes 16 bytes, so we can do only tableSize/8 writes instead of the
	// 2048 writes that would zero-initialize all of table's 32768 bytes.
	SHRQ $3, DX
	LEAQ table-32768(SP), BX
	PXOR X0, X0

memclr:
	MOVOU X0, 0(BX)
	ADDQ  $16, BX
	SUBQ  $1, DX
	JNZ   memclr

	// !!! DX = &src[0]
	MOVQ SI, DX

	// sLimit := len(src) - inputMargin
	MOVQ R14, R9
	SUBQ $15, R9

	// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
	// change for the rest of the function.
	MOVQ CX, 56(SP)
	MOVQ DX, 64(SP)
	MOVQ R9, 88(SP)

	// nextEmit := 0
	MOVQ DX, R10

	// s := 1
	ADDQ $1, SI

	// nextHash := hash(load32(src, s), shift)
	MOVL  0(SI), R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11
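
	// The multiply-shift pair above is the hash function; a sketch of the
	// pure Go version is:
	//
	//	func hash(u, shift uint32) uint32 {
	//		return (u * 0x1e35a7bd) >> shift
	//	}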

outer:
	// for { etc }

	// skip := 32
	MOVQ $32, R12

	// nextS := s
	MOVQ SI, R13

	// candidate := 0
	MOVQ $0, R15

inner0:
	// for { etc }

	// s := nextS
	MOVQ R13, SI

	// bytesBetweenHashLookups := skip >> 5
	MOVQ R12, R14
	SHRQ $5, R14

	// nextS = s + bytesBetweenHashLookups
	ADDQ R14, R13

	// skip += bytesBetweenHashLookups
	ADDQ R14, R12

	// if nextS > sLimit { goto emitRemainder }
	MOVQ R13, AX
	SUBQ DX, AX
	CMPQ AX, R9
	JA   emitRemainder

	// candidate = int(table[nextHash])
	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
	BYTE $0x4e
	BYTE $0x0f
	BYTE $0xb7
	BYTE $0x7c
	BYTE $0x5c
	BYTE $0x78

	// table[nextHash] = uint16(s)
	MOVQ SI, AX
	SUBQ DX, AX

	// XXX: MOVW AX, table-32768(SP)(R11*2)
	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
	BYTE $0x66
	BYTE $0x42
	BYTE $0x89
	BYTE $0x44
	BYTE $0x5c
	BYTE $0x78

	// nextHash = hash(load32(src, nextS), shift)
	MOVL  0(R13), R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// if load32(src, s) != load32(src, candidate) { continue } break
	MOVL 0(SI), AX
	MOVL (DX)(R15*1), BX
	CMPL AX, BX
	JNE  inner0

fourByteMatch:
	// As per the encode_other.go code:
	//
	// A 4-byte match has been found. We'll later see etc.

	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
	// on inputMargin in encode.go.
	MOVQ SI, AX
	SUBQ R10, AX
	CMPQ AX, $16
	JLE  emitLiteralFastPath

	// ----------------------------------------
	// Begin inline of the emitLiteral call.
	//
	// d += emitLiteral(dst[d:], src[nextEmit:s])

	MOVL AX, BX
	SUBL $1, BX

	CMPL BX, $60
	JLT  inlineEmitLiteralOneByte
	CMPL BX, $256
	JLT  inlineEmitLiteralTwoBytes

inlineEmitLiteralThreeBytes:
	MOVB $0xf4, 0(DI)
	MOVW BX, 1(DI)
	ADDQ $3, DI
	JMP  inlineEmitLiteralMemmove

inlineEmitLiteralTwoBytes:
	MOVB $0xf0, 0(DI)
	MOVB BX, 1(DI)
	ADDQ $2, DI
	JMP  inlineEmitLiteralMemmove

inlineEmitLiteralOneByte:
	SHLB $2, BX
	MOVB BX, 0(DI)
	ADDQ $1, DI

inlineEmitLiteralMemmove:
	// Spill local variables (registers) onto the stack; call; unspill.
	//
	// copy(dst[i:], lit)
	//
	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
	// DI, R10 and AX as arguments.
	MOVQ DI, 0(SP)
	MOVQ R10, 8(SP)
	MOVQ AX, 16(SP)
	ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
	MOVQ SI, 72(SP)
	MOVQ DI, 80(SP)
	MOVQ R15, 112(SP)
	CALL runtime·memmove(SB)
	MOVQ 56(SP), CX
	MOVQ 64(SP), DX
	MOVQ 72(SP), SI
	MOVQ 80(SP), DI
	MOVQ 88(SP), R9
	MOVQ 112(SP), R15
	JMP  inner1

inlineEmitLiteralEnd:
	// End inline of the emitLiteral call.
	// ----------------------------------------

emitLiteralFastPath:
	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
	MOVB AX, BX
	SUBB $1, BX
	SHLB $2, BX
	MOVB BX, (DI)
	ADDQ $1, DI

	// !!! Implement the copy from lit to dst as a 16-byte load and store.
	// (Encode's documentation says that dst and src must not overlap.)
	//
	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
	// OK. Subsequent iterations will fix up the overrun.
	//
	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
	// 16-byte loads and stores. This technique probably wouldn't be as
	// effective on architectures that are fussier about alignment.
	MOVOU 0(R10), X0
	MOVOU X0, 0(DI)
	ADDQ  AX, DI

inner1:
	// for { etc }

	// base := s
	MOVQ SI, R12

	// !!! offset := base - candidate
	MOVQ R12, R11
	SUBQ R15, R11
	SUBQ DX, R11

	// ----------------------------------------
	// Begin inline of the extendMatch call.
	//
	// s = extendMatch(src, candidate+4, s+4)

	// !!! R14 = &src[len(src)]
	MOVQ src_len+32(FP), R14
	ADDQ DX, R14

	// !!! R13 = &src[len(src) - 8]
	MOVQ R14, R13
	SUBQ $8, R13

	// !!! R15 = &src[candidate + 4]
	ADDQ $4, R15
	ADDQ DX, R15

	// !!! s += 4
	ADDQ $4, SI

inlineExtendMatchCmp8:
	// As long as we are 8 or more bytes before the end of src, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMPQ SI, R13
	JA   inlineExtendMatchCmp1
	MOVQ (R15), AX
	MOVQ (SI), BX
	CMPQ AX, BX
	JNE  inlineExtendMatchBSF
	ADDQ $8, R15
	ADDQ $8, SI
	JMP  inlineExtendMatchCmp8

inlineExtendMatchBSF:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs. The BSF instruction finds the
	// least significant 1 bit, the amd64 architecture is little-endian, and
	// the shift by 3 converts a bit index to a byte index.
	XORQ AX, BX
	BSFQ BX, BX
	SHRQ $3, BX
	ADDQ BX, SI
	JMP  inlineExtendMatchEnd

inlineExtendMatchCmp1:
	// In src's tail, compare 1 byte at a time.
	CMPQ SI, R14
	JAE  inlineExtendMatchEnd
	MOVB (R15), AX
	MOVB (SI), BX
	CMPB AX, BX
	JNE  inlineExtendMatchEnd
	ADDQ $1, R15
	ADDQ $1, SI
	JMP  inlineExtendMatchCmp1

inlineExtendMatchEnd:
	// End inline of the extendMatch call.
	// ----------------------------------------

	// ----------------------------------------
	// Begin inline of the emitCopy call.
	//
	// d += emitCopy(dst[d:], base-candidate, s-base)

	// !!! length := s - base
	MOVQ SI, AX
	SUBQ R12, AX

inlineEmitCopyLoop0:
	// for length >= 68 { etc }
	CMPL AX, $68
	JLT  inlineEmitCopyStep1

	// Emit a length 64 copy, encoded as 3 bytes.
	MOVB $0xfe, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $64, AX
	JMP  inlineEmitCopyLoop0

inlineEmitCopyStep1:
	// if length > 64 { etc }
	CMPL AX, $64
	JLE  inlineEmitCopyStep2

	// Emit a length 60 copy, encoded as 3 bytes.
	MOVB $0xee, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $60, AX

inlineEmitCopyStep2:
	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
	CMPL AX, $12
	JGE  inlineEmitCopyStep3
	CMPL R11, $2048
	JGE  inlineEmitCopyStep3

	// Emit the remaining copy, encoded as 2 bytes.
	MOVB R11, 1(DI)
	SHRL $8, R11
	SHLB $5, R11
	SUBB $4, AX
	SHLB $2, AX
	ORB  AX, R11
	ORB  $1, R11
	MOVB R11, 0(DI)
	ADDQ $2, DI
	JMP  inlineEmitCopyEnd

inlineEmitCopyStep3:
	// Emit the remaining copy, encoded as 3 bytes.
	SUBL $1, AX
	SHLB $2, AX
	ORB  $2, AX
	MOVB AX, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI

inlineEmitCopyEnd:
	// End inline of the emitCopy call.
	// ----------------------------------------

	// nextEmit = s
	MOVQ SI, R10

	// if s >= sLimit { goto emitRemainder }
	MOVQ SI, AX
	SUBQ DX, AX
	CMPQ AX, R9
	JAE  emitRemainder

	// As per the encode_other.go code:
	//
	// We could immediately etc.

	// x := load64(src, s-1)
	MOVQ -1(SI), R14

	// prevHash := hash(uint32(x>>0), shift)
	MOVL  R14, R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// table[prevHash] = uint16(s-1)
	MOVQ SI, AX
	SUBQ DX, AX
	SUBQ $1, AX

	// XXX: MOVW AX, table-32768(SP)(R11*2)
	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
	BYTE $0x66
	BYTE $0x42
	BYTE $0x89
	BYTE $0x44
	BYTE $0x5c
	BYTE $0x78

	// currHash := hash(uint32(x>>8), shift)
	SHRQ  $8, R14
	MOVL  R14, R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// candidate = int(table[currHash])
	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
	BYTE $0x4e
	BYTE $0x0f
	BYTE $0xb7
	BYTE $0x7c
	BYTE $0x5c
	BYTE $0x78

	// table[currHash] = uint16(s)
	ADDQ $1, AX

	// XXX: MOVW AX, table-32768(SP)(R11*2)
	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
	BYTE $0x66
	BYTE $0x42
	BYTE $0x89
	BYTE $0x44
	BYTE $0x5c
	BYTE $0x78

	// if uint32(x>>8) == load32(src, candidate) { continue }
	MOVL (DX)(R15*1), BX
	CMPL R14, BX
	JEQ  inner1

	// nextHash = hash(uint32(x>>16), shift)
	SHRQ  $8, R14
	MOVL  R14, R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// s++
	ADDQ $1, SI

	// break out of the inner1 for loop, i.e. continue the outer loop.
	JMP outer

emitRemainder:
	// if nextEmit < len(src) { etc }
	MOVQ src_len+32(FP), AX
	ADDQ DX, AX
	CMPQ R10, AX
	JEQ  encodeBlockEnd

	// d += emitLiteral(dst[d:], src[nextEmit:])
	//
	// Push args.
	MOVQ DI, 0(SP)
	MOVQ $0, 8(SP)  // Unnecessary, as the callee ignores it, but conservative.
	MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
||||||
|
MOVQ R10, 24(SP) |
||||||
|
SUBQ R10, AX |
||||||
|
MOVQ AX, 32(SP) |
||||||
|
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. |
||||||
|
|
||||||
|
// Spill local variables (registers) onto the stack; call; unspill.
|
||||||
|
MOVQ DI, 80(SP) |
||||||
|
CALL ·emitLiteral(SB) |
||||||
|
MOVQ 80(SP), DI |
||||||
|
|
||||||
|
// Finish the "d +=" part of "d += emitLiteral(etc)". |
||||||
|
ADDQ 48(SP), DI |
||||||
|
|
||||||
|
encodeBlockEnd: |
||||||
|
MOVQ dst_base+0(FP), AX |
||||||
|
SUBQ AX, DI |
||||||
|
MOVQ DI, d+48(FP) |
||||||
|
RET |
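// Editor's note: a minimal Go sketch (not part of the package) of the
// XOR-and-bit-scan trick that the BSFQ above, and the RBIT+CLZ pair in the
// arm64 version below, implement. firstDiffByte is a hypothetical helper
// name used only for this illustration.
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// firstDiffByte returns the index of the first byte that differs between
// two unequal 8-byte words. XOR leaves a set bit somewhere inside the first
// differing byte; on a little-endian load, the trailing-zero count divided
// by 8 (the shift by 3) is that byte's index.
func firstDiffByte(a, b []byte) int {
	x := binary.LittleEndian.Uint64(a)
	y := binary.LittleEndian.Uint64(b)
	return bits.TrailingZeros64(x^y) >> 3
}

func main() {
	fmt.Println(firstDiffByte([]byte("abcdefgh"), []byte("abcXefgh"))) // 3
}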
@@ -0,0 +1,722 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"

// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".

// ----------------------------------------------------------------------------

// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
//	- R3	len(lit)
//	- R4	n
//	- R6	return value
//	- R8	&dst[i]
//	- R10	&lit[0]
//
// The 32 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $32-56
	MOVD dst_base+0(FP), R8
	MOVD lit_base+24(FP), R10
	MOVD lit_len+32(FP), R3
	MOVD R3, R6
	MOVW R3, R4
	SUBW $1, R4, R4

	CMPW $60, R4
	BLT  oneByte
	CMPW $256, R4
	BLT  twoBytes

threeBytes:
	MOVD $0xf4, R2
	MOVB R2, 0(R8)
	MOVW R4, 1(R8)
	ADD  $3, R8, R8
	ADD  $3, R6, R6
	B    memmove

twoBytes:
	MOVD $0xf0, R2
	MOVB R2, 0(R8)
	MOVB R4, 1(R8)
	ADD  $2, R8, R8
	ADD  $2, R6, R6
	B    memmove

oneByte:
	LSLW $2, R4, R4
	MOVB R4, 0(R8)
	ADD  $1, R8, R8
	ADD  $1, R6, R6

memmove:
	MOVD R6, ret+48(FP)

	// copy(dst[i:], lit)
	//
	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
	// R8, R10 and R3 as arguments.
	MOVD R8, 8(RSP)
	MOVD R10, 16(RSP)
	MOVD R3, 24(RSP)
	CALL runtime·memmove(SB)
	RET

// ----------------------------------------------------------------------------

// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
//	- R3	length
//	- R7	&dst[0]
//	- R8	&dst[i]
//	- R11	offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
	MOVD dst_base+0(FP), R8
	MOVD R8, R7
	MOVD offset+24(FP), R11
	MOVD length+32(FP), R3

loop0:
	// for length >= 68 { etc }
	CMPW $68, R3
	BLT  step1

	// Emit a length 64 copy, encoded as 3 bytes.
	MOVD $0xfe, R2
	MOVB R2, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8
	SUB  $64, R3, R3
	B    loop0

step1:
	// if length > 64 { etc }
	CMP $64, R3
	BLE step2

	// Emit a length 60 copy, encoded as 3 bytes.
	MOVD $0xee, R2
	MOVB R2, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8
	SUB  $60, R3, R3

step2:
	// if length >= 12 || offset >= 2048 { goto step3 }
	CMP  $12, R3
	BGE  step3
	CMPW $2048, R11
	BGE  step3

	// Emit the remaining copy, encoded as 2 bytes.
	MOVB R11, 1(R8)
	LSRW $3, R11, R11
	AND  $0xe0, R11, R11
	SUB  $4, R3, R3
	LSLW $2, R3
	AND  $0xff, R3, R3
	ORRW R3, R11, R11
	ORRW $1, R11, R11
	MOVB R11, 0(R8)
	ADD  $2, R8, R8

	// Return the number of bytes written.
	SUB  R7, R8, R8
	MOVD R8, ret+40(FP)
	RET

step3:
	// Emit the remaining copy, encoded as 3 bytes.
	SUB  $1, R3, R3
	AND  $0xff, R3, R3
	LSLW $2, R3, R3
	ORRW $2, R3, R3
	MOVB R3, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8

	// Return the number of bytes written.
	SUB  R7, R8, R8
	MOVD R8, ret+40(FP)
	RET

// ----------------------------------------------------------------------------

// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
//	- R6	&src[0]
//	- R7	&src[j]
//	- R13	&src[len(src) - 8]
//	- R14	&src[len(src)]
//	- R15	&src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
	MOVD src_base+0(FP), R6
	MOVD src_len+8(FP), R14
	MOVD i+24(FP), R15
	MOVD j+32(FP), R7
	ADD  R6, R14, R14
	ADD  R6, R15, R15
	ADD  R6, R7, R7
	MOVD R14, R13
	SUB  $8, R13, R13

cmp8:
	// As long as we are 8 or more bytes before the end of src, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMP  R13, R7
	BHI  cmp1
	MOVD (R15), R3
	MOVD (R7), R4
	CMP  R4, R3
	BNE  bsf
	ADD  $8, R15, R15
	ADD  $8, R7, R7
	B    cmp8

bsf:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs.
	// RBIT reverses the bit order, then CLZ counts the leading zeros, the
	// combination of which finds the least significant bit which is set.
	// The arm64 architecture is little-endian, and the shift by 3 converts
	// a bit index to a byte index.
	EOR  R3, R4, R4
	RBIT R4, R4
	CLZ  R4, R4
	ADD  R4>>3, R7, R7

	// Convert from &src[ret] to ret.
	SUB  R6, R7, R7
	MOVD R7, ret+40(FP)
	RET

cmp1:
	// In src's tail, compare 1 byte at a time.
	CMP  R7, R14
	BLS  extendMatchEnd
	MOVB (R15), R3
	MOVB (R7), R4
	CMP  R4, R3
	BNE  extendMatchEnd
	ADD  $1, R15, R15
	ADD  $1, R7, R7
	B    cmp1

extendMatchEnd:
	// Convert from &src[ret] to ret.
	SUB  R6, R7, R7
	MOVD R7, ret+40(FP)
	RET

// ----------------------------------------------------------------------------

// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
//	- R3	.	.
//	- R4	.	.
//	- R5	64	shift
//	- R6	72	&src[0], tableSize
//	- R7	80	&src[s]
//	- R8	88	&dst[d]
//	- R9	96	sLimit
//	- R10	.	&src[nextEmit]
//	- R11	104	prevHash, currHash, nextHash, offset
//	- R12	112	&src[base], skip
//	- R13	.	&src[nextS], &src[len(src) - 8]
//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
//	- R15	120	candidate
//	- R16	.	hash constant, 0x1e35a7bd
//	- R17	.	&table
//	- .	128	table
//
// The second column (64, 72, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
TEXT ·encodeBlock(SB), 0, $32896-56
	MOVD dst_base+0(FP), R8
	MOVD src_base+24(FP), R7
	MOVD src_len+32(FP), R14

	// shift, tableSize := uint32(32-8), 1<<8
	MOVD  $24, R5
	MOVD  $256, R6
	MOVW  $0xa7bd, R16
	MOVKW $(0x1e35<<16), R16

calcShift:
	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
	//	shift--
	// }
	MOVD $16384, R2
	CMP  R2, R6
	BGE  varTable
	CMP  R14, R6
	BGE  varTable
	SUB  $1, R5, R5
	LSL  $1, R6, R6
	B    calcShift

varTable:
	// var table [maxTableSize]uint16
	//
	// In the asm code, unlike the Go code, we can zero-initialize only the
	// first tableSize elements. Each uint16 element is 2 bytes and each
	// iterations writes 64 bytes, so we can do only tableSize/32 writes
	// instead of the 2048 writes that would zero-initialize all of table's
	// 32768 bytes. This clear could overrun the first tableSize elements, but
	// it won't overrun the allocated stack size.
	ADD  $128, RSP, R17
	MOVD R17, R4

	// !!! R6 = &src[tableSize]
	ADD R6<<1, R17, R6

memclr:
	STP.P (ZR, ZR), 64(R4)
	STP   (ZR, ZR), -48(R4)
	STP   (ZR, ZR), -32(R4)
	STP   (ZR, ZR), -16(R4)
	CMP   R4, R6
	BHI   memclr

	// !!! R6 = &src[0]
	MOVD R7, R6

	// sLimit := len(src) - inputMargin
	MOVD R14, R9
	SUB  $15, R9, R9

	// !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't
	// change for the rest of the function.
	MOVD R5, 64(RSP)
	MOVD R6, 72(RSP)
	MOVD R9, 96(RSP)

	// nextEmit := 0
	MOVD R6, R10

	// s := 1
	ADD $1, R7, R7

	// nextHash := hash(load32(src, s), shift)
	MOVW 0(R7), R11
	MULW R16, R11, R11
	LSRW R5, R11, R11

outer:
	// for { etc }

	// skip := 32
	MOVD $32, R12

	// nextS := s
	MOVD R7, R13

	// candidate := 0
	MOVD $0, R15

inner0:
	// for { etc }

	// s := nextS
	MOVD R13, R7

	// bytesBetweenHashLookups := skip >> 5
	MOVD R12, R14
	LSR  $5, R14, R14

	// nextS = s + bytesBetweenHashLookups
	ADD R14, R13, R13

	// skip += bytesBetweenHashLookups
	ADD R14, R12, R12

	// if nextS > sLimit { goto emitRemainder }
	MOVD R13, R3
	SUB  R6, R3, R3
	CMP  R9, R3
	BHI  emitRemainder

	// candidate = int(table[nextHash])
	MOVHU 0(R17)(R11<<1), R15

	// table[nextHash] = uint16(s)
	MOVD R7, R3
	SUB  R6, R3, R3

	MOVH R3, 0(R17)(R11<<1)

	// nextHash = hash(load32(src, nextS), shift)
	MOVW 0(R13), R11
	MULW R16, R11
	LSRW R5, R11, R11

	// if load32(src, s) != load32(src, candidate) { continue } break
	MOVW 0(R7), R3
	MOVW (R6)(R15), R4
	CMPW R4, R3
	BNE  inner0

fourByteMatch:
	// As per the encode_other.go code:
	//
	// A 4-byte match has been found. We'll later see etc.

	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
	// on inputMargin in encode.go.
	MOVD R7, R3
	SUB  R10, R3, R3
	CMP  $16, R3
	BLE  emitLiteralFastPath

	// ----------------------------------------
	// Begin inline of the emitLiteral call.
	//
	// d += emitLiteral(dst[d:], src[nextEmit:s])

	MOVW R3, R4
	SUBW $1, R4, R4

	MOVW $60, R2
	CMPW R2, R4
	BLT  inlineEmitLiteralOneByte
	MOVW $256, R2
	CMPW R2, R4
	BLT  inlineEmitLiteralTwoBytes

inlineEmitLiteralThreeBytes:
	MOVD $0xf4, R1
	MOVB R1, 0(R8)
	MOVW R4, 1(R8)
	ADD  $3, R8, R8
	B    inlineEmitLiteralMemmove

inlineEmitLiteralTwoBytes:
	MOVD $0xf0, R1
	MOVB R1, 0(R8)
	MOVB R4, 1(R8)
	ADD  $2, R8, R8
	B    inlineEmitLiteralMemmove

inlineEmitLiteralOneByte:
	LSLW $2, R4, R4
	MOVB R4, 0(R8)
	ADD  $1, R8, R8

inlineEmitLiteralMemmove:
	// Spill local variables (registers) onto the stack; call; unspill.
	//
	// copy(dst[i:], lit)
	//
	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
	// R8, R10 and R3 as arguments.
	MOVD R8, 8(RSP)
	MOVD R10, 16(RSP)
	MOVD R3, 24(RSP)

	// Finish the "d +=" part of "d += emitLiteral(etc)".
	ADD   R3, R8, R8
	MOVD  R7, 80(RSP)
	MOVD  R8, 88(RSP)
	MOVD  R15, 120(RSP)
	CALL  runtime·memmove(SB)
	MOVD  64(RSP), R5
	MOVD  72(RSP), R6
	MOVD  80(RSP), R7
	MOVD  88(RSP), R8
	MOVD  96(RSP), R9
	MOVD  120(RSP), R15
	ADD   $128, RSP, R17
	MOVW  $0xa7bd, R16
	MOVKW $(0x1e35<<16), R16
	B     inner1

inlineEmitLiteralEnd:
	// End inline of the emitLiteral call.
	// ----------------------------------------

emitLiteralFastPath:
	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
	MOVB R3, R4
	SUBW $1, R4, R4
	AND  $0xff, R4, R4
	LSLW $2, R4, R4
	MOVB R4, (R8)
	ADD  $1, R8, R8

	// !!! Implement the copy from lit to dst as a 16-byte load and store.
	// (Encode's documentation says that dst and src must not overlap.)
	//
	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
	// OK. Subsequent iterations will fix up the overrun.
	//
	// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
	// 16-byte loads and stores. This technique probably wouldn't be as
	// effective on architectures that are fussier about alignment.
	LDP 0(R10), (R0, R1)
	STP (R0, R1), 0(R8)
	ADD R3, R8, R8

inner1:
	// for { etc }

	// base := s
	MOVD R7, R12

	// !!! offset := base - candidate
	MOVD R12, R11
	SUB  R15, R11, R11
	SUB  R6, R11, R11

	// ----------------------------------------
	// Begin inline of the extendMatch call.
	//
	// s = extendMatch(src, candidate+4, s+4)

	// !!! R14 = &src[len(src)]
	MOVD src_len+32(FP), R14
	ADD  R6, R14, R14

	// !!! R13 = &src[len(src) - 8]
	MOVD R14, R13
	SUB  $8, R13, R13

	// !!! R15 = &src[candidate + 4]
	ADD $4, R15, R15
	ADD R6, R15, R15

	// !!! s += 4
	ADD $4, R7, R7

inlineExtendMatchCmp8:
	// As long as we are 8 or more bytes before the end of src, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMP  R13, R7
	BHI  inlineExtendMatchCmp1
	MOVD (R15), R3
	MOVD (R7), R4
	CMP  R4, R3
	BNE  inlineExtendMatchBSF
	ADD  $8, R15, R15
	ADD  $8, R7, R7
	B    inlineExtendMatchCmp8

inlineExtendMatchBSF:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs.
	// RBIT reverses the bit order, then CLZ counts the leading zeros, the
	// combination of which finds the least significant bit which is set.
	// The arm64 architecture is little-endian, and the shift by 3 converts
	// a bit index to a byte index.
	EOR  R3, R4, R4
	RBIT R4, R4
	CLZ  R4, R4
	ADD  R4>>3, R7, R7
	B    inlineExtendMatchEnd

inlineExtendMatchCmp1:
	// In src's tail, compare 1 byte at a time.
	CMP  R7, R14
	BLS  inlineExtendMatchEnd
	MOVB (R15), R3
	MOVB (R7), R4
	CMP  R4, R3
	BNE  inlineExtendMatchEnd
	ADD  $1, R15, R15
	ADD  $1, R7, R7
	B    inlineExtendMatchCmp1

inlineExtendMatchEnd:
	// End inline of the extendMatch call.
	// ----------------------------------------

	// ----------------------------------------
	// Begin inline of the emitCopy call.
	//
	// d += emitCopy(dst[d:], base-candidate, s-base)

	// !!! length := s - base
	MOVD R7, R3
	SUB  R12, R3, R3

inlineEmitCopyLoop0:
	// for length >= 68 { etc }
	MOVW $68, R2
	CMPW R2, R3
	BLT  inlineEmitCopyStep1

	// Emit a length 64 copy, encoded as 3 bytes.
	MOVD $0xfe, R1
	MOVB R1, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8
	SUBW $64, R3, R3
	B    inlineEmitCopyLoop0

inlineEmitCopyStep1:
	// if length > 64 { etc }
	MOVW $64, R2
	CMPW R2, R3
	BLE  inlineEmitCopyStep2

	// Emit a length 60 copy, encoded as 3 bytes.
	MOVD $0xee, R1
	MOVB R1, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8
	SUBW $60, R3, R3

inlineEmitCopyStep2:
	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
	MOVW $12, R2
	CMPW R2, R3
	BGE  inlineEmitCopyStep3
	MOVW $2048, R2
	CMPW R2, R11
	BGE  inlineEmitCopyStep3

	// Emit the remaining copy, encoded as 2 bytes.
	MOVB R11, 1(R8)
	LSRW $8, R11, R11
	LSLW $5, R11, R11
	SUBW $4, R3, R3
	AND  $0xff, R3, R3
	LSLW $2, R3, R3
	ORRW R3, R11, R11
	ORRW $1, R11, R11
	MOVB R11, 0(R8)
	ADD  $2, R8, R8
	B    inlineEmitCopyEnd

inlineEmitCopyStep3:
	// Emit the remaining copy, encoded as 3 bytes.
	SUBW $1, R3, R3
	LSLW $2, R3, R3
	ORRW $2, R3, R3
	MOVB R3, 0(R8)
	MOVW R11, 1(R8)
	ADD  $3, R8, R8

inlineEmitCopyEnd:
	// End inline of the emitCopy call.
	// ----------------------------------------

	// nextEmit = s
	MOVD R7, R10

	// if s >= sLimit { goto emitRemainder }
	MOVD R7, R3
	SUB  R6, R3, R3
	CMP  R3, R9
	BLS  emitRemainder

	// As per the encode_other.go code:
	//
	// We could immediately etc.

	// x := load64(src, s-1)
	MOVD -1(R7), R14

	// prevHash := hash(uint32(x>>0), shift)
	MOVW R14, R11
	MULW R16, R11, R11
	LSRW R5, R11, R11

	// table[prevHash] = uint16(s-1)
	MOVD R7, R3
	SUB  R6, R3, R3
	SUB  $1, R3, R3

	MOVHU R3, 0(R17)(R11<<1)

	// currHash := hash(uint32(x>>8), shift)
	LSR  $8, R14, R14
	MOVW R14, R11
	MULW R16, R11, R11
	LSRW R5, R11, R11

	// candidate = int(table[currHash])
	MOVHU 0(R17)(R11<<1), R15

	// table[currHash] = uint16(s)
	ADD   $1, R3, R3
	MOVHU R3, 0(R17)(R11<<1)

	// if uint32(x>>8) == load32(src, candidate) { continue }
	MOVW (R6)(R15), R4
	CMPW R4, R14
	BEQ  inner1

	// nextHash = hash(uint32(x>>16), shift)
	LSR  $8, R14, R14
	MOVW R14, R11
	MULW R16, R11, R11
	LSRW R5, R11, R11

	// s++
	ADD $1, R7, R7

	// break out of the inner1 for loop, i.e. continue the outer loop.
	B outer

emitRemainder:
	// if nextEmit < len(src) { etc }
	MOVD src_len+32(FP), R3
	ADD  R6, R3, R3
	CMP  R3, R10
	BEQ  encodeBlockEnd

	// d += emitLiteral(dst[d:], src[nextEmit:])
	//
	// Push args.
	MOVD R8, 8(RSP)
	MOVD $0, 16(RSP)  // Unnecessary, as the callee ignores it, but conservative.
	MOVD $0, 24(RSP)  // Unnecessary, as the callee ignores it, but conservative.
	MOVD R10, 32(RSP)
	SUB  R10, R3, R3
	MOVD R3, 40(RSP)
	MOVD R3, 48(RSP)  // Unnecessary, as the callee ignores it, but conservative.

	// Spill local variables (registers) onto the stack; call; unspill.
	MOVD R8, 88(RSP)
	CALL ·emitLiteral(SB)
	MOVD 88(RSP), R8

	// Finish the "d +=" part of "d += emitLiteral(etc)".
	MOVD 56(RSP), R1
	ADD  R1, R8, R8

encodeBlockEnd:
	MOVD dst_base+0(FP), R3
	SUB  R3, R8, R8
	MOVD R8, d+48(FP)
	RET
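// Editor's note: a Go sketch (under the same assumptions as the guards in
// step2/inlineEmitCopyStep2 above: 4 <= length < 12 and offset < 2048) of
// the 2-byte copy encoding that both asm versions emit. emitCopy2 is a
// hypothetical name used only for this illustration.
package main

import "fmt"

func emitCopy2(dst []byte, offset, length int) int {
	// Tag byte: bits 5-7 hold offset bits 8-10, bits 2-4 hold length-4,
	// and bits 0-1 hold the tagCopy1 value (1).
	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | 1
	dst[1] = uint8(offset) // low 8 bits of the offset
	return 2
}

func main() {
	var dst [2]byte
	n := emitCopy2(dst[:], 0x123, 7)
	fmt.Printf("%d bytes: %#02x %#02x\n", n, dst[0], dst[1]) // 2 bytes: 0x2d 0x23
}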
@@ -0,0 +1,30 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm
// +build amd64 arm64

package snappy

// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int

// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int

// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int

// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)
@@ -0,0 +1,238 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64,!arm64 appengine !gc noasm

package snappy

func load32(b []byte, i int) uint32 {
	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func load64(b []byte, i int) uint64 {
	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
//	dst is long enough to hold the encoded bytes
//	1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
	i, n := 0, uint(len(lit)-1)
	switch {
	case n < 60:
		dst[0] = uint8(n)<<2 | tagLiteral
		i = 1
	case n < 1<<8:
		dst[0] = 60<<2 | tagLiteral
		dst[1] = uint8(n)
		i = 2
	default:
		dst[0] = 61<<2 | tagLiteral
		dst[1] = uint8(n)
		dst[2] = uint8(n >> 8)
		i = 3
	}
	return i + copy(dst[i:], lit)
}
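// Editor's note: a worked example of emitLiteral's three header sizes, as a
// standalone sketch. n = len(lit)-1 selects a 1, 2 or 3 byte header.
package main

import "fmt"

func literalHeaderSize(litLen int) int {
	switch n := litLen - 1; {
	case n < 60:
		return 1 // the length fits in the tag byte itself
	case n < 1<<8:
		return 2 // tag byte 60<<2, then one length byte
	default:
		return 3 // tag byte 61<<2, then two little-endian length bytes
	}
}

func main() {
	for _, l := range []int{10, 100, 300} {
		fmt.Println(l, "byte literal ->", literalHeaderSize(l), "header byte(s)") // 1, 2, 3
	}
}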
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
//	dst is long enough to hold the encoded bytes
//	1 <= offset && offset <= 65535
//	4 <= length && length <= 65535
func emitCopy(dst []byte, offset, length int) int {
	i := 0
	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
	// length emitted down below is a little lower (at 60 = 64 - 4), because
	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
	for length >= 68 {
		// Emit a length 64 copy, encoded as 3 bytes.
		dst[i+0] = 63<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 64
	}
	if length > 64 {
		// Emit a length 60 copy, encoded as 3 bytes.
		dst[i+0] = 59<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		i += 3
		length -= 60
	}
	if length >= 12 || offset >= 2048 {
		// Emit the remaining copy, encoded as 3 bytes.
		dst[i+0] = uint8(length-1)<<2 | tagCopy2
		dst[i+1] = uint8(offset)
		dst[i+2] = uint8(offset >> 8)
		return i + 3
	}
	// Emit the remaining copy, encoded as 2 bytes.
	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	dst[i+1] = uint8(offset)
	return i + 2
}
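// Editor's note: a standalone sketch that counts the bytes emitCopy would
// write, illustrating the 68/60 thresholds above: a length 67 copy at a
// small offset costs 60+7 (3+2 = 5 bytes) rather than 64+3 (3+3 = 6 bytes).
package main

import "fmt"

func copyEncodedLen(offset, length int) int {
	n := 0
	for length >= 68 {
		n += 3 // a length 64 tagCopy2
		length -= 64
	}
	if length > 64 {
		n += 3 // a length 60 tagCopy2
		length -= 60
	}
	if length >= 12 || offset >= 2048 {
		return n + 3 // the remainder needs a tagCopy2
	}
	return n + 2 // a short, near remainder fits in a tagCopy1
}

func main() {
	fmt.Println(copyEncodedLen(100, 67)) // 5: a 60 copy then a 7 copy
	fmt.Println(copyEncodedLen(100, 64)) // 3: one tagCopy2
}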
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
//	0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
	}
	return j
}

func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}
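// Editor's note: a small sketch of how hash and shift cooperate. With
// shift = 32 - log2(tableSize), the top bits of the 32-bit product become a
// table index that is always less than tableSize.
package main

import "fmt"

func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

func main() {
	const tableSize = 1 << 14 // maxTableSize in encodeBlock
	const shift = 32 - 14
	h := hash(0xdeadbeef, shift)
	fmt.Println(h, h < tableSize) // the index always fits in the table
}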
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
	// The table element type is uint16, as s < sLimit and sLimit < len(src)
	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
	const (
		maxTableSize = 1 << 14
		// tableMask is redundant, but helps the compiler eliminate bounds
		// checks.
		tableMask = maxTableSize - 1
	)
	shift := uint32(32 - 8)
	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
		shift--
	}
	// In Go, all array elements are zero-initialized, so there is no advantage
	// to a smaller tableSize per se. However, it matches the C++ algorithm,
	// and in the asm versions of this code, we can get away with zeroing only
	// the first tableSize elements.
	var table [maxTableSize]uint16

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	nextHash := hash(load32(src, s), shift)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := 32

		nextS := s
		candidate := 0
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = int(table[nextHash&tableMask])
			table[nextHash&tableMask] = uint16(s)
			nextHash = hash(load32(src, nextS), shift)
			if load32(src, s) == load32(src, candidate) {
				break
			}
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s

			// Extend the 4-byte match as long as possible.
			//
			// This is an inlined version of:
			//	s = extendMatch(src, candidate+4, s+4)
			s += 4
			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
			}

			d += emitCopy(dst[d:], base-candidate, s-base)
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x>>0), shift)
			table[prevHash&tableMask] = uint16(s - 1)
			currHash := hash(uint32(x>>8), shift)
			candidate = int(table[currHash&tableMask])
			table[currHash&tableMask] = uint16(s)
			if uint32(x>>8) != load32(src, candidate) {
				nextHash = hash(uint32(x>>16), shift)
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
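// Editor's note: a sketch of the match-skipping heuristic in the inner loop
// above. The stride (skip >> 5) stays at 1 for the first 32 failed probes,
// then grows, so incompressible input is scanned ever more coarsely.
package main

import "fmt"

func main() {
	s, skip := 0, 32
	for i := 0; i < 40; i++ {
		stride := skip >> 5
		s += stride
		skip += stride
	}
	fmt.Println(s) // 48: 32 probes at stride 1, then 8 probes at stride 2
}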
@@ -0,0 +1,98 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package snappy implements the Snappy compression format. It aims for very
// high speeds and reasonable compression.
//
// There are actually two Snappy formats: block and stream. They are related,
// but different: trying to decompress block-compressed data as a Snappy stream
// will fail, and vice versa. The block format is the Decode and Encode
// functions and the stream format is the Reader and Writer types.
//
// The block format, the more common case, is used when the complete size (the
// number of bytes) of the original data is known upfront, at the time
// compression starts. The stream format, also known as the framing format, is
// for when that isn't always true.
//
// The canonical, C++ implementation is at https://github.com/google/snappy and
// it only implements the block format.
package snappy // import "github.com/golang/snappy"

import (
	"hash/crc32"
)

/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer issued by most
    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
    integer denoted by the next 4 bytes.
*/
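// Editor's note: a sketch decoding the first byte of a chunk into the tag l
// and the modifier m, exactly as the comment above describes.
package main

import "fmt"

func main() {
	first := byte(0x2d) // an example tag byte
	l := first & 0x03   // 2 least significant bits: the chunk tag
	m := first >> 2     // 6 most significant bits
	// l == 1 is a tagCopy1: length = 4 + (m & 7) = 7, and m >> 3 = 1 gives
	// bits 8-10 of the offset; the next byte holds offset bits 0-7.
	fmt.Println(l, m) // 1 11
}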
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

const (
	checksumSize    = 4
	chunkHeaderSize = 4
	magicChunk      = "\xff\x06\x00\x00" + magicBody
	magicBody       = "sNaPpY"

	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
	// part of the wire format per se, but some parts of the encoder assume
	// that an offset fits into a uint16.
	//
	// Also, for the framing format (Writer type instead of Encode function),
	// https://github.com/google/snappy/blob/master/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536
	// bytes".
	maxBlockSize = 65536

	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
	// hard coded to be a const instead of a variable, so that obufLen can also
	// be a const. Their equivalence is confirmed by
	// TestMaxEncodedLenOfMaxBlockSize.
	maxEncodedLenOfMaxBlockSize = 76490

	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)

const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}
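// Editor's note: the masked checksum above can be exercised directly with the
// standard library; a quick standalone sketch (maskedCRC mirrors crc):
package main

import (
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// maskedCRC rotates the plain CRC-32C right by 15 bits and adds a constant,
// per the framing format spec, so that checksums of data which itself
// contains embedded checksums stay well-behaved.
func maskedCRC(b []byte) uint32 {
	c := crc32.Update(0, castagnoli, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}

func main() {
	fmt.Printf("%#08x\n", maskedCRC([]byte("sNaPpY")))
}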
@@ -0,0 +1,9 @@
language: go

go:
- 1.4.3
- 1.5.3
- tip

script:
- go test -v ./...
@@ -0,0 +1,10 @@
# How to contribute

We definitely welcome patches and contributions to this project!

### Legal requirements

In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).

You may have already signed it for other Google projects.
@@ -0,0 +1,9 @@
Paul Borman <borman@google.com>
bmatsuo
shawnps
theory
jboverfelt
dsymonds
cd1
wallclockbuilder
dansouza
@@ -0,0 +1,27 @@
Copyright (c) 2009,2014 Google Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,19 @@
# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
The uuid package generates and inspects UUIDs based on
[RFC 4122](http://tools.ietf.org/html/rfc4122)
and DCE 1.1: Authentication and Security Services.

This package is based on the github.com/pborman/uuid package (previously named
code.google.com/p/go-uuid). It differs from these earlier packages in that
a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).

###### Install
`go get github.com/google/uuid`

###### Documentation
[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)

Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://pkg.go.dev/github.com/google/uuid
@@ -0,0 +1,80 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"encoding/binary"
	"fmt"
	"os"
)

// A Domain represents a Version 2 domain
type Domain byte

// Domain constants for DCE Security (Version 2) UUIDs.
const (
	Person = Domain(0)
	Group  = Domain(1)
	Org    = Domain(2)
)

// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
	uuid, err := NewUUID()
	if err == nil {
		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
		uuid[9] = byte(domain)
		binary.BigEndian.PutUint32(uuid[0:], id)
	}
	return uuid, err
}

// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
//	NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
	return NewDCESecurity(Person, uint32(os.Getuid()))
}

// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
//	NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
	return NewDCESecurity(Group, uint32(os.Getgid()))
}

// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
	return Domain(uuid[9])
}

// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
	return binary.BigEndian.Uint32(uuid[0:4])
}

func (d Domain) String() string {
	switch d {
	case Person:
		return "Person"
	case Group:
		return "Group"
	case Org:
		return "Org"
	}
	return fmt.Sprintf("Domain%d", int(d))
}
@@ -0,0 +1,12 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid
@@ -0,0 +1,53 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"crypto/md5"
	"crypto/sha1"
	"hash"
)

// Well known namespace IDs and UUIDs
var (
	NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
	NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
	Nil           UUID // empty UUID, all zeros
)

// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 byte in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
	h.Reset()
	h.Write(space[:]) //nolint:errcheck
	h.Write(data)     //nolint:errcheck
	s := h.Sum(nil)
	var uuid UUID
	copy(uuid[:], s)
	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
	return uuid
}

// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
//	NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
	return NewHash(md5.New(), space, data, 3)
}

// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
//	NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
	return NewHash(sha1.New(), space, data, 5)
}
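// Editor's note: a usage sketch for the name-based constructors above
// (assuming the package is imported as github.com/google/uuid): the same
// namespace and name always produce the same UUID.
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(a == b) // true: name-based (Version 5) UUIDs are deterministic
	fmt.Println(a)      // the same string on every run
}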
@@ -0,0 +1,38 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import "fmt"

// MarshalText implements encoding.TextMarshaler.
func (uuid UUID) MarshalText() ([]byte, error) {
	var js [36]byte
	encodeHex(js[:], uuid)
	return js[:], nil
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
	id, err := ParseBytes(data)
	if err != nil {
		return err
	}
	*uuid = id
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) {
	return uuid[:], nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
	if len(data) != 16 {
		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
	}
	copy(uuid[:], data)
	return nil
}
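// Editor's note: a round-trip sketch for the marshalling methods above
// (assuming the github.com/google/uuid import path): the text form is the
// canonical 36-byte string, the binary form is the raw 16 bytes.
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.Must(uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
	txt, _ := id.MarshalText()
	bin, _ := id.MarshalBinary()
	fmt.Println(len(txt), len(bin)) // 36 16

	var back uuid.UUID
	if err := back.UnmarshalText(txt); err == nil {
		fmt.Println(back == id) // true
	}
}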
@ -0,0 +1,90 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"sync"
)

var (
	nodeMu sync.Mutex
	ifname string  // name of interface being used
	nodeID [6]byte // hardware address for version 1 UUIDs
	zeroID [6]byte // nodeID with only 0's
)

// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
	defer nodeMu.Unlock()
	nodeMu.Lock()
	return ifname
}

// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
	defer nodeMu.Unlock()
	nodeMu.Lock()
	return setNodeInterface(name)
}

func setNodeInterface(name string) bool {
	iname, addr := getHardwareInterface(name) // null implementation for js
	if iname != "" && addr != nil {
		ifname = iname
		copy(nodeID[:], addr)
		return true
	}

	// We found no interfaces with a valid hardware address. If name
	// does not specify a specific interface generate a random Node ID
	// (section 4.1.6)
	if name == "" {
		ifname = "random"
		randomBits(nodeID[:])
		return true
	}
	return false
}

// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
	defer nodeMu.Unlock()
	nodeMu.Lock()
	if nodeID == zeroID {
		setNodeInterface("")
	}
	nid := nodeID
	return nid[:]
}

// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
	if len(id) < 6 {
		return false
	}
	defer nodeMu.Unlock()
	nodeMu.Lock()
	copy(nodeID[:], id)
	ifname = "user"
	return true
}

// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
	var node [6]byte
	copy(node[:], uuid[10:])
	return node[:]
}
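A sketch of pinning the node ID via SetNodeID and reading it back from a generated Version 1 UUID (the 6-byte MAC-style value here is an arbitrary example):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Pin the node ID instead of deriving it from a network interface.
	// Only the first 6 bytes are used.
	if uuid.SetNodeID([]byte{0x02, 0x42, 0xac, 0x11, 0x00, 0x02}) {
		fmt.Println(uuid.NodeInterface()) // "user"
	}

	u, err := uuid.NewUUID() // Version 1: embeds the node ID
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", u.NodeID()) // 02 42 ac 11 00 02
}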
@ -0,0 +1,12 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build js

package uuid

// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
@ -0,0 +1,33 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !js

package uuid

import "net"

var interfaces []net.Interface // cached list of interfaces

// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
	if interfaces == nil {
		var err error
		interfaces, err = net.Interfaces()
		if err != nil {
			return "", nil
		}
	}
	for _, ifs := range interfaces {
		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
			return ifs.Name, ifs.HardwareAddr
		}
	}
	return "", nil
}
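The exported wrapper around this helper is SetNodeInterface. A small sketch of both behaviors it documents (the interface name "does-not-exist0" is a deliberately bogus placeholder):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// "" picks the first interface with a >=6-byte hardware address;
	// if none is found a random node ID is generated, so this never fails.
	uuid.SetNodeInterface("")
	fmt.Println(uuid.NodeInterface()) // e.g. "eth0", or "random"

	// A specific interface name can fail if it does not exist.
	if !uuid.SetNodeInterface("does-not-exist0") {
		fmt.Println("no such interface; node ID left unchanged")
	}
}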
@ -0,0 +1,118 @@
// Copyright 2021 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"bytes"
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

var jsonNull = []byte("null")

// NullUUID represents a UUID that may be null.
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
//  var u uuid.NullUUID
//  err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
//  ...
//  if u.Valid {
//     // use u.UUID
//  } else {
//     // NULL value
//  }
//
type NullUUID struct {
	UUID  UUID
	Valid bool // Valid is true if UUID is not NULL
}

// Scan implements the SQL driver.Scanner interface.
func (nu *NullUUID) Scan(value interface{}) error {
	if value == nil {
		nu.UUID, nu.Valid = Nil, false
		return nil
	}

	err := nu.UUID.Scan(value)
	if err != nil {
		nu.Valid = false
		return err
	}

	nu.Valid = true
	return nil
}

// Value implements the driver Valuer interface.
func (nu NullUUID) Value() (driver.Value, error) {
	if !nu.Valid {
		return nil, nil
	}
	// Delegate to UUID Value function
	return nu.UUID.Value()
}

// MarshalBinary implements encoding.BinaryMarshaler.
func (nu NullUUID) MarshalBinary() ([]byte, error) {
	if nu.Valid {
		return nu.UUID[:], nil
	}

	return []byte(nil), nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
	if len(data) != 16 {
		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
	}
	copy(nu.UUID[:], data)
	nu.Valid = true
	return nil
}

// MarshalText implements encoding.TextMarshaler.
func (nu NullUUID) MarshalText() ([]byte, error) {
	if nu.Valid {
		return nu.UUID.MarshalText()
	}

	return jsonNull, nil
}

// UnmarshalText implements encoding.TextUnmarshaler.
func (nu *NullUUID) UnmarshalText(data []byte) error {
	id, err := ParseBytes(data)
	if err != nil {
		nu.Valid = false
		return err
	}
	nu.UUID = id
	nu.Valid = true
	return nil
}

// MarshalJSON implements json.Marshaler.
func (nu NullUUID) MarshalJSON() ([]byte, error) {
	if nu.Valid {
		return json.Marshal(nu.UUID)
	}

	return jsonNull, nil
}

// UnmarshalJSON implements json.Unmarshaler.
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
	if bytes.Equal(data, jsonNull) {
		*nu = NullUUID{}
		return nil // valid null UUID
	}
	err := json.Unmarshal(data, &nu.UUID)
	nu.Valid = err == nil
	return err
}
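A JSON round-trip showing how NullUUID distinguishes a JSON null from a real UUID value (standalone sketch; the row struct is illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type row struct {
	ID uuid.NullUUID `json:"id"`
}

func main() {
	var r row

	// JSON null unmarshals into an invalid (NULL) NullUUID.
	_ = json.Unmarshal([]byte(`{"id": null}`), &r)
	fmt.Println(r.ID.Valid) // false

	// A UUID string unmarshals into a valid one.
	_ = json.Unmarshal([]byte(`{"id": "6ba7b810-9dad-11d1-80b4-00c04fd430c8"}`), &r)
	fmt.Println(r.ID.Valid, r.ID.UUID) // true 6ba7b810-...
}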
@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"database/sql/driver"
	"fmt"
)

// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
	switch src := src.(type) {
	case nil:
		return nil

	case string:
		// if an empty UUID comes from a table, we return a null UUID
		if src == "" {
			return nil
		}

		// see Parse for required string format
		u, err := Parse(src)
		if err != nil {
			return fmt.Errorf("Scan: %v", err)
		}

		*uuid = u

	case []byte:
		// if an empty UUID comes from a table, we return a null UUID
		if len(src) == 0 {
			return nil
		}

		// assumes a simple slice of bytes if 16 bytes
		// otherwise attempts to parse
		if len(src) != 16 {
			return uuid.Scan(string(src))
		}
		copy((*uuid)[:], src)

	default:
		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
	}

	return nil
}

// Value implements sql.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
func (uuid UUID) Value() (driver.Value, error) {
	return uuid.String(), nil
}
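A database round-trip sketch using the Scan/Value pair above. The driver choice (github.com/lib/pq), the DSN, and the items table are all assumptions for illustration, not part of this repository:

package main

import (
	"database/sql"
	"fmt"

	"github.com/google/uuid"
	_ "github.com/lib/pq" // hypothetical driver choice
)

func main() {
	// Placeholder DSN; the table items(id uuid) is assumed to exist.
	db, err := sql.Open("postgres", "postgres://localhost/app?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	id := uuid.New()

	// Value() hands the driver the UUID's string form.
	if _, err := db.Exec(`INSERT INTO items (id) VALUES ($1)`, id); err != nil {
		panic(err)
	}

	// Scan() parses string or []byte columns back into a UUID.
	var got uuid.UUID
	if err := db.QueryRow(`SELECT id FROM items WHERE id = $1`, id).Scan(&got); err != nil {
		panic(err)
	}
	fmt.Println(got == id) // true
}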
@ -0,0 +1,123 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"encoding/binary"
	"sync"
	"time"
)

// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64

const (
	lillian    = 2299160          // Julian day of 15 Oct 1582
	unix       = 2440587          // Julian day of 1 Jan 1970
	epoch      = unix - lillian   // Days between epochs
	g1582      = epoch * 86400    // seconds between epochs
	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
)

var (
	timeMu   sync.Mutex
	lasttime uint64 // last time we returned
	clockSeq uint16 // clock sequence for this run

	timeNow = time.Now // for testing
)

// UnixTime converts t to the number of seconds and nanoseconds since the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
	sec = int64(t - g1582ns100)
	nsec = (sec % 10000000) * 100
	sec /= 10000000
	return sec, nsec
}

// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
	defer timeMu.Unlock()
	timeMu.Lock()
	return getTime()
}

func getTime() (Time, uint16, error) {
	t := timeNow()

	// If we don't have a clock sequence already, set one.
	if clockSeq == 0 {
		setClockSequence(-1)
	}
	now := uint64(t.UnixNano()/100) + g1582ns100

	// If time has gone backwards with this clock sequence then we
	// increment the clock sequence
	if now <= lasttime {
		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
	}
	lasttime = now
	return Time(now), clockSeq, nil
}

// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
	defer timeMu.Unlock()
	timeMu.Lock()
	return clockSequence()
}

func clockSequence() int {
	if clockSeq == 0 {
		setClockSequence(-1)
	}
	return int(clockSeq & 0x3fff)
}

// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
	defer timeMu.Unlock()
	timeMu.Lock()
	setClockSequence(seq)
}

func setClockSequence(seq int) {
	if seq == -1 {
		var b [2]byte
		randomBits(b[:]) // clock sequence
		seq = int(b[0])<<8 | int(b[1])
	}
	oldSeq := clockSeq
	clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
	if oldSeq != clockSeq {
		lasttime = 0
	}
}

// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs.
func (uuid UUID) Time() Time {
	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
	return Time(time)
}

// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}
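A sketch of recovering wall-clock time from a Version 1 UUID via the epoch conversion above (100ns ticks since 15 Oct 1582 back to Unix seconds and nanoseconds):

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewUUID() // Version 1, embeds the 60-bit timestamp
	if err != nil {
		panic(err)
	}

	// UnixTime subtracts g1582ns100 and splits the remainder.
	sec, nsec := u.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC()) // roughly time.Now()

	// The 14-bit clock sequence used for this run.
	fmt.Println(u.ClockSequence())
}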
@ -0,0 +1,43 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"io"
)

// randomBits completely fills slice b with random data.
func randomBits(b []byte) {
	if _, err := io.ReadFull(rander, b); err != nil {
		panic(err.Error()) // rand should never fail
	}
}

// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}

// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
	b1 := xvalues[x1]
	b2 := xvalues[x2]
	return (b1 << 4) | b2, b1 != 255 && b2 != 255
}
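The xvalues table folds validation and conversion into one array read per character: each ASCII byte maps to its hex digit value, with 255 marking non-hex input. A standalone copy of the same trick, building the table programmatically instead of by literal:

package main

import "fmt"

func main() {
	var xvalues [256]byte
	for i := range xvalues {
		xvalues[i] = 255 // 255 = "not a hex digit"
	}
	for c := byte('0'); c <= '9'; c++ {
		xvalues[c] = c - '0'
	}
	for c := byte('a'); c <= 'f'; c++ {
		xvalues[c] = c - 'a' + 10
	}
	for c := byte('A'); c <= 'F'; c++ {
		xvalues[c] = c - 'A' + 10
	}

	xtob := func(x1, x2 byte) (byte, bool) {
		b1, b2 := xvalues[x1], xvalues[x2]
		return (b1 << 4) | b2, b1 != 255 && b2 != 255
	}

	b, ok := xtob('6', 'b')
	fmt.Printf("%#x %v\n", b, ok) // 0x6b true
	_, ok = xtob('g', '0')
	fmt.Println(ok) // false
}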
@ -0,0 +1,294 @@
// Copyright 2018 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
)

// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID [16]byte

// A Version represents a UUID's version.
type Version byte

// A Variant represents a UUID's variant.
type Variant byte

// Constants returned by Variant.
const (
	Invalid   = Variant(iota) // Invalid UUID
	RFC4122                   // The variant specified in RFC4122
	Reserved                  // Reserved, NCS backward compatibility.
	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
	Future                    // Reserved for future definition.
)

const randPoolSize = 16 * 16

var (
	rander      = rand.Reader // random function
	poolEnabled = false
	poolMu      sync.Mutex
	poolPos     = randPoolSize     // protected with poolMu
	pool        [randPoolSize]byte // protected with poolMu
)

type invalidLengthError struct{ len int }

func (err invalidLengthError) Error() string {
	return fmt.Sprintf("invalid UUID length: %d", err.len)
}

// IsInvalidLengthError is a matcher function for the custom error
// invalidLengthError.
func IsInvalidLengthError(err error) bool {
	_, ok := err.(invalidLengthError)
	return ok
}

// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
func Parse(s string) (UUID, error) {
	var uuid UUID
	switch len(s) {
	// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	case 36:

	// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	case 36 + 9:
		if strings.ToLower(s[:9]) != "urn:uuid:" {
			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
		}
		s = s[9:]

	// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
	case 36 + 2:
		s = s[1:]

	// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
	case 32:
		var ok bool
		for i := range uuid {
			uuid[i], ok = xtob(s[i*2], s[i*2+1])
			if !ok {
				return uuid, errors.New("invalid UUID format")
			}
		}
		return uuid, nil
	default:
		return uuid, invalidLengthError{len(s)}
	}
	// s is now at least 36 bytes long
	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
		return uuid, errors.New("invalid UUID format")
	}
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11,
		14, 16,
		19, 21,
		24, 26, 28, 30, 32, 34} {
		v, ok := xtob(s[x], s[x+1])
		if !ok {
			return uuid, errors.New("invalid UUID format")
		}
		uuid[i] = v
	}
	return uuid, nil
}

// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
	var uuid UUID
	switch len(b) {
	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
		if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
		}
		b = b[9:]
	case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
		b = b[1:]
	case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
		var ok bool
		for i := 0; i < 32; i += 2 {
			uuid[i/2], ok = xtob(b[i], b[i+1])
			if !ok {
				return uuid, errors.New("invalid UUID format")
			}
		}
		return uuid, nil
	default:
		return uuid, invalidLengthError{len(b)}
	}
	// b is now at least 36 bytes long
	// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
		return uuid, errors.New("invalid UUID format")
	}
	for i, x := range [16]int{
		0, 2, 4, 6,
		9, 11,
		14, 16,
		19, 21,
		24, 26, 28, 30, 32, 34} {
		v, ok := xtob(b[x], b[x+1])
		if !ok {
			return uuid, errors.New("invalid UUID format")
		}
		uuid[i] = v
	}
	return uuid, nil
}

// MustParse is like Parse but panics if the string cannot be parsed.
// It simplifies safe initialization of global variables holding compiled UUIDs.
func MustParse(s string) UUID {
	uuid, err := Parse(s)
	if err != nil {
		panic(`uuid: Parse(` + s + `): ` + err.Error())
	}
	return uuid
}

// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
// does not have a length of 16. The bytes are copied from the slice.
func FromBytes(b []byte) (uuid UUID, err error) {
	err = uuid.UnmarshalBinary(b)
	return uuid, err
}

// Must returns uuid if err is nil and panics otherwise.
func Must(uuid UUID, err error) UUID {
	if err != nil {
		panic(err)
	}
	return uuid
}

// String returns the string form of uuid,
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) String() string {
	var buf [36]byte
	encodeHex(buf[:], uuid)
	return string(buf[:])
}

// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
	var buf [36 + 9]byte
	copy(buf[:], "urn:uuid:")
	encodeHex(buf[9:], uuid)
	return string(buf[:])
}

func encodeHex(dst []byte, uuid UUID) {
	hex.Encode(dst, uuid[:4])
	dst[8] = '-'
	hex.Encode(dst[9:13], uuid[4:6])
	dst[13] = '-'
	hex.Encode(dst[14:18], uuid[6:8])
	dst[18] = '-'
	hex.Encode(dst[19:23], uuid[8:10])
	dst[23] = '-'
	hex.Encode(dst[24:], uuid[10:])
}

// Variant returns the variant encoded in uuid.
func (uuid UUID) Variant() Variant {
	switch {
	case (uuid[8] & 0xc0) == 0x80:
		return RFC4122
	case (uuid[8] & 0xe0) == 0xc0:
		return Microsoft
	case (uuid[8] & 0xe0) == 0xe0:
		return Future
	default:
		return Reserved
	}
}

// Version returns the version of uuid.
func (uuid UUID) Version() Version {
	return Version(uuid[6] >> 4)
}

func (v Version) String() string {
	if v > 15 {
		return fmt.Sprintf("BAD_VERSION_%d", v)
	}
	return fmt.Sprintf("VERSION_%d", v)
}

func (v Variant) String() string {
	switch v {
	case RFC4122:
		return "RFC4122"
	case Reserved:
		return "Reserved"
	case Microsoft:
		return "Microsoft"
	case Future:
		return "Future"
	case Invalid:
		return "Invalid"
	}
	return fmt.Sprintf("BadVariant%d", int(v))
}

// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
	if r == nil {
		rander = rand.Reader
		return
	}
	rander = r
}

// EnableRandPool enables internal randomness pool used for Random
// (Version 4) UUID generation. The pool contains random bytes read from
// the random number generator on demand in batches. Enabling the pool
// may improve the UUID generation throughput significantly.
//
// Since the pool is stored on the Go heap, this feature may be a bad fit
// for security sensitive applications.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func EnableRandPool() {
	poolEnabled = true
}

// DisableRandPool disables the randomness pool if it was previously
// enabled with EnableRandPool.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func DisableRandPool() {
	poolEnabled = false
	defer poolMu.Unlock()
	poolMu.Lock()
	poolPos = randPoolSize
}
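A quick demonstration of the four input encodings Parse accepts, and of distinguishing length errors from format errors with IsInvalidLengthError:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// All four accepted encodings parse to the same UUID.
	for _, s := range []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
		"6ba7b8109dad11d180b400c04fd430c8",
	} {
		u, err := uuid.Parse(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(u, u.Version(), u.Variant()) // ... VERSION_1 RFC4122
	}

	// A wrong-length input yields invalidLengthError, not a format error.
	_, err := uuid.Parse("not-a-uuid")
	fmt.Println(uuid.IsInvalidLengthError(err)) // true
}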
@ -0,0 +1,44 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import (
	"encoding/binary"
)

// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
	var uuid UUID
	now, seq, err := GetTime()
	if err != nil {
		return uuid, err
	}

	timeLow := uint32(now & 0xffffffff)
	timeMid := uint16((now >> 32) & 0xffff)
	timeHi := uint16((now >> 48) & 0x0fff)
	timeHi |= 0x1000 // Version 1

	binary.BigEndian.PutUint32(uuid[0:], timeLow)
	binary.BigEndian.PutUint16(uuid[4:], timeMid)
	binary.BigEndian.PutUint16(uuid[6:], timeHi)
	binary.BigEndian.PutUint16(uuid[8:], seq)

	nodeMu.Lock()
	if nodeID == zeroID {
		setNodeInterface("")
	}
	copy(uuid[10:], nodeID[:])
	nodeMu.Unlock()

	return uuid, nil
}
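The fields NewUUID packs (timestamp split across bytes 0-7, version nibble in byte 6, clock sequence and variant in bytes 8-9, node ID in bytes 10-15) can all be read back through the accessors defined earlier in this diff. A short sketch:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}

	fmt.Println(u.Version())       // VERSION_1 (high nibble of byte 6)
	fmt.Println(u.Variant())       // RFC4122   (top bits of byte 8)
	fmt.Println(u.Time())          // 60-bit timestamp
	fmt.Println(u.ClockSequence()) // low 14 bits of bytes 8-9
	fmt.Printf("% x\n", u.NodeID())
}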
@ -0,0 +1,76 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import "io"

// New creates a new random UUID or panics. New is equivalent to
// the expression
//
//    uuid.Must(uuid.NewRandom())
func New() UUID {
	return Must(NewRandom())
}

// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
//    uuid.New().String()
func NewString() string {
	return Must(NewRandom()).String()
}

// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// Uses the randomness pool if it was enabled with EnableRandPool.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
//  Randomly generated UUIDs have 122 random bits. One's annual risk of being
//  hit by a meteorite is estimated to be one chance in 17 billion, that
//  means the probability is about 0.00000000006 (6 × 10⁻¹¹),
//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
//  year and having one duplicate.
func NewRandom() (UUID, error) {
	if !poolEnabled {
		return NewRandomFromReader(rander)
	}
	return newRandomFromPool()
}

// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) {
	var uuid UUID
	_, err := io.ReadFull(r, uuid[:])
	if err != nil {
		return Nil, err
	}
	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}

func newRandomFromPool() (UUID, error) {
	var uuid UUID
	poolMu.Lock()
	if poolPos == randPoolSize {
		_, err := io.ReadFull(rander, pool[:])
		if err != nil {
			poolMu.Unlock()
			return Nil, err
		}
		poolPos = 0
	}
	copy(uuid[:], pool[poolPos:(poolPos+16)])
	poolPos += 16
	poolMu.Unlock()

	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}
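Two usage angles on the generators above, as a standalone sketch: feeding NewRandomFromReader a seeded source for reproducible test UUIDs (a deliberately non-cryptographic assumption, fine only for tests), and batching reads via the rand pool:

package main

import (
	"fmt"
	"math/rand"

	"github.com/google/uuid"
)

func main() {
	// Deterministic UUIDs for tests: *rand.Rand implements io.Reader,
	// so a seeded source can replace crypto/rand. Never do this in
	// production; the output is predictable.
	src := rand.New(rand.NewSource(42))
	u, err := uuid.NewRandomFromReader(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // same value on every run

	// For high-throughput generation, batch reads from crypto/rand.
	// Not safe to toggle while other goroutines are generating UUIDs.
	uuid.EnableRandPool()
	defer uuid.DisableRandPool()
	for i := 0; i < 3; i++ {
		fmt.Println(uuid.New())
	}
}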
@ -0,0 +1,2 @@
* -text
*.bin -text -diff
@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe